= "extpolControllerCont"
EXTPOL_EP = "extpolEp"
EXTPOL_EP_FSM = "extpolEpFsm"
EXTPOL_EP_FSM_STAGE = "extpolEpFsmStage"
EXTPOL_EP_FSM_TASK = "extpolEpFsmTask"
EXTPOL_PROVIDER = "extpolProvider"
EXTPOL_PROVIDER_CONT = "extpolProviderCont"
EXTPOL_PROVIDER_FSM = "extpolProviderFsm"
EXTPOL_PROVIDER_FSM_STAGE = "extpolProviderFsmStage"
EXTPOL_PROVIDER_FSM_TASK = "extpolProviderFsmTask"
EXTPOL_REGISTRY = "extpolRegistry"
EXTPOL_REGISTRY_FSM = "extpolRegistryFsm"
EXTPOL_REGISTRY_FSM_STAGE = "extpolRegistryFsmStage"
EXTPOL_REGISTRY_FSM_TASK = "extpolRegistryFsmTask"
EXTPOL_SYSTEM_CONTEXT = "extpolSystemContext"
EXTVMM_EP = "extvmmEp"
EXTVMM_EP_FSM = "extvmmEpFsm"
EXTVMM_EP_FSM_STAGE = "extvmmEpFsmStage"
EXTVMM_EP_FSM_TASK = "extvmmEpFsmTask"
EXTVMM_KEY_INST = "extvmmKeyInst"
EXTVMM_KEY_RING = "extvmmKeyRing"
EXTVMM_KEY_STORE = "extvmmKeyStore"
EXTVMM_KEY_STORE_FSM = "extvmmKeyStoreFsm"
EXTVMM_KEY_STORE_FSM_STAGE = "extvmmKeyStoreFsmStage"
EXTVMM_KEY_STORE_FSM_TASK = "extvmmKeyStoreFsmTask"
EXTVMM_MASTER_EXT_KEY = "extvmmMasterExtKey"
EXTVMM_MASTER_EXT_KEY_FSM = "extvmmMasterExtKeyFsm"
EXTVMM_MASTER_EXT_KEY_FSM_STAGE = "extvmmMasterExtKeyFsmStage"
EXTVMM_MASTER_EXT_KEY_FSM_TASK = "extvmmMasterExtKeyFsmTask"
EXTVMM_PROVIDER = "extvmmProvider"
EXTVMM_PROVIDER_FSM = "extvmmProviderFsm"
EXTVMM_PROVIDER_FSM_STAGE = "extvmmProviderFsmStage"
EXTVMM_PROVIDER_FSM_TASK = "extvmmProviderFsmTask"
EXTVMM_SWITCH_DEL_TASK = "extvmmSwitchDelTask"
EXTVMM_SWITCH_DEL_TASK_FSM = "extvmmSwitchDelTaskFsm"
EXTVMM_SWITCH_DEL_TASK_FSM_STAGE = "extvmmSwitchDelTaskFsmStage"
EXTVMM_SWITCH_DEL_TASK_FSM_TASK = "extvmmSwitchDelTaskFsmTask"
EXTVMM_SWITCH_SET = "extvmmSwitchSet"
FABRIC_BHVLAN = "fabricBHVlan"
FABRIC_CHASSIS_EP = "fabricChassisEp"
FABRIC_COMPUTE_PH_EP = "fabricComputePhEp"
FABRIC_COMPUTE_SLOT_EP = "fabricComputeSlotEp"
FABRIC_COMPUTE_SLOT_EP_FSM = "fabricComputeSlotEpFsm"
FABRIC_COMPUTE_SLOT_EP_FSM_STAGE = "fabricComputeSlotEpFsmStage"
FABRIC_COMPUTE_SLOT_EP_FSM_TASK = "fabricComputeSlotEpFsmTask"
FABRIC_DCE_SRV = "fabricDceSrv"
FABRIC_DCE_SW_SRV = "fabricDceSwSrv"
FABRIC_DCE_SW_SRV_EP = "fabricDceSwSrvEp"
FABRIC_DCE_SW_SRV_PC = "fabricDceSwSrvPc"
FABRIC_DCE_SW_SRV_PC_EP = "fabricDceSwSrvPcEp"
FABRIC_EP = "fabricEp"
FABRIC_EP_MGR = "fabricEpMgr"
FABRIC_EP_MGR_FSM = "fabricEpMgrFsm"
FABRIC_EP_MGR_FSM_STAGE = "fabricEpMgrFsmStage"
FABRIC_EP_MGR_FSM_TASK = "fabricEpMgrFsmTask"
FABRIC_ETH_ESTC = "fabricEthEstc"
FABRIC_ETH_ESTC_CLOUD = "fabricEthEstcCloud"
FABRIC_ETH_ESTC_EP = "fabricEthEstcEp"
FABRIC_ETH_ESTC_PC = "fabricEthEstcPc"
FABRIC_ETH_ESTC_PC_EP = "fabricEthEstcPcEp"
FABRIC_ETH_LAN = "fabricEthLan"
FABRIC_ETH_LAN_EP = "fabricEthLanEp"
FABRIC_ETH_LAN_PC = "fabricEthLanPc"
FABRIC_ETH_LAN_PC_EP = "fabricEthLanPcEp"
FABRIC_ETH_MON = "fabricEthMon"
FABRIC_ETH_MON_DEST_EP = "fabricEthMonDestEp"
FABRIC_ETH_MON_FILT_EP = "fabricEthMonFiltEp"
FABRIC_ETH_MON_FILT_REF = "fabricEthMonFiltRef"
FABRIC_ETH_MON_LAN = "fabricEthMonLan"
FABRIC_ETH_MON_SRC_EP = "fabricEthMonSrcEp"
FABRIC_ETH_MON_SRC_REF = "fabricEthMonSrcRef"
FABRIC_ETH_TARGET_EP = "fabricEthTargetEp"
FABRIC_ETH_VLAN_PC = "fabricEthVlanPc"
FABRIC_ETH_VLAN_PORT_EP = "fabricEthVlanPortEp"
FABRIC_FC_ESTC = "fabricFcEstc"
FABRIC_FC_ESTC_CLOUD = "fabricFcEstcCloud"
FABRIC_FC_ESTC_EP = "fabricFcEstcEp"
FABRIC_FC_MON = "fabricFcMon"
FABRIC_FC_MON_DEST_EP = "fabricFcMonDestEp"
FABRIC_FC_MON_FILT_EP = "fabricFcMonFiltEp"
FABRIC_FC_MON_FILT_REF = "fabricFcMonFiltRef"
FABRIC_FC_MON_SAN = "fabricFcMonSan"
FABRIC_FC_MON_SRC_EP = "fabricFcMonSrcEp"
FABRIC_FC_MON_SRC_REF = "fabricFcMonSrcRef"
FABRIC_FC_SAN = "fabricFcSan"
FABRIC_FC_SAN_EP = "fabricFcSanEp"
FABRIC_FC_SAN_PC = "fabricFcSanPc"
FABRIC_FC_SAN_PC_EP = "fabricFcSanPcEp"
FABRIC_FC_VSAN_PC = "fabricFcVsanPc"
FABRIC_FC_VSAN_PORT_EP = "fabricFcVsanPortEp"
FABRIC_FCOE_ESTC_EP = "fabricFcoeEstcEp"
FABRIC_FCOE_SAN_EP = "fabricFcoeSanEp"
FABRIC_FCOE_SAN_PC = "fabricFcoeSanPc"
FABRIC_FCOE_SAN_PC_EP = "fabricFcoeSanPcEp"
FABRIC_FCOE_VSAN_PC = "fabricFcoeVsanPc"
FABRIC_FCOE_VSAN_PORT_EP = "fabricFcoeVsanPortEp"
FABRIC_IF = "fabricIf"
FABRIC_LAN_ACCESS_MGR = "fabricLanAccessMgr"
FABRIC_LAN_CLOUD = "fabricLanCloud"
FABRIC_LAN_CLOUD_FSM = "fabricLanCloudFsm"
FABRIC_LAN_CLOUD_FSM_STAGE = "fabricLanCloudFsmStage"
FABRIC_LAN_CLOUD_FSM_TASK = "fabricLanCloudFsmTask"
FABRIC_LAN_MON_CLOUD = "fabricLanMonCloud"
FABRIC_LAN_PIN_GROUP = "fabricLanPinGroup"
FABRIC_LAN_PIN_TARGET = "fabricLanPinTarget"
FABRIC_LAST_ACKED_SLOT = "fabricLastAckedSlot"
FABRIC_LOCALE = "fabricLocale"
FABRIC_MULTICAST_POLICY = "fabricMulticastPolicy"
FABRIC_NET_GROUP = "fabricNetGroup"
FABRIC_ORG_VLAN_POLICY = "fabricOrgVlanPolicy"
FABRIC_PATH = "fabricPath"
FABRIC_PATH_CONN = "fabricPathConn"
FABRIC_PATH_EP = "fabricPathEp"
FABRIC_POOLABLE_VLAN = "fabricPoolableVlan"
FABRIC_POOLED_VLAN = "fabricPooledVlan"
FABRIC_SAN_CLOUD = "fabricSanCloud"
FABRIC_SAN_CLOUD_FSM = "fabricSanCloudFsm"
FABRIC_SAN_CLOUD_FSM_STAGE = "fabricSanCloudFsmStage"
FABRIC_SAN_CLOUD_FSM_TASK = "fabricSanCloudFsmTask"
FABRIC_SAN_MON_CLOUD = "fabricSanMonCloud"
FABRIC_SAN_PIN_GROUP = "fabricSanPinGroup"
FABRIC_SAN_PIN_TARGET = "fabricSanPinTarget"
FABRIC_SW_CH_PH_EP = "fabricSwChPhEp"
FABRIC_VCON = "fabricVCon"
FABRIC_VCON_PROFILE = "fabricVConProfile"
FABRIC_VLAN = "fabricVlan"
FABRIC_VLAN_EP = "fabricVlanEp"
FABRIC_VLAN_GROUP_REQ = "fabricVlanGroupReq"
FABRIC_VLAN_PERMIT = "fabricVlanPermit"
FABRIC_VLAN_REQ = "fabricVlanReq"
FABRIC_VSAN = "fabricVsan"
FABRIC_VSAN_EP = "fabricVsanEp"
FABRIC_VSAN_MEMBERSHIP = "fabricVsanMembership"
FABRIC_ZONE_ID_UNIVERSE = "fabricZoneIdUniverse"
FAULT_ACK_FAULT = "faultAckFault"
FAULT_ACK_FAULTS = "faultAckFaults"
FAULT_AFFECTED_CLASS = "faultAffectedClass"
FAULT_HOLDER = "faultHolder"
FAULT_INST = "faultInst"
FAULT_LOCAL_TYPED_HOLDER = "faultLocalTypedHolder"
FAULT_POLICY = "faultPolicy"
FAULT_RESOLVE_FAULT = "faultResolveFault"
FAULT_SUPPRESS_POLICY = "faultSuppressPolicy"
FAULT_SUPPRESS_POLICY_ITEM = "faultSuppressPolicyItem"
FAULT_SUPPRESS_TASK = "faultSuppressTask"
FC_ERR_STATS = "fcErrStats"
FC_ERR_STATS_HIST = "fcErrStatsHist"
FC_NIC_IF_CONFIG = "fcNicIfConfig"
FC_PIO = "fcPIo"
FC_PIO_FSM = "fcPIoFsm"
FC_PIO_FSM_STAGE = "fcPIoFsmStage"
FC_STATS = "fcStats"
FC_STATS_HIST = "fcStatsHist"
FC_SW_IF_CONFIG = "fcSwIfConfig"
FCPOOL_ADDR = "fcpoolAddr"
FCPOOL_BLOCK = "fcpoolBlock"
FCPOOL_BOOT_TARGET = "fcpoolBootTarget"
FCPOOL_FORMAT = "fcpoolFormat"
FCPOOL_INITIATOR = "fcpoolInitiator"
FCPOOL_INITIATOR_EP = "fcpoolInitiatorEp"
FCPOOL_INITIATORS = "fcpoolInitiators"
FCPOOL_POOLABLE = "fcpoolPoolable"
FCPOOL_UNIVERSE = "fcpoolUniverse"
FILTER_FILTER = "filterFilter"
FIRMWARE_ACK = "firmwareAck"
FIRMWARE_BLADE = "firmwareBlade"
FIRMWARE_BOOT_DEFINITION = "firmwareBootDefinition"
FIRMWARE_BOOT_UNIT = "firmwareBootUnit"
FIRMWARE_BUNDLE_INFO = "firmwareBundleInfo"
FIRMWARE_BUNDLE_INFO_DIGEST = "firmwareBundleInfoDigest"
FIRMWARE_BUNDLE_TYPE = "firmwareBundleType"
FIRMWARE_BUNDLE_TYPE_CAP_PROVIDER = "firmwareBundleTypeCapProvider"
FIRMWARE_CATALOG_PACK = "firmwareCatalogPack"
FIRMWARE_CATALOGUE = "firmwareCatalogue"
FIRMWARE_COMP_SOURCE = "firmwareCompSource"
FIRMWARE_COMP_TARGET = "firmwareCompTarget"
FIRMWARE_COMPUTE_HOST_PACK = "firmwareComputeHostPack"
FIRMWARE_COMPUTE_MGMT_PACK = "firmwareComputeMgmtPack"
FIRMWARE_DEPENDENCY = "firmwareDependency"
FIRMWARE_DIST_IMAGE = "firmwareDistImage"
FIRMWARE_DISTRIBUTABLE = "firmwareDistributable"
FIRMWARE_DISTRIBUTABLE_FSM = "firmwareDistributableFsm"
FIRMWARE_DISTRIBUTABLE_FSM_STAGE = "firmwareDistributableFsmStage"
FIRMWARE_DISTRIBUTABLE_FSM_TASK = "firmwareDistributableFsmTask"
FIRMWARE_DOWNLOADER = "firmwareDownloader"
FIRMWARE_DOWNLOADER_FSM = "firmwareDownloaderFsm"
FIRMWARE_DOWNLOADER_FSM_STAGE = "firmwareDownloaderFsmStage"
FIRMWARE_DOWNLOADER_FSM_TASK = "firmwareDownloaderFsmTask"
FIRMWARE_HOST = "firmwareHost"
FIRMWARE_HOST_PACK_MOD_IMPACT = "firmwareHostPackModImpact"
FIRMWARE_IMAGE = "firmwareImage"
FIRMWARE_IMAGE_FSM = "firmwareImageFsm"
FIRMWARE_IMAGE_FSM_STAGE = "firmwareImageFsmStage"
FIRMWARE_IMAGE_FSM_TASK = "firmwareImageFsmTask"
FIRMWARE_INFRA = "firmwareInfra"
FIRMWARE_INFRA_PACK = "firmwareInfraPack"
FIRMWARE_INSTALL_IMPACT = "firmwareInstallImpact"
FIRMWARE_INSTALLABLE = "firmwareInstallable"
FIRMWARE_PACK_ITEM = "firmwarePackItem"
FIRMWARE_RACK = "firmwareRack"
FIRMWARE_RUNNING = "firmwareRunning"
FIRMWARE_SPEC = "firmwareSpec"
FIRMWARE_STATUS = "firmwareStatus"
FIRMWARE_SYSTEM = "firmwareSystem"
FIRMWARE_SYSTEM_COMP_CHECK_RESULT = "firmwareSystemCompCheckResult"
FIRMWARE_SYSTEM_FSM = "firmwareSystemFsm"
FIRMWARE_SYSTEM_FSM_STAGE = "firmwareSystemFsmStage"
FIRMWARE_SYSTEM_FSM_TASK = "firmwareSystemFsmTask"
FIRMWARE_TYPE = "firmwareType"
FIRMWARE_UPDATABLE = "firmwareUpdatable"
FIRMWARE_UPGRADE_CONSTRAINT = "firmwareUpgradeConstraint"
FIRMWARE_UPGRADE_DETAIL = "firmwareUpgradeDetail"
FIRMWARE_UPGRADE_INFO = "firmwareUpgradeInfo"
FLOWCTRL_DEFINITION = "flowctrlDefinition"
FLOWCTRL_ITEM = "flowctrlItem"
FSM_DEBUG_ACTION = "fsmDebugAction"
FSM_STATUS = "fsmStatus"
GE_FILTER = "geFilter"
GT_FILTER = "gtFilter"
HOSTIMG_POLICY = "hostimgPolicy"
HOSTIMG_TARGET = "hostimgTarget"
ID = "id"
ID_SET = "idSet"
IDENT_IDENT_CTX = "identIdentCtx"
IDENT_IDENT_REQUEST = "identIdentRequest"
IDENT_IDENT_REQUEST_FSM = "identIdentRequestFsm"
IDENT_IDENT_REQUEST_FSM_STAGE = "identIdentRequestFsmStage"
IDENT_IDENT_REQUEST_FSM_TASK = "identIdentRequestFsmTask"
IDENT_META_SYSTEM = "identMetaSystem"
IDENT_META_SYSTEM_FSM = "identMetaSystemFsm"
IDENT_META_SYSTEM_FSM_STAGE = "identMetaSystemFsmStage"
IDENT_META_SYSTEM_FSM_TASK = "identMetaSystemFsmTask"
IDENT_META_VERSE = "identMetaVerse"
IDENT_REQUEST_EP = "identRequestEp"
IDENT_SYS_INFO = "identSysInfo"
IMGPROV_POLICY = "imgprovPolicy"
IMGPROV_TARGET = "imgprovTarget"
IMGSEC_KEY = "imgsecKey"
IMGSEC_POLICY = "imgsecPolicy"
INITIATOR_FC_INITIATOR_EP = "initiatorFcInitiatorEp"
INITIATOR_GROUP_EP = "initiatorGroupEp"
INITIATOR_ISCSI_INITIATOR_EP = "initiatorIScsiInitiatorEp"
INITIATOR_LUN_EP = "initiatorLunEp"
INITIATOR_MEMBER_EP = "initiatorMemberEp"
INITIATOR_REQUESTOR_EP = "initiatorRequestorEp"
INITIATOR_REQUESTOR_GRP_EP = "initiatorRequestorGrpEp"
INITIATOR_STORE_EP = "initiatorStoreEp"
INITIATOR_UNIT_EP = "initiatorUnitEp"
IP_IPV4_DNS = "ipIPv4Dns"
IP_IP_V4_STATIC_ADDR = "ipIpV4StaticAddr"
IP_SERVICE_IF = "ipServiceIf"
IPPOOL_ADDR = "ippoolAddr"
IPPOOL_BLOCK = "ippoolBlock"
IPPOOL_POOL = "ippoolPool"
IPPOOL_POOLABLE = "ippoolPoolable"
IPPOOL_POOLED = "ippoolPooled"
IPPOOL_UNIVERSE = "ippoolUniverse"
IQNPOOL_ADDR = "iqnpoolAddr"
IQNPOOL_BLOCK = "iqnpoolBlock"
IQNPOOL_FORMAT = "iqnpoolFormat"
IQNPOOL_POOL = "iqnpoolPool"
IQNPOOL_POOLABLE = "iqnpoolPoolable"
IQNPOOL_POOLED = "iqnpoolPooled"
IQNPOOL_UNIVERSE = "iqnpoolUniverse"
ISCSI_AUTH_PROFILE = "iscsiAuthProfile"
ISCSI_PROTOCOL_PROFILE = "iscsiProtocolProfile"
LE_FILTER = "leFilter"
LICENSE_CONTENTS = "licenseContents"
LICENSE_DOWNLOADER = "licenseDownloader"
LICENSE_DOWNLOADER_FSM = "licenseDownloaderFsm"
LICENSE_DOWNLOADER_FSM_STAGE = "licenseDownloaderFsmStage"
LICENSE_DOWNLOADER_FSM_TASK = "licenseDownloaderFsmTask"
LICENSE_EP = "licenseEp"
LICENSE_FEATURE = "licenseFeature"
LICENSE_FEATURE_CAP_PROVIDER = "licenseFeatureCapProvider"
LICENSE_FEATURE_LINE = "licenseFeatureLine"
LICENSE_FILE = "licenseFile"
LICENSE_FILE_FSM = "licenseFileFsm"
LICENSE_FILE_FSM_STAGE = "licenseFileFsmStage"
LICENSE_FILE_FSM_TASK = "licenseFileFsmTask"
LICENSE_INSTANCE = "licenseInstance"
LICENSE_INSTANCE_FSM = "licenseInstanceFsm"
LICENSE_INSTANCE_FSM_STAGE = "licenseInstanceFsmStage"
LICENSE_INSTANCE_FSM_TASK = "licenseInstanceFsmTask"
LICENSE_PROP = "licenseProp"
LICENSE_SERVER_HOST_ID = "licenseServerHostId"
LICENSE_SOURCE = "licenseSource"
LICENSE_SOURCE_FILE = "licenseSourceFile"
LLDP_ACQUIRED = "lldpAcquired"
LOGGING_SYNC_OCNS = "loggingSyncOcns"
LS_AGENT_POLICY = "lsAgentPolicy"
LS_BINDING = "lsBinding"
LS_CLONE = "lsClone"
LS_FC_LOCALE = "lsFcLocale"
LS_FC_ZONE = "lsFcZone"
LS_FC_ZONE_GROUP = "lsFcZoneGroup"
LS_INSTANTIATE_NNAMED_TEMPLATE = "lsInstantiateNNamedTemplate"
LS_INSTANTIATE_NTEMPLATE = "lsInstantiateNTemplate"
LS_INSTANTIATE_TEMPLATE = "lsInstantiateTemplate"
LS_POWER = "lsPower"
LS_REQUIREMENT = "lsRequirement"
LS_RESOLVE_TEMPLATES = "lsResolveTemplates"
LS_SERVER = "lsServer"
LS_SERVER_ASSOC_CTX = "lsServerAssocCtx"
LS_SERVER_FSM = "lsServerFsm"
LS_SERVER_FSM_STAGE = "lsServerFsmStage"
LS_SERVER_FSM_TASK = "lsServerFsmTask"
LS_TEMPLATISE = "lsTemplatise"
LS_TIER = "lsTier"
LS_VCON_ASSIGN = "lsVConAssign"
LS_VERSION_BEH = "lsVersionBeh"
LS_ZONE_INITIATOR_MEMBER = "lsZoneInitiatorMember"
LS_ZONE_TARGET_MEMBER = "lsZoneTargetMember"
LSBOOT_DEF = "lsbootDef"
LSBOOT_ISCSI = "lsbootIScsi"
LSBOOT_ISCSI_IMAGE_PATH = "lsbootIScsiImagePath"
LSBOOT_LAN = "lsbootLan"
LSBOOT_LAN_IMAGE_PATH = "lsbootLanImagePath"
LSBOOT_LOCAL_STORAGE = "lsbootLocalStorage"
LSBOOT_POLICY = "lsbootPolicy"
LSBOOT_SAN_IMAGE = "lsbootSanImage"
LSBOOT_SAN_IMAGE_PATH = "lsbootSanImagePath"
LSBOOT_STORAGE = "lsbootStorage"
LSBOOT_VIRTUAL_MEDIA = "lsbootVirtualMedia"
LSMAINT_ACK = "lsmaintAck"
LSMAINT_MAINT_POLICY = "lsmaintMaintPolicy"
LT_FILTER = "ltFilter"
MACPOOL_ADDR = "macpoolAddr"
MACPOOL_BLOCK = "macpoolBlock"
MACPOOL_FORMAT = "macpoolFormat"
MACPOOL_POOL = "macpoolPool"
MACPOOL_POOLABLE = "macpoolPoolable"
MACPOOL_POOLED = "macpoolPooled"
MACPOOL_UNIVERSE = "macpoolUniverse"
MEMORY_ARRAY = "memoryArray"
MEMORY_ARRAY_ENV_STATS = "memoryArrayEnvStats"
MEMORY_ARRAY_ENV_STATS_HIST = "memoryArrayEnvStatsHist"
MEMORY_BUFFER_UNIT = "memoryBufferUnit"
MEMORY_BUFFER_UNIT_ENV_STATS = "memoryBufferUnitEnvStats"
MEMORY_BUFFER_UNIT_ENV_STATS_HIST = "memoryBufferUnitEnvStatsHist"
MEMORY_ERROR_STATS = "memoryErrorStats"
MEMORY_QUAL = "memoryQual"
MEMORY_RUNTIME = "memoryRuntime"
MEMORY_RUNTIME_HIST = "memoryRuntimeHist"
MEMORY_UNIT = "memoryUnit"
MEMORY_UNIT_ENV_STATS = "memoryUnitEnvStats"
MEMORY_UNIT_ENV_STATS_HIST = "memoryUnitEnvStatsHist"
METHOD_VESSEL = "methodVessel"
MGMT_ACCESS_POLICY = "mgmtAccessPolicy"
MGMT_ACCESS_POLICY_ITEM = "mgmtAccessPolicyItem"
MGMT_ACCESS_PORT = "mgmtAccessPort"
MGMT_BACKUP = "mgmtBackup"
MGMT_BACKUP_FSM = "mgmtBackupFsm"
MGMT_BACKUP_FSM_STAGE = "mgmtBackupFsmStage"
MGMT_BACKUP_FSM_TASK = "mgmtBackupFsmTask"
MGMT_BACKUP_POLICY = "mgmtBackupPolicy"
MGMT_BACKUP_POLICY_FSM = "mgmtBackupPolicyFsm"
MGMT_BACKUP_POLICY_FSM_STAGE = "mgmtBackupPolicyFsmStage"
MGMT_CFG_EXPORT_POLICY = "mgmtCfgExportPolicy"
MGMT_CFG_EXPORT_POLICY_FSM = "mgmtCfgExportPolicyFsm"
MGMT_CFG_EXPORT_POLICY_FSM_STAGE = "mgmtCfgExportPolicyFsmStage"
MGMT_CONNECTION = "mgmtConnection"
MGMT_CONTROLLER = "mgmtController"
MGMT_CONTROLLER_FSM = "mgmtControllerFsm"
MGMT_CONTROLLER_FSM_STAGE = "mgmtControllerFsmStage"
MGMT_CONTROLLER_FSM_TASK = "mgmtControllerFsmTask"
MGMT_ENTITY = "mgmtEntity"
MGMT_EXPORT_POLICY_FSM = "mgmtExportPolicyFsm"
MGMT_EXPORT_POLICY_FSM_STAGE = "mgmtExportPolicyFsmStage"
MGMT_EXPORT_POLICY_FSM_TASK = "mgmtExportPolicyFsmTask"
MGMT_IF = "mgmtIf"
MGMT_IF_FSM = "mgmtIfFsm"
MGMT_IF_FSM_STAGE = "mgmtIfFsmStage"
MGMT_IF_FSM_TASK = "mgmtIfFsmTask"
MGMT_IMPORTER = "mgmtImporter"
MGMT_IMPORTER_FSM = "mgmtImporterFsm"
MGMT_IMPORTER_FSM_STAGE = "mgmtImporterFsmStage"
MGMT_IMPORTER_FSM_TASK = "mgmtImporterFsmTask"
MGMT_INT_AUTH_POLICY = "mgmtIntAuthPolicy"
MGMT_PMON_ENTRY = "mgmtPmonEntry"
MGMT_RESOLVE_BACKUP_FILENAMES = "mgmtResolveBackupFilenames"
NE_FILTER = "neFilter"
NETWORK_ELEMENT = "networkElement"
NETWORK_IF_STATS = "networkIfStats"
NFS_EP = "nfsEp"
NFS_MOUNT_DEF = "nfsMountDef"
NFS_MOUNT_DEF_FSM = "nfsMountDefFsm"
NFS_MOUNT_DEF_FSM_STAGE = "nfsMountDefFsmStage"
NFS_MOUNT_DEF_FSM_TASK = "nfsMountDefFsmTask"
NFS_MOUNT_INST = "nfsMountInst"
NFS_MOUNT_INST_FSM = "nfsMountInstFsm"
NFS_MOUNT_INST_FSM_STAGE = "nfsMountInstFsmStage"
NFS_MOUNT_INST_FSM_TASK = "nfsMountInstFsmTask"
NOT_FILTER = "notFilter"
NWCTRL_DEFINITION = "nwctrlDefinition"
OBSERVE_FILTER = "observeFilter"
OBSERVE_OBSERVED = "observeObserved"
OBSERVE_OBSERVED_CONT = "observeObservedCont"
OBSERVE_OBSERVED_FSM = "observeObservedFsm"
OBSERVE_OBSERVED_FSM_STAGE = "observeObservedFsmStage"
OBSERVE_OBSERVED_FSM_TASK = "observeObservedFsmTask"
OR_FILTER = "orFilter"
ORG_ORG = "orgOrg"
ORG_RESOLVE_ELEMENTS = "orgResolveElements"
ORG_RESOLVE_LOGICAL_PARENTS = "orgResolveLogicalParents"
OS_AGENT = "osAgent"
OS_INSTANCE = "osInstance"
PAIR = "pair"
PCI_EQUIP_SLOT = "pciEquipSlot"
PCI_UNIT = "pciUnit"
PKI_CERT_REQ = "pkiCertReq"
PKI_EP = "pkiEp"
PKI_EP_FSM = "pkiEpFsm"
PKI_EP_FSM_STAGE = "pkiEpFsmStage"
PKI_EP_FSM_TASK = "pkiEpFsmTask"
PKI_KEY_RING = "pkiKeyRing"
PKI_TP = "pkiTP"
POLICY_COMMUNICATION = "policyCommunication"
POLICY_CONFIG_BACKUP = "policyConfigBackup"
POLICY_CONTROL_EP = "policyControlEp"
POLICY_CONTROL_EP_FSM = "policyControlEpFsm"
POLICY_CONTROL_EP_FSM_STAGE = "policyControlEpFsmStage"
POLICY_CONTROL_EP_FSM_TASK = "policyControlEpFsmTask"
POLICY_CONTROLLED_INSTANCE = "policyControlledInstance"
POLICY_CONTROLLED_TYPE = "policyControlledType"
POLICY_DATE_TIME = "policyDateTime"
POLICY_DIGEST = "policyDigest"
POLICY_DISCOVERY = "policyDiscovery"
POLICY_DNS = "policyDns"
POLICY_FAULT = "policyFault"
POLICY_INFRA_FIRMWARE = "policyInfraFirmware"
POLICY_MEP = "policyMEp"
POLICY_MONITORING = "policyMonitoring"
POLICY_POLICY_EP = "policyPolicyEp"
POLICY_POLICY_REQUESTOR = "policyPolicyRequestor"
POLICY_POLICY_SCOPE = "policyPolicyScope"
POLICY_POLICY_SCOPE_CONT = "policyPolicyScopeCont"
POLICY_POLICY_SCOPE_CONTEXT = "policyPolicyScopeContext"
POLICY_POLICY_SCOPE_FSM = "policyPolicyScopeFsm"
POLICY_POLICY_SCOPE_FSM_STAGE = "policyPolicyScopeFsmStage"
POLICY_POLICY_SCOPE_FSM_TASK = "policyPolicyScopeFsmTask"
POLICY_POWER_MGMT = "policyPowerMgmt"
POLICY_PSU = "policyPsu"
POLICY_RESOLVE_NAMES = "policyResolveNames"
POLICY_SECURITY = "policySecurity"
POOL_RESOLVE_IN_SCOPE = "poolResolveInScope"
PORT_DOMAIN_EP = "portDomainEp"
PORT_GROUP = "portGroup"
PORT_PIO_FSM = "portPIoFsm"
PORT_PIO_FSM_STAGE = "portPIoFsmStage"
PORT_PIO_FSM_TASK = "portPIoFsmTask"
PORT_TRUST_MODE = "portTrustMode"
POWER_BUDGET = "powerBudget"
POWER_CHASSIS_MEMBER = "powerChassisMember"
POWER_EP = "powerEp"
POWER_GROUP = "powerGroup"
POWER_GROUP_ADDITION_POLICY = "powerGroupAdditionPolicy"
POWER_GROUP_QUAL = "powerGroupQual"
POWER_GROUP_STATS = "powerGroupStats"
POWER_GROUP_STATS_HIST = "powerGroupStatsHist"
POWER_MGMT_POLICY = "powerMgmtPolicy"
POWER_PLACEMENT = "powerPlacement"
POWER_POLICY = "powerPolicy"
POWER_PRIO_WGHT = "powerPrioWght"
POWER_RACK_UNIT_MEMBER = "powerRackUnitMember"
PROC_DOER = "procDoer"
PROC_MANAGER = "procManager"
PROC_PRT = "procPrt"
PROC_PRT_COUNTS = "procPrtCounts"
PROC_STIMULUS_COUNTS = "procStimulusCounts"
PROC_SVC = "procSvc"
PROC_TX_COUNTS = "procTxCounts"
PROCESSOR_CORE = "processorCore"
PROCESSOR_ENV_STATS = "processorEnvStats"
PROCESSOR_ENV_STATS_HIST = "processorEnvStatsHist"
PROCESSOR_ERROR_STATS = "processorErrorStats"
PROCESSOR_QUAL = "processorQual"
PROCESSOR_RUNTIME = "processorRuntime"
PROCESSOR_RUNTIME_HIST = "processorRuntimeHist"
PROCESSOR_THREAD = "processorThread"
PROCESSOR_UNIT = "processorUnit"
PROCESSOR_UNIT_ASSOC_CTX = "processorUnitAssocCtx"
QOSCLASS_DEFINITION = "qosclassDefinition"
QOSCLASS_DEFINITION_FSM = "qosclassDefinitionFsm"
QOSCLASS_DEFINITION_FSM_STAGE = "qosclassDefinitionFsmStage"
QOSCLASS_DEFINITION_FSM_TASK = "qosclassDefinitionFsmTask"
QOSCLASS_ETH_BE = "qosclassEthBE"
QOSCLASS_ETH_CLASSIFIED = "qosclassEthClassified"
QOSCLASS_FC = "qosclassFc"
SOL_CONFIG = "solConfig"
SOL_IF = "solIf"
SOL_POLICY = "solPolicy"
STATS_CLEAR_INTERVAL = "statsClearInterval"
STATS_COLLECTION_POLICY = "statsCollectionPolicy"
STATS_COLLECTION_POLICY_FSM = "statsCollectionPolicyFsm"
STATS_COLLECTION_POLICY_FSM_STAGE = "statsCollectionPolicyFsmStage"
STATS_COLLECTION_POLICY_FSM_TASK = "statsCollectionPolicyFsmTask"
STATS_HOLDER = "statsHolder"
STATS_RESOLVE_THRESHOLD_POLICY = "statsResolveThresholdPolicy"
STATS_THR32_DEFINITION = "statsThr32Definition"
STATS_THR32_VALUE = "statsThr32Value"
STATS_THR64_DEFINITION = "statsThr64Definition"
STATS_THR64_VALUE = "statsThr64Value"
STATS_THR_FLOAT_DEFINITION = "statsThrFloatDefinition"
STATS_THR_FLOAT_VALUE = "statsThrFloatValue"
STATS_THRESHOLD_CLASS = "statsThresholdClass"
STATS_THRESHOLD_POLICY = "statsThresholdPolicy"
STORAGE_AUTH_KEY = "storageAuthKey"
STORAGE_CONNECTION_DEF = "storageConnectionDef"
STORAGE_CONNECTION_POLICY = "storageConnectionPolicy"
STORAGE_CONTROLLER = "storageController"
STORAGE_DOMAIN_EP = "storageDomainEp"
STORAGE_DRIVE = "storageDrive"
STORAGE_ENCLOSURE = "storageEnclosure"
STORAGE_EP_USER = "storageEpUser"
STORAGE_ETHER_IF = "storageEtherIf"
STORAGE_FC_IF = "storageFcIf"
STORAGE_FC_TARGET_EP = "storageFcTargetEp"
STORAGE_FC_TARGET_IF = "storageFcTargetIf"
STORAGE_ISCSI_TARGET_IF = "storageIScsiTargetIf"
STORAGE_INI_GROUP = "storageIniGroup"
STORAGE_INITIATOR = "storageInitiator"
STORAGE_ITEM = "storageItem"
STORAGE_LOCAL_DISK = "storageLocalDisk"
STORAGE_LOCAL_DISK_CONFIG_DEF = "storageLocalDiskConfigDef"
STORAGE_LOCAL_DISK_CONFIG_POLICY = "storageLocalDiskConfigPolicy"
STORAGE_LOCAL_DISK_PARTITION = "storageLocalDiskPartition"
STORAGE_LOCAL_DISK_SLOT_EP = "storageLocalDiskSlotEp"
STORAGE_LOCAL_LUN = "storageLocalLun"
STORAGE_LUN_DISK = "storageLunDisk"
STORAGE_NODE_EP = "storageNodeEp"
STORAGE_QUAL = "storageQual"
STORAGE_RAID_BATTERY = "storageRaidBattery"
STORAGE_SYSTEM = "storageSystem"
STORAGE_SYSTEM_FSM = "storageSystemFsm"
STORAGE_SYSTEM_FSM_STAGE = "storageSystemFsmStage"
STORAGE_SYSTEM_FSM_TASK = "storageSystemFsmTask"
STORAGE_VIRTUAL_DRIVE = "storageVirtualDrive"
STORAGE_VSAN_REF = "storageVsanRef"
SW_ACCESS_DOMAIN = "swAccessDomain"
SW_ACCESS_DOMAIN_FSM = "swAccessDomainFsm"
SW_ACCESS_DOMAIN_FSM_STAGE = "swAccessDomainFsmStage"
SW_ACCESS_DOMAIN_FSM_TASK = "swAccessDomainFsmTask"
SW_ACCESS_EP = "swAccessEp"
SW_CARD_ENV_STATS = "swCardEnvStats"
SW_CARD_ENV_STATS_HIST = "swCardEnvStatsHist"
SW_ENV_STATS = "swEnvStats"
SW_ENV_STATS_HIST = "swEnvStatsHist"
SW_ETH_ESTC_EP = "swEthEstcEp"
SW_ETH_ESTC_PC = "swEthEstcPc"
SW_ETH_LAN_BORDER = "swEthLanBorder"
SW_ETH_LAN_BORDER_FSM = "swEthLanBorderFsm"
SW_ETH_LAN_BORDER_FSM_STAGE = "swEthLanBorderFsmStage"
SW_ETH_LAN_BORDER_FSM_TASK = "swEthLanBorderFsmTask"
SW_ETH_LAN_EP = "swEthLanEp"
SW_ETH_LAN_MON = "swEthLanMon"
SW_ETH_LAN_PC = "swEthLanPc"
SW_ETH_MON = "swEthMon"
SW_ETH_MON_DEST_EP = "swEthMonDestEp"
SW_ETH_MON_FSM = "swEthMonFsm"
SW_ETH_MON_FSM_STAGE = "swEthMonFsmStage"
SW_ETH_MON_FSM_TASK = "swEthMonFsmTask"
SW_ETH_MON_SRC_EP = "swEthMonSrcEp"
SW_ETH_TARGET_EP = "swEthTargetEp"
SW_FABRIC_ZONE_NS = "swFabricZoneNs"
SW_FABRIC_ZONE_NS_OVERRIDE = "swFabricZoneNsOverride"
SW_FC_ESTC_EP = "swFcEstcEp"
SW_FC_MON = "swFcMon"
SW_FC_MON_DEST_EP = "swFcMonDestEp"
SW_FC_MON_FSM = "swFcMonFsm"
SW_FC_MON_FSM_STAGE = "swFcMonFsmStage"
SW_FC_MON_FSM_TASK = "swFcMonFsmTask"
SW_FC_MON_SRC_EP = "swFcMonSrcEp"
SW_FC_SAN_BORDER = "swFcSanBorder"
SW_FC_SAN_BORDER_FSM = "swFcSanBorderFsm"
SW_FC_SAN_BORDER_FSM_STAGE = "swFcSanBorderFsmStage"
SW_FC_SAN_BORDER_FSM_TASK = "swFcSanBorderFsmTask"
SW_FC_SAN_EP = "swFcSanEp"
SW_FC_SAN_MON = "swFcSanMon"
SW_FC_SAN_PC = "swFcSanPc"
SW_FC_SERVER_ZONE_GROUP = "swFcServerZoneGroup"
SW_FC_ZONE = "swFcZone"
SW_FC_ZONE_SET = "swFcZoneSet"
SW_FCOE_ESTC_EP = "swFcoeEstcEp"
SW_FCOE_SAN_EP = "swFcoeSanEp"
SW_FCOE_SAN_PC = "swFcoeSanPc"
SW_PHYS = "swPhys"
SW_PHYS_ETHER_EP = "swPhysEtherEp"
SW_PHYS_FC_EP = "swPhysFcEp"
SW_PHYS_FSM = "swPhysFsm"
SW_PHYS_FSM_STAGE = "swPhysFsmStage"
SW_PHYS_FSM_TASK = "swPhysFsmTask"
SW_SYSTEM_STATS = "swSystemStats"
SW_SYSTEM_STATS_HIST = "swSystemStatsHist"
SW_ULAN = "swUlan"
SW_UTILITY_DOMAIN = "swUtilityDomain"
SW_UTILITY_DOMAIN_FSM = "swUtilityDomainFsm"
SW_UTILITY_DOMAIN_FSM_STAGE = "swUtilityDomainFsmStage"
SW_UTILITY_DOMAIN_FSM_TASK = "swUtilityDomainFsmTask"
SW_VLAN = "swVlan"
SW_VLAN_GROUP = "swVlanGroup"
SW_VLAN_PORT_NS = "swVlanPortNs"
SW_VLAN_PORT_NS_OVERRIDE = "swVlanPortNsOverride"
SW_VLAN_REF = "swVlanRef"
SW_VSAN = "swVsan"
SW_ZONE_INITIATOR_MEMBER = "swZoneInitiatorMember"
SW_ZONE_TARGET_MEMBER = "swZoneTargetMember"
SWAT_ACTION = "swatAction"
SWAT_CONDITION = "swatCondition"
SWAT_EXAMPLE = "swatExample"
SWAT_GETSTATS = "swatGetstats"
SWAT_INJECT = "swatInject"
SWAT_INJECTION = "swatInjection"
SWAT_RESULTSTATS = "swatResultstats"
SWAT_TARGET = "swatTarget"
SWAT_TRIGGER = "swatTrigger"
SYNTHETIC_DIRECTORY = "syntheticDirectory"
SYNTHETIC_FSOBJ_INVENTORY = "syntheticFSObjInventory"
SYNTHETIC_FSOBJ_INVENTORY_B = "syntheticFSObjInventoryB"
SYNTHETIC_FILE = "syntheticFile"
SYNTHETIC_FILE_SYSTEM = "syntheticFileSystem"
SYNTHETIC_FS_OBJ = "syntheticFsObj"
SYNTHETIC_FS_OBJ_FSM = "syntheticFsObjFsm"
SYNTHETIC_FS_OBJ_FSM_STAGE = "syntheticFsObjFsmStage"
SYNTHETIC_FS_OBJ_FSM_TASK = "syntheticFsObjFsmTask"
SYNTHETIC_TEST_TX = "syntheticTestTx"
SYNTHETIC_TIME = "syntheticTime"
SYSDEBUG_AUTO_CORE_FILE_EXPORT_TARGET = "sysdebugAutoCoreFileExportTarget"
SYSDEBUG_AUTO_CORE_FILE_EXPORT_TARGET_FSM = "sysdebugAutoCoreFileExportTargetFsm"
SYSDEBUG_AUTO_CORE_FILE_EXPORT_TARGET_FSM_STAGE = "sysdebugAutoCoreFileExportTargetFsmStage"
SYSDEBUG_AUTO_CORE_FILE_EXPORT_TARGET_FSM_TASK = "sysdebugAutoCoreFileExportTargetFsmTask"
SYSDEBUG_BACKUP_BEHAVIOR = "sysdebugBackupBehavior"
SYSDEBUG_CORE = "sysdebugCore"
SYSDEBUG_CORE_FILE_REPOSITORY = "sysdebugCoreFileRepository"
SYSDEBUG_CORE_FSM = "sysdebugCoreFsm"
SYSDEBUG_CORE_FSM_STAGE = "sysdebugCoreFsmStage"
SYSDEBUG_CORE_FSM_TASK = "sysdebugCoreFsmTask"
SYSDEBUG_EP = "sysdebugEp"
SYSDEBUG_LOG_CONTROL_DESTINATION_FILE = "sysdebugLogControlDestinationFile"
SYSDEBUG_LOG_CONTROL_DESTINATION_SYSLOG = "sysdebugLogControlDestinationSyslog"
SYSDEBUG_LOG_CONTROL_DOMAIN = "sysdebugLogControlDomain"
for Gs_bi,Ls_bi,pi,(tchans,rchans),di in zip(Gs, Ls, boneParents, boneDofs, dofSplits):
nt,nr = len(tchans),len(rchans)
if pi == -1: Gs_pi = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0]],dtype=np.float32)
else : Gs_pi = Gs[pi]
#Gs_bi = Gs_pi * Ls[bi] * Dof0 * Dof1 * ...
np.dot(Gs_pi[:,:3], Ls_bi, out=Gs_bi)
Gs_bi[:,3] += Gs_pi[:,3]
if nt: # translation DOFs
for c,v in zip(tchans, dofValues[di:di+nt]):
Gs_bi[:,3] += Gs_pi[:,ord(c)-ord('x')] * v
di += nt
if nr: # rotation DOFs
Gs_bi[:,:3] = np.dot(Gs_bi[:,:3], composeR(dofValues[di:di+nr],axes=rchans))
return Gs
def extractSkeletonDofs(Gs, Ls, boneParents, boneDofs, dofSplits, dofValues):
'''Fill in the dofValues and global rotations, given target joint positions.'''
targets = Gs[:,:,3].copy()
# debugging ... let's make sure we don't use uninitialised data!
assert(dofValues.shape == (dofSplits[-1],))
Gs[:,:,:] = float('inf')
dofValues[:] = float('inf')
numBones = len(boneParents)
boneDofCounts = dofSplits[1:] - dofSplits[:-1] # number of dofs per bone
# this "list of bones that are directly driven only by this bone" could be precomputed
# here we remove the influence of child bones with translation dofs, since we added end-of-bones points to deal with that
# we also remove the influence of child bones at the same position, since that could cause numerical problems
boneZeroChildren = [[ci for ci in np.where(boneParents == bi)[0] if boneDofs[ci][0] == '' and not np.all(Ls[ci,:,3]==0)] for bi in xrange(numBones)]
# for our skeletons, it is good enough to consider only grandchildren; but here all zero-dof descendants are added
for bi in xrange(numBones-1,-1,-1):
pi,bdc = boneParents[bi],boneDofCounts[bi]
if pi != -1 and bdc == 0: boneZeroChildren[pi].extend(boneZeroChildren[bi])
for bi,(tgt_bi,Gs_bi,Ls_bi,pi,(tchans,rchans),di,bzcs) in enumerate(zip(targets,Gs, Ls, boneParents, boneDofs, dofSplits, boneZeroChildren)):
nt,nr = len(tchans),len(rchans)
if pi == -1: Gs_pi = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0]],dtype=np.float32)
else : Gs_pi = Gs[pi]
#Gs[bi] = Gs[pi] * Ls[bi] * Dof0 * Dof1 * ...
# assume that Gs_pi is complete for now; TODO it might have an unsolved DOF, which is the rotation around the bone axis.
# we added trunnions to fix this, but it might be possible to generate the trunnions on the fly using cross products of other joints
np.dot(Gs_pi[:,:3], Ls_bi, out=Gs_bi)
Gs_bi[:,3] += Gs_pi[:,3]
if nt: # translation DOFs
for ddi,c in enumerate(tchans,start=di):
dofValues[ddi] = v = np.dot(Gs_pi[:,ord(c)-ord('x')], tgt_bi - Gs_bi[:,3])
Gs_bi[:,3] += Gs_pi[:,ord(c)-ord('x')] * v
di += nt
if nr: # rotation DOFs
numChildren = len(bzcs)
if numChildren == 0: # no way to solve the dofs, so just zero them
dofValues[di:di+nr] = 0
else:
Lt = np.zeros((numChildren, 3),dtype=np.float32)
Rt = np.zeros((numChildren, 3),dtype=np.float32)
for ci,cbi in enumerate(bzcs):
# we need to solve the R matrix from equations like: R * Ls[ck] *...* Ls[cj] * Ls[ci,:,3] = Gs[bi,:,:3].T * (Gs[ci,:,3] - Gs[bi,:,3])
# these equations are the columns of: R Lt.T = Rt.T
Lt[ci,:] = Ls[cbi,:,3]
Rt[ci,:] = np.dot(Gs_bi[:,:3].T, targets[cbi] - Gs_bi[:,3])
pbi = boneParents[cbi]
while pbi != bi:
Lt[ci,:] = np.dot(Ls[pbi,:,:3], Lt[ci,:]) + Ls[pbi,:,3]
pbi = boneParents[pbi]
rv = fitPointsAndDecomposeR(Lt, Rt, axes=rchans)
dofValues[di:di+nr] = rv[:nr]
#if nr == 2: print 'hopefully all zero',nr,rv,rv[nr:]
Gs_bi[:,:3] = np.dot(Gs_bi[:,:3], composeR(dofValues[di:di+nr],axes=rchans))
return Gs, dofValues
def composeR(rs, axes='xyz'):
'''Compose a vector of 3 radians into a 3x3 rotation matrix.
The rotation order is traditional right-to-left 'xyz'=R(z)*R(y)*R(x).
The values should be given in the same order (ie in this example: x,y,z).'''
i = ord(axes[0])-ord('x')
if len(axes) == 1: parity = 1 # single channel
else : parity = (ord(axes[1])-ord(axes[0])+3)
j,k = (i+parity)%3,(i+2*parity)%3
if ((parity%3) == 2): rs = -rs
R = np.zeros((3,3),dtype=np.float32)
if len(rs) == 1:
ci,si = math.cos(rs[0]),math.sin(rs[0])
R[i,i],R[j,i],R[k,i],R[i,j],R[j,j],R[k,j],R[i,k],R[j,k],R[k,k] = 1,0,0,0,ci,si,0,-si,ci
elif len(rs) == 2:
ci,cj,si,sj = math.cos(rs[0]),math.cos(rs[1]),math.sin(rs[0]),math.sin(rs[1])
R[i,i],R[j,i],R[k,i],R[i,j],R[j,j],R[k,j],R[i,k],R[j,k],R[k,k] = cj,0,-sj,si*sj,ci,cj*si,ci*sj,-si,cj*ci
else:
ci,cj,ck = np.cos(rs, dtype=np.float32); si,sj,sk = np.sin(rs, dtype=np.float32)
cc,cs,sc,ss = ci*ck,ci*sk,si*ck,si*sk
R[i,i],R[j,i],R[k,i],R[i,j],R[j,j],R[k,j],R[i,k],R[j,k],R[k,k] = ck*cj,sk*cj,-sj,sc*sj-cs,ss*sj+cc,cj*si,cc*sj+ss,cs*sj-sc,cj*ci
return R
def decomposeR(R, axes='xyz'):
'''Decompose a 3x3 rotation matrix into a vector of 3 radians.
The rotation order is traditional right-to-left 'xyz'=R(z)*R(y)*R(x).
The returned values will be in the order specified.'''
i = ord(axes[0])-ord('x')
if len(axes) == 1: parity = 1 # single channel
else: parity = (ord(axes[1])-ord(axes[0])+3)
j,k = (i+parity)%3,(i+2*parity)%3
cj = math.sqrt(R[i,i]*R[i,i] + R[j,i]*R[j,i])
if cj > 1e-30: ret = np.array([math.atan2(R[k,j],R[k,k]),math.atan2(-R[k,i],cj),math.atan2(R[j,i],R[i,i])],dtype=np.float32)
else: ret = np.array([math.atan2(-R[j,k],R[j,j]),math.atan2(-R[k,i],cj),0.0],dtype=np.float32)
if ((parity%3) == 2): ret = -ret
return ret #[:len(axes)]
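# --------------------------------------------------------------------------
# Hedged self-check, not from the original source: composeR/decomposeR above
# should round-trip for the default 'xyz' order as long as the middle (y)
# angle stays within (-pi/2, pi/2), the usual restriction for this Euler
# decomposition. The helper name and sample angles are illustrative only.
def _demo_compose_decompose():
    import numpy as np
    rs = np.array([0.3, -0.5, 1.1], dtype=np.float32)  # radians, given in x,y,z order
    R = composeR(rs, axes='xyz')                        # R = R(z)*R(y)*R(x)
    rs_back = decomposeR(R, axes='xyz')                 # recover the three angles
    assert np.allclose(rs, rs_back, atol=1e-5)
    return rs_back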
def fitPointsAndDecomposeR(A, B, axes='xyz'):
'''Given Nx3 matrices A and B with coordinates of N corresponding points,
solve R A.T = B.T for the rotation matrix R having the rotation order and degrees of freedom specified by axes.'''
R = np.dot(B.T, A) # NOT np.dot(B.T, np.linalg.pinv(A.T,rcond=0.0001))
if len(axes) == 1: # special case: minimise in 1D (otherwise the solve is unstable)
i = ord(axes[0])-ord('x')
R[i,:] = R[:,i] = 0
R[i,i] = 1
T = np.linalg.svd(R) # U,S,VT
R = np.dot(T[0],T[2])
if np.linalg.det(R) < 0: T[0][:,2] *= -1; R = np.dot(T[0],T[2])
if len(axes) == 2: # force a 2-parameter estimation of joint angles (potentially better than 3-parameter estimation & zeroing the third value)
# rewrite the matrix as the outer product of (1,sin,cos) vectors, and compose only the first singular value
i,j = ord(axes[0])-ord('x'),ord(axes[1])-ord('x')
k = (2*j+3-i)%3
svd = np.linalg.svd([[1.0,-R[k,i],R[i,i]],[-R[j,k],R[i,j],R[k,j]],[R[j,j],R[i,k],R[k,k]]])
[[_,R[k,i],R[i,i]],[R[j,k],R[i,j],R[k,j]],[R[j,j],R[i,k],R[k,k]]] = np.outer(svd[0][:,0],svd[2][0,:])*svd[1][0]
R[j,i] = 0 # this forces the third value to be 0
R[k,i]*=-1
R[j,k]*=-1
return decomposeR(R, axes)
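# Hedged sketch, not from the original source: with exact correspondences the
# polar decomposition inside fitPointsAndDecomposeR recovers the generating
# rotation, so the decomposed angles should match the ones used to build the
# targets. The helper name and the random test points are illustrative only.
def _demo_fit_rotation_dofs():
    import numpy as np
    np.random.seed(0)
    A = np.random.randn(5, 3).astype(np.float32)             # local offsets (the Lt rows above)
    true_rs = np.array([0.2, -0.4, 0.7], dtype=np.float32)
    B = np.dot(A, composeR(true_rs, axes='xyz').T)            # rotated targets (the Rt rows)
    rs = fitPointsAndDecomposeR(A, B, axes='xyz')
    assert np.allclose(rs, true_rs, atol=1e-3)
    return rs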
def fitPoints(A,B, out=None):
'''Given Nx3 matrices A and B with coordinates of N corresponding points,
solve RT A.T = B.T for the rotation-translation matrix [R;T]: the rotation R satisfies
R (A - mean(A)).T = (B - mean(B)).T and the translation is T = mean(B) - R mean(A).'''
RT = out
if RT is None: RT = np.zeros((3,4),dtype = np.float32)
Bmean,Amean = np.mean(B,axis=0),np.mean(A,axis=0)
R = np.dot((B - Bmean).T, (A - Amean))
S0,S1,S2 = np.linalg.svd(R) # U,S,VT
np.dot(S0,S2,out=R)
if np.linalg.det(R) < 0: S0[:,2] *= -1; np.dot(S0,S2,out=R)
RT[:,:3] = R
RT[:,3] = (Bmean-np.dot(R,Amean))
return RT
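# Hedged sketch, not from the original source: fitPoints performs the standard
# rigid (Kabsch/Procrustes) alignment, so points generated by a known rotation
# and translation should be mapped back onto their targets. The helper name and
# test data are illustrative only.
def _demo_fit_points_rigid():
    import numpy as np
    np.random.seed(1)
    A = np.random.randn(8, 3).astype(np.float32)
    R_true = composeR(np.array([0.1, 0.6, -0.3], dtype=np.float32))
    t_true = np.array([2.0, -1.0, 0.5], dtype=np.float32)
    B = np.dot(A, R_true.T) + t_true
    RT = fitPoints(A, B)                                      # 3x4 [R;T] mapping A onto B
    assert np.allclose(np.dot(A, RT[:, :3].T) + RT[:, 3], B, atol=1e-4)
    return RT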
def makeTriangles(graph):
'''Given a graph of edges (lo,hi), find all the ordered triangles.'''
gdict = {}
for lo,hi in graph: gdict[lo] = []; gdict[hi] = []
for lo,hi in graph: gdict[lo].append(hi)
tris = [[lo,mid,hi] for lo,mids in gdict.iteritems() for mid in mids for hi in gdict[mid]]
return tris
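# Hedged sketch, not from the original source: makeTriangles chains the sorted
# edge list through shared vertices and returns candidate [lo, mid, hi] triples
# (rigidTriangles below filters them further). The tiny graph is illustrative
# only.
def _demo_make_triangles():
    graph = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)]   # edges given as (lo, hi) with lo < hi
    tris = makeTriangles(graph)
    assert [0, 1, 2] in tris and [1, 2, 3] in tris
    return tris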
def rigidTriangles(data, threshold = 100.):
'''Given data = numFrames x numVerts x 3 animation data, compute rigid triangles.'''
print data.shape
dm, dd = makeVertsDistanceMatrix(data)
print 'dmdd',dm.shape
graph = makeGraph(dm,dd, threshold)
print 'graph',len(graph), graph[:10]
tris = makeTriangles(graph)
print 'tris',len(tris), tris[:10]
filtTris = []
for t in tris:
D = data[:,t,:] # numFrames x triVerts x 3
D0 = D[0]
dx,dy = D0[1]-D0[0],D0[2]-D0[0]
if np.dot(dx,dy)**2/(np.dot(dx,dx)*np.dot(dy,dy)) > 0.9: continue # weed out too-straight triangles
filtTris.append(t)
print 'filtTris',len(filtTris)
RTs = stabilizeGroups(data, filtTris)
return filtTris, RTs
def stabilizeAssignment(data, assignment):
'''Given data = numFrames x numVerts x 3 animation data and group label per vertex,
compute stabilizing RTs = numGroups x numFrames x 3 x 4 (to the first frame).'''
groups = [np.where(assignment == gi)[0] for gi in xrange(max(assignment)+1)]
return stabilizeGroups(data, groups)
def stabilizeGroups(data, groups):
'''Given data = numFrames x numVerts x 3 animation data and list of groups of vertices,
compute stabilizing RTs = numGroups x numFrames x 3 x 4 (to the first frame).'''
numGroups = len(groups)
numFrames = data.shape[0]
RTs = np.zeros((numGroups,numFrames,3,4), dtype=np.float32)
for group,RT in zip(groups,RTs):
D = data[:,group,:]
for r,d in zip(RT,D): fitPoints(d, D[0], out=r)
return RTs
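# Hedged sketch, not from the original source: for a single rigidly moving
# cluster, the RT that stabilizeGroups returns for each frame should map that
# frame's points back onto frame 0 (the "stabilized" pose). The random cluster
# and per-frame motion below are illustrative only.
def _demo_stabilize_groups():
    import numpy as np
    np.random.seed(2)
    base = np.random.randn(6, 3).astype(np.float32)
    data = np.zeros((3, 6, 3), dtype=np.float32)
    for fi, a in enumerate([0.0, 0.4, 0.9]):                  # rotate about y and shift per frame
        R = composeR(np.array([a], dtype=np.float32), axes='y')
        data[fi] = np.dot(base, R.T) + np.float32(fi)
    RTs = stabilizeGroups(data, [np.arange(6)])               # one group containing all 6 vertices
    for fi in range(3):
        mapped = np.dot(data[fi], RTs[0, fi, :, :3].T) + RTs[0, fi, :, 3]
        assert np.allclose(mapped, data[0], atol=1e-4)
    return RTs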
def assignmentResidual(data, RTs, thresholdDistance):
'''Given data = numFrames x numVerts x 3 animation data and stabilizing RTs = numTris x numFrames x 3 x 4
compute the reconstruction residual for assigning each vertex to each of the triangles.'''
numTris = RTs.shape[0]
numVerts = data.shape[1]
res = np.zeros((numTris,numVerts),dtype=np.float32)
for ti,RT in enumerate(RTs):
alignData = applyRT_list(RT, data)
# calculate the variance of each point
res2 = np.mean(np.sum((alignData[0] - alignData)**2,axis=2,dtype=np.float32),axis=0,dtype=np.float32)
np.clip(res2,0,thresholdDistance,out=res[ti])
return res
def bestTriangle(res, resids):
bestImprovement,bestIndex = 0,-1
for index,res2 in enumerate(resids):
replace = np.where(res > res2)[0]
improvement = np.sum(res[replace] - res2[replace])
if improvement > bestImprovement: bestImprovement,bestIndex = improvement,index
return bestImprovement/len(res),bestIndex
def assignAndStabilize(data, RTs, thresholdDistance):
'''Given data = numFrames x numVerts x 3 animation data and stabilizing RTs = numGroups x numFrames x 3 x 4
assign each data point to one of the triangles and compute the minimum reconstruction residual.
Returns the assignment, the residuals, and the stabilized data points (to the first frame).'''
numVerts = data.shape[1]
res = np.ones(numVerts,dtype=np.float32)*thresholdDistance
assignment = -np.ones(numVerts,dtype=np.int32)
stableData = np.zeros_like(data)
for gi,RT in enumerate(RTs):
alignData = applyRT_list(RT, data)
# calculate the variance of each point
res2 = np.mean(np.sum((alignData[0] - alignData)**2,axis=2,dtype=np.float32),axis=0,dtype=np.float32)
replace = np.where(res2 < res)[0]
res[replace] = res2[replace]
stableData[:,replace,:] = alignData[:,replace,:]
assignment[replace] = gi
return assignment, res, stableData
def unstabilize(stableData, RTs):
'''Given stableData = numGroups x 3 animation data and stabilizing RTs = numGroups x numFrames x 3 x 4
Returns the data = numFrames x numGroups x 3, animated (undoing the stabilizing transform).'''
numGroups = stableData.shape[0]
numFrames = RTs.shape[1]
data = np.zeros((numFrames,numGroups,3),dtype=np.float32)
for gi,(RT,sd) in enumerate(zip(RTs,stableData)):
data[:,gi,:] = unapplyRT_list(RT,sd.reshape(1,-1)).reshape(-1,3)
return data
def invert_matrix_array(RTs):
ret = np.zeros_like(RTs)
ret[:,:3,:3] = np.transpose(RTs[:,:3,:3],(0,2,1))
for rti,rto in zip(RTs,ret):
rto[:,3] = -np.dot(rto[:3,:3],rti[:,3])
return ret
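# Hedged self-check, not from the original source: composing each 3x4 transform
# with its inverse from invert_matrix_array should give the identity rotation
# and zero translation. The helper name and sample transform are illustrative
# only.
def _demo_invert_matrix_array():
    import numpy as np
    RT = np.zeros((1, 3, 4), dtype=np.float32)
    RT[0, :, :3] = composeR(np.array([0.3, 0.2, -0.1], dtype=np.float32))
    RT[0, :, 3] = [1.0, 2.0, 3.0]
    RTi = invert_matrix_array(RT)
    R_id = np.dot(RT[0, :, :3], RTi[0, :, :3])
    t_id = np.dot(RT[0, :, :3], RTi[0, :, 3]) + RT[0, :, 3]
    assert np.allclose(R_id, np.eye(3), atol=1e-5) and np.allclose(t_id, 0.0, atol=1e-5)
    return RTi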
def transform_pair_residual(RT1, RT2):
numFrames = RT1.shape[0]
R1T = np.transpose(RT1[:,:,:3],axes=(0,2,1))
R2T = np.transpose(RT2[:,:,:3],axes=(0,2,1))
T1 = RT1[:,:,3]
T2 = RT2[:,:,3]
A = np.zeros((numFrames*3,3),dtype=np.float32)
B = np.zeros((numFrames*3),dtype=np.float32)
A[:] = (R1T - R2T).reshape(-1,3)
for b,r1,t1,r2,t2 in zip(B.reshape(-1,3),R1T,T1,R2T,T2): b[:] = np.dot(r1,t1)-np.dot(r2,t2)
O,res,_,_ = np.linalg.lstsq(A, B, rcond=0.0001)
res = np.mean((B-np.dot(A,O))**2) # why isn't res this?
O = np.dot(RT1[0,:,:3],O)+RT1[0,:,3]
return res,O
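# Hedged sketch, not from the original source: if two stabilizing transform
# sequences share a fixed pivot (e.g. a joint between two rigid groups) and
# both are the identity at frame 0, transform_pair_residual should recover
# that pivot with a near-zero residual. The pivot, angles and helper name are
# illustrative only.
def _demo_shared_pivot():
    import numpy as np
    O_true = np.array([1.0, 2.0, 0.5], dtype=np.float32)
    angles = [0.0, 0.3, -0.4, 0.8]                            # frame 0 is the identity, as fitPoints produces
    RT1 = np.zeros((len(angles), 3, 4), dtype=np.float32)
    RT2 = np.zeros((len(angles), 3, 4), dtype=np.float32)
    for fi, a in enumerate(angles):
        R1 = composeR(np.array([a], dtype=np.float32), axes='z')    # group 1 spins about z through the pivot
        R2 = composeR(np.array([-a], dtype=np.float32), axes='x')   # group 2 spins about x through the same pivot
        RT1[fi, :, :3], RT1[fi, :, 3] = R1, O_true - np.dot(R1, O_true)
        RT2[fi, :, :3], RT2[fi, :, 3] = R2, O_true - np.dot(R2, O_true)
    res, O = transform_pair_residual(RT1, RT2)
    assert res < 1e-6 and np.allclose(O, O_true, atol=1e-3)
    return O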
def sharedStablePoints(RTs, threshold=float('inf')):
'''Given stabilizing RTs = numGroups x numFrames x 3 x 4, look for pairs of groups (g1,g2) that have a common stable point.
RTs[g1,fi,:,:3] * xi + RTs[g1,fi,:,3] = O
RTs[g2,fi,:,:3] * xi + RTs[g2,fi,:,3] = O
(RTs[g2,fi,:,:3].T - RTs[g1,fi,:,:3].T) . O = RTs[g2,fi,:,:3].T . RTs[g2,fi,:,3] - RTs[g1,fi,:,:3].T . RTs[g1,fi,:,3]
Return a list of group pairs and stable points.'''
numGroups = RTs.shape[0]
ret = []
for (g1,g2) in ((g1,g2) for g1 in xrange(numGroups) for g2 in xrange(numGroups)):
if g1 == g2: continue
res, O = transform_pair_residual(RTs[g1], RTs[g2])
if res < threshold: ret.append(((g1, g2), O))
return ret
ASSET_LOG_LOST : T("Lost"),
ASSET_LOG_STOLEN : T("Stolen"),
ASSET_LOG_DESTROY : T("Destroyed"),
}
asset_condition_opts = {1:T("Good Condition"),
2:T("Minor Damage"),
3:T("Major Damage"),
4:T("Un-Repairable"),
5:T("Needs Maintenance"),
}
if auth.permission.format == "html":
# T isn't JSON serializable
site_types = auth.org_site_types
for key in site_types.keys():
site_types[key] = str(site_types[key])
site_types = json.dumps(site_types)
script = '''
S3OptionsFilter({
'triggerName':'organisation_id',
'targetName':'site_id',
'lookupPrefix':'org',
'lookupResource':'site',
'lookupField':'site_id',
'fncRepresent': function(record,PrepResult){
var InstanceTypeNice=%(instance_type_nice)s
return record.name+" ("+InstanceTypeNice[record.instance_type]+")"
}})''' % dict(instance_type_nice = site_types)
else:
script = None
tablename = "asset_log"
define_table(tablename,
asset_id(),
Field("status", "integer",
label = T("Status"),
requires = IS_IN_SET(asset_log_status_opts),
represent = lambda opt: \
asset_log_status_opts.get(opt, UNKNOWN_OPT)
),
s3_datetime("datetime",
default="now",
empty=False,
represent="date",
),
s3_datetime("datetime_until",
label = T("Date Until"),
represent="date",
),
person_id(label = T("Assigned To")),
Field("check_in_to_person", "boolean",
#label = T("Mobile"), # Relabel?
label = T("Track with this Person?"),
comment = DIV(_class="tooltip",
#_title="%s|%s" % (T("Mobile"),
_title="%s|%s" % (T("Track with this Person?"),
T("If selected, then this Asset's Location will be updated whenever the Person's Location is updated."))),
readable = False,
writable = False),
# The Organisation to whom the loan is made
organisation_id(
readable = False,
writable = False,
widget = None
),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
label = org_site_label,
#filterby = "site_id",
#filter_opts = auth.permitted_facilities(redirect_on_error=False),
instance_types = auth.org_site_types,
updateable = True,
not_filterby = "obsolete",
not_filter_opts = [True],
#default = user.site_id if is_logged_in() else None,
readable = True,
writable = True,
empty = False,
represent = self.org_site_represent,
#widget = S3SiteAutocompleteWidget(),
script = script,
),
self.org_room_id(),
#location_id(),
Field("cancel", "boolean",
default = False,
label = T("Cancel Log Entry"),
represent = s3_yes_no_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Cancel Log Entry"),
T("'Cancel' will indicate an asset log entry did not occur")))
),
Field("cond", "integer", # condition is a MySQL reserved word
requires = IS_IN_SET(asset_condition_opts,
zero = "%s..." % T("Please select")),
represent = lambda opt: \
asset_condition_opts.get(opt, UNKNOWN_OPT),
label = T("Condition")),
person_id("by_person_id",
label = T("Assigned By"), # This can either be the Asset controller if signed-out from the store
default = auth.s3_logged_in_person(), # or the previous owner if passed on directly (e.g. to successor in their post)
comment = self.pr_person_comment(child="by_person_id"),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_ASSIGN = T("New Entry in Asset Log")
crud_strings[tablename] = Storage(
label_create = ADD_ASSIGN,
title_display = T("Asset Log Details"),
title_list = T("Asset Log"),
title_update = T("Edit Asset Log Entry"),
label_list_button = T("Asset Log"),
label_delete_button = T("Delete Asset Log Entry"),
msg_record_created = T("Entry added to Asset Log"),
msg_record_modified = T("Asset Log Entry updated"),
msg_record_deleted = T("Asset Log Entry deleted"),
msg_list_empty = T("Asset Log Empty"))
# Resource configuration
configure(tablename,
listadd = False,
list_fields = ["id",
"datetime",
"status",
"datetime_until",
"organisation_id",
"site_id",
"room_id",
"person_id",
#"location_id",
"cancel",
"cond",
"comments",
],
onaccept = self.asset_log_onaccept,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(asset_asset_id = asset_id,
asset_represent = self.asset_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Return safe defaults for names in case the model is disabled """
asset_id = S3ReusableField("asset_id", "integer",
writable=False,
readable=False)
return dict(asset_asset_id=asset_id)
# -------------------------------------------------------------------------
@staticmethod
def asset_represent(id, row=None):
"""
Represent an Asset
"""
if row:
id = row.id
elif not id:
return current.messages["NONE"]
db = current.db
table = db.asset_asset
itable = db.supply_item
btable = db.supply_brand
query = (table.id == id) & \
(itable.id == table.item_id)
r = db(query).select(table.number,
itable.name,
btable.name,
left = btable.on(itable.brand_id == btable.id),
limitby=(0, 1)).first()
try:
represent = "%s (%s" % (r.asset_asset.number,
r.supply_item.name)
if r.supply_brand.name:
represent = "%s, %s)" % (represent,
r.supply_brand.name)
else:
represent = "%s)" % represent
except:
represent = current.messages.UNKNOWN_OPT
return represent
# -------------------------------------------------------------------------
@staticmethod
def asset_onaccept(form):
"""
After DB I/O
"""
if current.response.s3.bulk:
# Import or Sync
return
db = current.db
atable = db.asset_asset
form_vars = form.vars
kit = form_vars.get("kit", None)
site_id = form_vars.get("site_id", None)
if site_id:
stable = db.org_site
asset_id = form_vars.id
# Set the Base Location
location_id = db(stable.site_id == site_id).select(stable.location_id,
limitby=(0, 1)
).first().location_id
tracker = S3Tracker()
asset_tracker = tracker(atable, asset_id)
asset_tracker.set_base_location(location_id)
if kit:
# Also populate location_id field in component items
aitable = db.asset_item
db(aitable.asset_id == asset_id).update(location_id = location_id)
# Add a log entry for this
ltable = db.asset_log
ltable.insert(asset_id = asset_id,
status = ASSET_LOG_SET_BASE,
organisation_id = form_vars.get("organisation_id", None),
site_id = site_id,
cond = 1,
)
if kit:
# Empty any inappropriate fields
db(atable.id == asset_id).update(supplier_org_id = None,
purchase_date = None,
purchase_price = None,
purchase_currency = None,
)
else:
# Delete any component items
aitable = db.asset_item
ids = db(aitable.asset_id == asset_id).select(aitable.id).as_list()
if ids:
resource = current.s3db.resource("asset_item", id=ids)
resource.delete()
return
# -------------------------------------------------------------------------
@staticmethod
def asset_log_onaccept(form):
"""
After DB I/O
"""
request = current.request
get_vars = request.get_vars
status = get_vars.get("status", None)
if not status:
if not current.response.s3.asset_import:
# e.g. Record merger or Sync
return
# Import
db = current.db
form_vars = form.vars
asset_id = form_vars.asset_id
status = int(form_vars.status)
if status == ASSET_LOG_ASSIGN:
# Only type supported right now
# @ToDo: Support more types
type = "person"
new = True
else:
# Interactive
form_vars = form.vars
status = int(form_vars.status or status)
db = current.db
ltable = db.asset_log
row = db(ltable.id == form_vars.id).select(ltable.asset_id,
limitby=(0, 1)
).first()
try:
asset_id = row.asset_id
except:
return
current_log = asset_get_current_log(asset_id)
type = get_vars.get("type", None)
log_time = current_log.datetime
current_time = form_vars.get("datetime", None).replace(tzinfo=None)
new = log_time <= current_time
if new:
# This is a current assignment
atable = db.asset_asset
aitable = db.asset_item
tracker = S3Tracker()
asset_tracker = tracker(atable, asset_id)
if status == ASSET_LOG_SET_BASE:
# Set Base Location
site_id = form_vars.get("site_id", None)
stable = db.org_site
location_id = db(stable.site_id == site_id).select(stable.location_id,
limitby=(0, 1)
).first().location_id
asset_tracker.set_base_location(location_id)
# Also do component items
db(aitable.asset_id == asset_id).update(location_id = location_id)
elif status == ASSET_LOG_ASSIGN:
if type == "person":
if form_vars.check_in_to_person:
asset_tracker.check_in(db.pr_person, form_vars.person_id,
timestmp = request.utcnow)
# Also do component items
# @ToDo: Have these move when the person moves
locations = asset_tracker.get_location(_fields=[db.gis_location.id])
try:
db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
except:
pass
else:
location_id = asset_tracker.set_location(form_vars.person_id,
timestmp = request.utcnow)
# Also do component items
db(aitable.asset_id == asset_id).update(location_id = location_id)
# Update main record for component
db(atable.id == asset_id).update(assigned_to_id=form_vars.person_id)
elif type == "site":
asset_tracker.check_in(db.org_site, form_vars.site_id,
timestmp = request.utcnow)
# Also do component items
locations = asset_tracker.get_location(_fields=[db.gis_location.id])
try:
db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
except:
pass
elif type == "organisation":
site_id = form_vars.get("site_id", None)
if site_id:
asset_tracker.check_in(db.org_site, site_id,
timestmp = request.utcnow)
# Also do component items
locations = asset_tracker.get_location(_fields=[db.gis_location.id])
try:
db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
except:
pass
else:
# We can no longer track location
asset_tracker.check_out()
elif status == ASSET_LOG_RETURN:
# Set location to base location
location_id = asset_tracker.set_location(asset_tracker,
timestmp = request.utcnow)
# Also do component items
db(aitable.asset_id == asset_id).update(location_id = location_id)
return
# =============================================================================
def asset_get_current_log(asset_id):
"""
Get the current log entry for this asset
"""
table = current.s3db.asset_log
query = (table.asset_id == asset_id) & \
(table.cancel == False) & \
(table.deleted == False)
# Get the log with the maximum time
asset_log = current.db(query).select(table.id,
table.status,
table.datetime,
table.cond,
table.person_id,
table.organisation_id,
table.site_id,
#table.location_id,
orderby = ~table.datetime,
limitby=(0, 1)).first()
if asset_log:
return Storage(datetime = asset_log.datetime,
person_id = asset_log.person_id,
cond = int(asset_log.cond or 0),
status = int(asset_log.status or 0),
organisation_id = asset_log.organisation_id,
site_id = asset_log.site_id,
#location_id = asset_log.location_id
)
else:
return Storage()
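# =============================================================================
# A framework-free sketch (not Sahana/web2py code; added for illustration only):
# asset_get_current_log() above picks, among the asset's log rows that are
# neither cancelled nor deleted, the one with the latest datetime. The
# dict-based rows and the helper name below are hypothetical.
def _current_log_from_rows(rows):
    candidates = [r for r in rows
                  if not r.get("cancel") and not r.get("deleted")]
    if not candidates:
        return {}
    return max(candidates, key=lambda r: r["datetime"])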
# =============================================================================
def asset_log_prep(r):
"""
Called by Controller
"""
T = current.T
db = current.db
request = current.request
table = db.asset_log
if r.record:
asset = Storage(r.record)
else:
# This is a new record
asset = Storage()
table.cancel.readable = False
table.cancel.writable = False
# This causes an error with the dataTables paginate
# if used only in r.interactive & not also r.representation=="aadata"
if r.method != "read" and r.method != "update":
table.cancel.readable = False
table.cancel.writable = False
current_log = asset_get_current_log(asset.id)
if request.vars.status:
status = int(request.vars.status)
else:
status = 0
if status and status != "None":
field = table.status
field.default = status
field.readable = False
field.writable = False
elif current_log:
table.status.default = current_log.status
if current_log.organisation_id:
table.organisation_id.default = current_log.organisation_id
table.site_id.requires = IS_ONE_OF(db, "org_site.site_id",
table.site_id.represent,
filterby = "organisation_id",
filter_opts = [current_log.organisation_id])
crud_strings = current.response.s3.crud_strings.asset_log
if status == ASSET_LOG_SET_BASE:
crud_strings.msg_record_created = T("Base Facility/Site Set")
table.by_person_id.label = T("Set By")
table.site_id.writable = True
table.datetime_until.readable = False
table.datetime_until.writable = False
# get rid of padding
slopes = slopes[2*half_window:]
return slopes
#**********************************************************************
#.. EVENT NOTIFICATION: tsunami detection
#**********************************************************************
def prs_tsunami_detection(botsflu_5minrate, tsunami_detection_threshold=1.0):
"""
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage:
TF = prs_tsunami_detection(BOTSFLU-5MINRATE_L2)
where
TF = True or False; whether a tsunami event has been detected.
WARNING: This function and its data product input argument were coded as instructed
in the DPS using the pseudocode specified. The robustness of this code has
not been checked with actual data.
"""
# units of variable and threshold are [cm/min]
boolean_tsunami_detection = False
# get rid of runtime warnings if nans are present
botsflu_5minrate[np.isnan(botsflu_5minrate)] = 0.0
if np.any(np.abs(botsflu_5minrate) >= tsunami_detection_threshold):
boolean_tsunami_detection = True
return boolean_tsunami_detection
#**********************************************************************
#.. EVENT NOTIFICATION: eruption imminent
#**********************************************************************
def prs_eruption_imminent(botsflu_10minrate, eruption_imminent_threshold=5.0):
"""
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage:
TF = prs_eruption_imminent(BOTSFLU-10MINRATE_L2)
where
TF = True or False; whether an eruption event is imminent.
WARNING: This function and its data product input argument were coded as instructed
in the DPS using the pseudocode specified. The robustness of this code has
not been checked with actual data.
"""
# units of variable and threshold are [cm/hr]
boolean_eruption_imminent = False
# get rid of runtime warnings if nans are present
botsflu_10minrate[np.isnan(botsflu_10minrate)] = 0.0
if np.any(botsflu_10minrate >= eruption_imminent_threshold):
boolean_eruption_imminent = True
return boolean_eruption_imminent
#**********************************************************************
#.. EVENT NOTIFICATION: eruption occurred
#**********************************************************************
def prs_eruption_occurred(botsflu_10minrate, eruption_occurred_threshold=-5.0):
"""
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage:
TF = prs_eruption_occurred(BOTSFLU-10MINRATE_L2)
where
TF = True or False; whether an eruption event has occurred.
WARNING: This function and its data product input argument were coded as instructed
in the DPS using the pseudocode specified. The robustness of this code has
not been checked with actual data.
"""
# units of variable and threshold are [cm/hr]
boolean_eruption_occurred = False
# get rid of runtime warnings if nans are present
botsflu_10minrate[np.isnan(botsflu_10minrate)] = 0.0
if np.any(botsflu_10minrate <= eruption_occurred_threshold):
boolean_eruption_occurred = True
return boolean_eruption_occurred
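#**********************************************************************
#.. Hedged usage sketch (not from the DPS): the three event-notification
#.. functions above are plain threshold tests on the rate data products.
#.. The synthetic rate arrays below are illustrative only; note that the
#.. functions replace NaNs with 0.0 in place.
#**********************************************************************
def _demo_event_notifications():
    import numpy as np
    rate_5min = np.array([0.1, np.nan, 1.2, 0.0])    # [cm/min]; |rate| reaches the 1.0 default once
    rate_10min = np.array([0.5, -6.0, np.nan, 2.0])  # [cm/hr]; never reaches +5.0, dips below -5.0
    assert prs_tsunami_detection(rate_5min)
    assert not prs_eruption_imminent(rate_10min)
    assert prs_eruption_occurred(rate_10min)
    return True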
#**********************************************************************
#.. BOTSFLU functions deprecated in May 2017 but retained
#.. for future re-use and/or documentation.
#**********************************************************************
def anchor_bin(time, data, bin_duration, mode):
"""
Description:
Calculates 'anchored' timestamps (see Notes) and binned data based on timestamps
in units of seconds since midnight. Written explicitly for the BOTSFLU DPA which
requires two stages of binning: the 20 Hz data into 15 second bins, then the 15 sec data into 24 hour bins.
Implemented by:
2015-01-13: <NAME>. Initial code.
2015-01-14: <NAME>. Changed output arguments and incorporated conditionals
to improve program efficiency.
2017-05-05: <NAME>. Deprecated because the new code requires different
modifications to the rawdata and detided data binning:
(1) bad value check in the rawdata
(2) 'extended' 24hr timestamp records to incorporate
non-Nan coverage thresholds for the detided data.
Usage (1):
bin_timestamps = anchor_bin(time, None, bin_duration, 'time')
where
bin_timestamps = 1D array of centered timestamps for non-empty bins
time = 1D array of timestamps, units of sec since 01-01-1900
None = not used; python placeholder object
bin_duration = size of bin [s]
mode = the string 'time'
Usage (2):
binned_data, mask_nonzero = anchor_bin(time, data, bin_duration, 'data')
where
binned_data = 1D array of binned data; no empty bins are represented
mask_nonzero = boolean where True values represent locations of non-empty bins
time = 1D array of timestamps, units of sec since 01-01-1900
data = data to be binned
bin_duration = size of bin [s]
mode = the string 'data'
Usage (3):
bin_timestamps, binned_data, mask_nonzero = anchor_bin(time, data, bin_duration, 'both')
where
bin_timestamps = 1D array of centered timestamps for non-empty bins
binned_data = 1D array of binned data; no empty bins are represented
mask_nonzero = boolean where True values represent locations of non-empty bins
time = 1D array of timestamps, units of sec since 01-01-1900
data = data to be binned
bin_duration = size of bin [s]
mode = the string 'both'
Notes:
The conditional construction is used so that only necessary statements are executed;
when multiple years' worth of 20 Hz data is operated on, each np.bincount operation
may take multiple tens of seconds to execute.
The np.bincount routine is used in the same way accumarray in matlab is used
to bin data. The key to the routine is to convert the timestamps into elapsed
time in units of bin_duration and to construct bins based on the floored
bin_duration times. The summing is then carried out by using the weighting
feature of the np.bincount function, as described in the example in the
numpy.bincount documentation as listed in the References.
The BOTSFLU data products require binning at two stages. Bin results both with
and without empty bins are required. The output arguments have been selected to
provide this flexibility (in particular mask_nonzero).
This routine has been constructed to supply 'anchored' timestamps. For example,
if the bin_duration is 86400 (the number of seconds in a day) then the start time
will be half a bin earlier than the first day of data (at noon) and all timestamps
will be 'anchored' at midnight. Similarly, if the bin_duration is 15 sec, all
timestamps will be at 00, 15, 30, and 45 seconds past the minute.
References:
http://docs.scipy.org/doc/numpy-1.8.1/reference/generated/numpy.bincount.html.
"""
half_bin = bin_duration/2.0
# anchor time-centered bins by determining the start time to be half a bin
# before the first 'anchor timestamp', which will be an integral number of
# bin_durations after midnight.
start_time = np.floor((time[0] - half_bin)/bin_duration) * bin_duration + half_bin
# calculate elapsed time from start in units of bin_duration.
time_elapsed = (time - start_time)/bin_duration
# assign each timestamp a bin number index based on its elapsed time.
bin_number = np.floor(time_elapsed).astype(int)
# the number of elements in each bin is given by
bin_count = np.bincount(bin_number).astype(float)
# create a logical mask of non-zero bin_count values
mask_nonzero = (bin_count != 0)
# mode 'time': calculate timestamps (and get tides) without also binning data;
# mask_nonzero is used internally but not returned.
if mode == 'time':
# directly calculate bin timestamp, units of [sec]:
# the midpoint of the data interval is used.
bin_timestamps = start_time + half_bin + bin_duration * np.arange(bin_count.size)
# keep only the bins with values
bin_timestamps = bin_timestamps[mask_nonzero]
return bin_timestamps
# for binning data when the resultant timestamps are not explicitly required.
# daydepth_plus also requires mask_nonzero for downstream products 4wkrate and 8wkrate.
elif mode == 'data':
# sum the values in each time bin, and put into the variable binned_data
binned_data = np.bincount(bin_number, data)
# divide the values in non-empty bins by the number of values in each bin
binned_data = binned_data[mask_nonzero]/bin_count[mask_nonzero]
return binned_data, mask_nonzero
# for when both timestamps and binned data are required.
elif mode == 'both':
bin_timestamps = start_time + half_bin + bin_duration * np.arange(bin_count.size)
bin_timestamps = bin_timestamps[mask_nonzero]
binned_data = np.bincount(bin_number, data)
binned_data = binned_data[mask_nonzero]/bin_count[mask_nonzero]
return bin_timestamps, binned_data, mask_nonzero
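# Illustrative sketch (added for clarity; not part of the original module): how the
# 'anchoring' described in the Notes above plays out for a 15 second bin_duration.
# The timestamps and data values below are hypothetical.
#
#     time = np.array([3610.0, 3612.0, 3621.0, 3640.0])   # sec since 01-01-1900
#     data = np.array([1.0, 2.0, 3.0, 4.0])
#     bin_timestamps, binned_data, mask_nonzero = anchor_bin(time, data, 15.0, 'both')
#     # start_time = floor((3610 - 7.5)/15)*15 + 7.5 = 3607.5
#     # bin_timestamps -> [3615., 3645.]   (anchored at :15 and :45 past the minute)
#     # binned_data    -> [2., 4.]         (bin means; the empty middle bin is dropped)
#     # mask_nonzero   -> [True, False, True]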
def calc_daydepth_plus(timestamp, botpres):
"""
Description:
Worker function to calculate the botsflu data product daydepth plus an
additional boolean mask required to calculate other botsflu data products
downstream from daydepth.
Implemented by:
2015-01-14: <NAME>. Initial code.
Usage
daydepth, mask_nonzero = calc_daydepth_plus(timestamp, botpres)
where
daydepth = BOTSFLU-DAYDEPTH_L2 [m]
mask_nonzero = boolean of positions of non-empty 24 hr bins
timestamp = OOI system timestamps [sec since 01-01-1900]
botpres = BOTPRES_L1 [psia]
Notes:
References:
OOI (2015). Data Product Specification for Seafloor Uplift and Subsidence
(BOTSFLU) from the BOTPT instrument. Document Control Number 1341-00080.
"""
# calculate 15sec bin timestamps and de-tided depth.
time15s, meandepth, _ = calc_meandepth_plus(timestamp, botpres)
# bin the 15sec data into 24 hour bins so that the timestamps are at midnight.
# to calculate daydepth, don't need the time24h timestamps.
# assumed here: 24 hr bins (86400 sec) with mode 'data', which returns (binned_data, mask_nonzero)
daydepth, _ = anchor_bin(time15s, meandepth, 86400.0, 'data')
# downstream data products no longer require the mask_nonzero variable
return daydepth
def calculate_all_sliding_slopes_then_Nan(data, window_size, coverage_threshold):
"""
Description:
Calculates backwards-looking sliding slopes using the normal linear
regression equations rewritten to be less susceptible to round-off
error; required for the BOTSFLU data products 4WKRATE and 8WKRATE.
Implemented by:
2017-05-03: <NAME>. Initial code. Replaces the much faster Moore-Penrose
pseudo-inverse method so that nan-masking can be
incorporated.
2017-05-08: <NAME>. Added the coverage criterion.
Usage
slopes = calculate_all_sliding_slopes_then_Nan(data, window_size, coverage_threshold)
where
slopes | |
# ##########################################################
# FlatCAM: 2D Post-processing for Manufacturing #
# File Author: <NAME> (c) #
# Date: 1/13/2020 #
# MIT Licence #
# ##########################################################
from PyQt5 import QtWidgets, QtCore, QtGui
from appTool import AppTool
from appGUI.GUIElements import FCComboBox, RadioSet, FCLabel, FCButton
import math
from shapely.geometry import Point
from shapely.affinity import translate
import gettext
import appTranslation as fcTranslate
import builtins
import logging
fcTranslate.apply_language('strings')
if '_' not in builtins.__dict__:
_ = gettext.gettext
log = logging.getLogger('base')
class AlignObjects(AppTool):
toolName = _("Align Objects")
def __init__(self, app):
AppTool.__init__(self, app)
self.app = app
self.decimals = app.decimals
self.canvas = self.app.plotcanvas
# #############################################################################
# ######################### Tool GUI ##########################################
# #############################################################################
self.ui = AlignUI(layout=self.layout, app=self.app)
self.toolName = self.ui.toolName
# Signals
self.ui.align_object_button.clicked.connect(self.on_align)
self.ui.type_obj_radio.activated_custom.connect(self.on_type_obj_changed)
self.ui.type_aligner_obj_radio.activated_custom.connect(self.on_type_aligner_changed)
self.ui.reset_button.clicked.connect(self.set_tool_ui)
self.mr = None
# if the mouse events are connected to a local method set this True
self.local_connected = False
# store the status of the grid
self.grid_status_memory = None
self.aligned_obj = None
self.aligner_obj = None
# this is one of the objects: self.aligned_obj or self.aligner_obj
self.target_obj = None
# here store the alignment points
self.clicked_points = []
self.align_type = None
# old colors of objects involved in the alignment
self.aligner_old_fill_color = None
self.aligner_old_line_color = None
self.aligned_old_fill_color = None
self.aligned_old_line_color = None
def run(self, toggle=True):
self.app.defaults.report_usage("ToolAlignObjects()")
if toggle:
# if the splitter is hidden, display it, else hide it but only if the current widget is the same
if self.app.ui.splitter.sizes()[0] == 0:
self.app.ui.splitter.setSizes([1, 1])
else:
try:
if self.app.ui.tool_scroll_area.widget().objectName() == self.toolName:
# if tab is populated with the tool but it does not have the focus, focus on it
if not self.app.ui.notebook.currentWidget() is self.app.ui.tool_tab:
# focus on Tool Tab
self.app.ui.notebook.setCurrentWidget(self.app.ui.tool_tab)
else:
self.app.ui.splitter.setSizes([0, 1])
except AttributeError:
pass
else:
if self.app.ui.splitter.sizes()[0] == 0:
self.app.ui.splitter.setSizes([1, 1])
AppTool.run(self)
self.set_tool_ui()
self.app.ui.notebook.setTabText(2, _("Align Tool"))
def install(self, icon=None, separator=None, **kwargs):
AppTool.install(self, icon, separator, shortcut='Alt+A', **kwargs)
def set_tool_ui(self):
self.reset_fields()
self.clicked_points = []
self.target_obj = None
self.aligned_obj = None
self.aligner_obj = None
self.aligner_old_fill_color = None
self.aligner_old_line_color = None
self.aligned_old_fill_color = None
self.aligned_old_line_color = None
self.ui.a_type_radio.set_value(self.app.defaults["tools_align_objects_align_type"])
self.ui.type_obj_radio.set_value('grb')
self.ui.type_aligner_obj_radio.set_value('grb')
if self.local_connected is True:
self.disconnect_cal_events()
def on_type_obj_changed(self, val):
obj_type = {'grb': 0, 'exc': 1}[val]
self.ui.object_combo.setRootModelIndex(self.app.collection.index(obj_type, 0, QtCore.QModelIndex()))
self.ui.object_combo.setCurrentIndex(0)
self.ui.object_combo.obj_type = {'grb': "Gerber", 'exc': "Excellon"}[val]
def on_type_aligner_changed(self, val):
obj_type = {'grb': 0, 'exc': 1}[val]
self.ui.aligner_object_combo.setRootModelIndex(self.app.collection.index(obj_type, 0, QtCore.QModelIndex()))
self.ui.aligner_object_combo.setCurrentIndex(0)
self.ui.aligner_object_combo.obj_type = {'grb': "Gerber", 'exc': "Excellon"}[val]
def on_align(self):
self.app.delete_selection_shape()
obj_sel_index = self.ui.object_combo.currentIndex()
obj_model_index = self.app.collection.index(obj_sel_index, 0, self.ui.object_combo.rootModelIndex())
try:
self.aligned_obj = obj_model_index.internalPointer().obj
except AttributeError:
self.app.inform.emit('[WARNING_NOTCL] %s' % _("There is no aligned FlatCAM object selected..."))
return
aligner_obj_sel_index = self.ui.aligner_object_combo.currentIndex()
aligner_obj_model_index = self.app.collection.index(
aligner_obj_sel_index, 0, self.ui.aligner_object_combo.rootModelIndex())
try:
self.aligner_obj = aligner_obj_model_index.internalPointer().obj
except AttributeError:
self.app.inform.emit('[WARNING_NOTCL] %s' % _("There is no aligner FlatCAM object selected..."))
return
self.align_type = self.ui.a_type_radio.get_value()
# disengage the grid snapping since it will be hard to find the drills or pads on grid
if self.app.ui.grid_snap_btn.isChecked():
self.grid_status_memory = True
self.app.ui.grid_snap_btn.trigger()
else:
self.grid_status_memory = False
self.local_connected = True
self.aligner_old_fill_color = self.aligner_obj.fill_color
self.aligner_old_line_color = self.aligner_obj.outline_color
self.aligned_old_fill_color = self.aligned_obj.fill_color
self.aligned_old_line_color = self.aligned_obj.outline_color
self.target_obj = self.aligned_obj
self.set_color()
self.app.inform.emit('%s: %s' % (_("First Point"), _("Click on the START point.")))
self.mr = self.canvas.graph_event_connect('mouse_release', self.on_mouse_click_release)
if self.app.is_legacy is False:
self.canvas.graph_event_disconnect('mouse_release', self.app.on_mouse_click_release_over_plot)
else:
self.canvas.graph_event_disconnect(self.app.mr)
def on_mouse_click_release(self, event):
if self.app.is_legacy is False:
event_pos = event.pos
right_button = 2
self.app.event_is_dragging = self.app.event_is_dragging
else:
event_pos = (event.xdata, event.ydata)
right_button = 3
self.app.event_is_dragging = self.app.ui.popMenu.mouse_is_panning
pos_canvas = self.canvas.translate_coords(event_pos)
if event.button == 1:
click_pt = Point([pos_canvas[0], pos_canvas[1]])
if self.app.selection_type is not None:
# delete previous selection shape
self.app.delete_selection_shape()
self.app.selection_type = None
else:
if self.target_obj.kind.lower() == 'excellon':
for tool, tool_dict in self.target_obj.tools.items():
for geo in tool_dict['solid_geometry']:
if click_pt.within(geo):
center_pt = geo.centroid
self.clicked_points.append(
[
float('%.*f' % (self.decimals, center_pt.x)),
float('%.*f' % (self.decimals, center_pt.y))
]
)
self.check_points()
elif self.target_obj.kind.lower() == 'gerber':
for apid, apid_val in self.target_obj.apertures.items():
for geo_el in apid_val['geometry']:
if 'solid' in geo_el:
if click_pt.within(geo_el['solid']):
if isinstance(geo_el['follow'], Point):
center_pt = geo_el['solid'].centroid
self.clicked_points.append(
[
float('%.*f' % (self.decimals, center_pt.x)),
float('%.*f' % (self.decimals, center_pt.y))
]
)
self.check_points()
elif event.button == right_button and self.app.event_is_dragging is False:
self.reset_color()
self.clicked_points = []
self.disconnect_cal_events()
self.app.inform.emit('[WARNING_NOTCL] %s' % _("Cancelled by user request."))
def check_points(self):
if len(self.clicked_points) == 1:
self.app.inform.emit('%s: %s. %s' % (
_("First Point"), _("Click on the DESTINATION point ..."), _("Or right click to cancel.")))
self.target_obj = self.aligner_obj
self.reset_color()
self.set_color()
if len(self.clicked_points) == 2:
if self.align_type == 'sp':
self.align_translate()
self.app.inform.emit('[success] %s' % _("Done."))
self.app.plot_all()
self.disconnect_cal_events()
return
else:
self.app.inform.emit('%s: %s. %s' % (
_("Second Point"), _("Click on the START point."), _("Or right click to cancel.")))
self.target_obj = self.aligned_obj
self.reset_color()
self.set_color()
if len(self.clicked_points) == 3:
self.app.inform.emit('%s: %s. %s' % (
_("Second Point"), _("Click on the DESTINATION point ..."), _("Or right click to cancel.")))
self.target_obj = self.aligner_obj
self.reset_color()
self.set_color()
if len(self.clicked_points) == 4:
self.align_translate()
self.align_rotate()
self.app.inform.emit('[success] %s' % _("Done."))
self.disconnect_cal_events()
self.app.plot_all()
def align_translate(self):
dx = self.clicked_points[1][0] - self.clicked_points[0][0]
dy = self.clicked_points[1][1] - self.clicked_points[0][1]
self.aligned_obj.offset((dx, dy))
# Update the object bounding box options
a, b, c, d = self.aligned_obj.bounds()
self.aligned_obj.options['xmin'] = a
self.aligned_obj.options['ymin'] = b
self.aligned_obj.options['xmax'] = c
self.aligned_obj.options['ymax'] = d
def align_rotate(self):
dx = self.clicked_points[1][0] - self.clicked_points[0][0]
dy = self.clicked_points[1][1] - self.clicked_points[0][1]
test_rotation_pt = translate(Point(self.clicked_points[2]), xoff=dx, yoff=dy)
new_start = (test_rotation_pt.x, test_rotation_pt.y)
new_dest = self.clicked_points[3]
origin_pt = self.clicked_points[1]
dxd = new_dest[0] - origin_pt[0]
dyd = new_dest[1] - origin_pt[1]
dxs = new_start[0] - origin_pt[0]
dys = new_start[1] - origin_pt[1]
rotation_not_needed = (abs(new_start[0] - new_dest[0]) <= (10 ** -self.decimals)) or \
(abs(new_start[1] - new_dest[1]) <= (10 ** -self.decimals))
if rotation_not_needed is False:
# calculate rotation angle
angle_dest = math.degrees(math.atan(dyd / dxd))
angle_start = math.degrees(math.atan(dys / dxs))
angle = angle_dest - angle_start
self.aligned_obj.rotate(angle=angle, point=origin_pt)
def disconnect_cal_events(self):
# restore the Grid snapping if it was active before
if self.grid_status_memory is True:
self.app.ui.grid_snap_btn.trigger()
self.app.mr = self.canvas.graph_event_connect('mouse_release', self.app.on_mouse_click_release_over_plot)
if self.app.is_legacy is False:
self.canvas.graph_event_disconnect('mouse_release', self.on_mouse_click_release)
else:
self.canvas.graph_event_disconnect(self.mr)
self.local_connected = False
self.aligner_old_fill_color = None
self.aligner_old_line_color = None
self.aligned_old_fill_color = None
self.aligned_old_line_color = None
def set_color(self):
new_color = "#15678abf"
new_line_color = new_color
self.target_obj.shapes.redraw(
update_colors=(new_color, new_line_color)
)
def reset_color(self):
self.aligned_obj.shapes.redraw(
update_colors=(self.aligned_old_fill_color, self.aligned_old_line_color)
)
self.aligner_obj.shapes.redraw(
update_colors=(self.aligner_old_fill_color, self.aligner_old_line_color)
)
def reset_fields(self):
self.ui.object_combo.setRootModelIndex(self.app.collection.index(0, 0, QtCore.QModelIndex()))
self.ui.aligner_object_combo.setRootModelIndex(self.app.collection.index(0, 0, QtCore.QModelIndex()))
class AlignUI:
toolName = _("Align Objects")
def __init__(self, layout, app):
self.app = app
self.decimals = self.app.decimals
self.layout = layout
# ## Title
title_label = FCLabel("%s" % self.toolName)
title_label.setStyleSheet("""
QLabel
{
font-size: 16px;
font-weight: bold;
}
""")
self.layout.addWidget(title_label)
self.layout.addWidget(QtWidgets.QLabel(""))
# Form Layout
grid0 = QtWidgets.QGridLayout()
grid0.setColumnStretch(0, 0)
grid0.setColumnStretch(1, 1)
self.layout.addLayout(grid0)
self.aligned_label = FCLabel('<b>%s:</b>' % _("MOVING object"))
grid0.addWidget(self.aligned_label, 0, 0, 1, 2)
self.aligned_label.setToolTip(
_("Specify the type of object to be aligned.\n"
"It can be of type: Gerber or Excellon.\n"
"The selection here decide the type of objects that will be\n"
"in the Object combobox.")
)
# Type of object to be aligned
self.type_obj_radio = RadioSet([
{"label": _("Gerber"), "value": "grb"},
{"label": _("Excellon"), "value": "exc"},
])
grid0.addWidget(self.type_obj_radio, 3, 0, 1, 2)
# Object to be aligned
self.object_combo = FCComboBox()
self.object_combo.setModel(self.app.collection)
self.object_combo.setRootModelIndex(self.app.collection.index(0, 0, QtCore.QModelIndex()))
self.object_combo.is_last = True
self.object_combo.setToolTip(
_("Object to be aligned.")
)
grid0.addWidget(self.object_combo, 4, 0, 1, 2)
separator_line = QtWidgets.QFrame()
separator_line.setFrameShape(QtWidgets.QFrame.HLine)
separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
grid0.addWidget(separator_line, 5, 0, 1, 2)
grid0.addWidget(FCLabel(''), 6, 0, 1, 2)
self.aligned_label = FCLabel('<b>%s:</b>' % _("DESTINATION object"))
self.aligned_label.setToolTip(
_("Specify the type of object to be aligned to.\n"
"It can be of type: Gerber or Excellon.\n"
"The selection here decide the type of objects that will be\n"
"in the Object combobox.")
)
grid0.addWidget(self.aligned_label, 7, 0, 1, 2)
# Type of object to be aligned to = aligner
self.type_aligner_obj_radio = RadioSet([
{"label": _("Gerber"), "value": "grb"},
{"label": _("Excellon"), "value": "exc"},
])
grid0.addWidget(self.type_aligner_obj_radio, 8, 0, 1, 2)
# Object to be aligned to = aligner
self.aligner_object_combo = FCComboBox()
self.aligner_object_combo.setModel(self.app.collection)
self.aligner_object_combo.setRootModelIndex(self.app.collection.index(0, 0, QtCore.QModelIndex()))
self.aligner_object_combo.is_last = True
self.aligner_object_combo.setToolTip(
_("Object to be aligned to. Aligner.")
)
grid0.addWidget(self.aligner_object_combo, 9, 0, 1, 2)
separator_line = QtWidgets.QFrame()
separator_line.setFrameShape(QtWidgets.QFrame.HLine)
separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
grid0.addWidget(separator_line, 10, 0, 1, 2)
grid0.addWidget(QtWidgets.QLabel(''), 11, 0, 1, 2)
# Alignment Type
self.a_type_lbl = FCLabel('<b>%s:</b>' % _("Alignment Type"))
self.a_type_lbl.setToolTip(
_("The type of alignment can be:\n"
"- Single Point -> it require a single point of sync, the action will be a translation\n"
"- Dual Point -> it require two points of sync, the action will be translation followed by rotation")
)
self.a_type_radio = RadioSet(
[
{'label': _('Single Point'), 'value': 'sp'},
{'label': _('Dual Point'), 'value': 'dp'}
])
grid0.addWidget(self.a_type_lbl, 12, 0, 1, 2)
grid0.addWidget(self.a_type_radio, 13, 0, 1, 2)
separator_line = QtWidgets.QFrame()
| |
cls_head=cls_head,
average=average, return_result=True, split_names=['Train', 'Validation', 'Test'],
forward_kwargs=forward_kwargs, predict_func=predict_func, pred_kwargs=pred_kwargs,
verbose=True)
if show_results_in_notebook:
print('\nModel after the last training epoch:')
eval_classification_multi_splits(model, xs=[x_train, x_val, x_test],
ys=[y_train, y_val, y_test], batch_size=batch_size, multi_heads=multi_heads, cls_head=cls_head,
average=average, return_result=False, split_names=['Train', 'Validation', 'Test'],
forward_kwargs=forward_kwargs, predict_func=predict_func,
pred_kwargs=pred_kwargs, verbose=True)
if plot_loss:
plot_history_multi_splits([loss_train_his, loss_val_his, loss_test_his], title='Loss',
idx=loss_idx)
if plot_acc:
plot_history_multi_splits([acc_train_his, acc_val_his, acc_test_his], title='Acc',
idx=acc_idx)
if plot_scatter:
plot_data_multi_splits(best_model, [x_train, x_val, x_test], [y_train, y_val, y_test],
num_heads=2 if multi_heads else 1,
titles=['Training', 'Validation', 'Test'], batch_size=batch_size)
return metric
def run_one_epoch_single_loss(model, x, y_true, loss_fn=nn.CrossEntropyLoss(), train=True, optimizer=None,
batch_size=None, return_loss=True, epoch=0, print_every=10, verbose=True, forward_kwargs={}):
"""Run one epoch, i.e., model(x), but split into batches
Args:
model: torch.nn.Module
x: torch.Tensor
y_true: target torch.Tensor
loss_fn: loss function
train: if False, call model.eval() and torch.set_grad_enabled(False) to save time
optimizer: needed when train is True
batch_size: if None, batch_size = len(x)
return_loss: if True, return epoch loss
epoch: for print
print_every: print epoch_loss if epoch % print_every == 0
verbose: if True, print batch_loss
forward_kwargs: default {}, used for model(x, **forward_kwargs), provide additional kwargs for forward pass;
if it is sample-related, then batch_size should be None, otherwise there can be size mismatch
"""
is_grad_enabled = torch.is_grad_enabled()
if train:
model.train()
torch.set_grad_enabled(True)
else:
model.eval()
torch.set_grad_enabled(False)
loss_history = []
is_classification = isinstance(y_true.cpu(), torch.LongTensor)
if is_classification:
acc_history = []
if batch_size is None:
batch_size = len(x)
for i in range(0, len(x), batch_size):
y_pred = model(x[i:i+batch_size], **forward_kwargs)
loss = loss_fn(y_pred, y_true[i:i+batch_size])
loss_history.append(loss.item())
if is_classification:
labels_pred = y_pred.topk(1, -1)[1].squeeze() # only calculate top 1 accuracy
acc = (labels_pred == y_true[i:i+batch_size]).float().mean().item()
acc_history.append(acc)
if verbose:
msg = 'Epoch{} {}/{}: loss={:.2e}'.format(
epoch, i//batch_size, (len(x)+batch_size-1)//batch_size, loss.item())
if is_classification:
msg = msg + f', acc={acc:.2f}'
print(msg)
if train:
optimizer.zero_grad()
loss.backward()
optimizer.step()
torch.set_grad_enabled(is_grad_enabled)
loss_epoch = np.mean(loss_history)
if is_classification:
acc_epoch = np.mean(acc_history)
if epoch % print_every == 0:
msg = 'Epoch{} {}: loss={:.2e}'.format(epoch, 'Train' if train else 'Test', np.mean(loss_history))
if is_classification:
msg = msg + f', acc={np.mean(acc_history):.2f}'
print(msg)
if return_loss:
if is_classification:
return loss_epoch, acc_epoch, loss_history, acc_history
else:
return loss_epoch, loss_history
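# Illustrative sketch (assumed toy model and data, not part of the original module):
# a single evaluation pass over a small classification batch.
#
#     model = nn.Linear(4, 3)
#     x = torch.randn(32, 4)
#     y = torch.randint(0, 3, (32,))
#     loss_epoch, acc_epoch, loss_hist, acc_hist = run_one_epoch_single_loss(
#         model, x, y, loss_fn=nn.CrossEntropyLoss(), train=False,
#         batch_size=8, epoch=0, print_every=1, verbose=False)
#
# With train=False no optimizer is needed; because y is a LongTensor the function
# treats the task as classification and also returns per-epoch accuracy.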
def train_single_loss(model, x_train, y_train, x_val=[], y_val=[], x_test=[], y_test=[],
loss_fn=nn.CrossEntropyLoss(), lr=1e-2, weight_decay=1e-4, amsgrad=True, batch_size=None, num_epochs=1,
reduce_every=200, eval_every=1, print_every=1, verbose=False,
loss_train_his=[], loss_val_his=[], loss_test_his=[],
acc_train_his=[], acc_val_his=[], acc_test_his=[], return_best_val=True,
forward_kwargs_train={}, forward_kwargs_val={}, forward_kwargs_test={}):
"""Run a number of epochs to backpropagate
Args:
Most arguments are passed to run_one_epoch_single_loss
lr, weight_decay, amsgrad are passed to torch.optim.Adam
reduce_every: call adjust_learning_rate if cur_epoch % reduce_every == 0
eval_every: call run_one_epoch_single_loss on validation and test sets if cur_epoch % eval_every == 0
print_every: print epoch loss if cur_epoch % print_every == 0
verbose: if True, print batch loss
return_best_val: if True, return the best model on validation set for classification task
forward_kwargs_train: default {}, passed to run_one_epoch_single_loss for model(x, **forward_kwargs)
forward_kwargs_train, forward_kwargs_val and forward_kwargs_test
are passed to the train, val, and test sets, respectively;
they may differ from each other when they are sample-related,
in which case batch_size should be None, otherwise there can be a size mismatch;
if they are not sample-related, they are usually identical across the splits
"""
def eval_one_epoch(x, targets, loss_his, acc_his, epoch, train=False, forward_kwargs={}):
"""Function within function; reuse parameters within proper scope
"""
results = run_one_epoch_single_loss(model, x, targets, loss_fn=loss_fn, train=train, optimizer=optimizer,
batch_size=batch_size, return_loss=True, epoch=epoch, print_every=print_every, verbose=verbose,
forward_kwargs=forward_kwargs)
if is_classification:
loss_epoch, acc_epoch, loss_history, acc_history = results
else:
loss_epoch, loss_history = results
loss_his.append(loss_epoch)
if is_classification:
acc_his.append(acc_epoch)
is_classification = isinstance(y_train.cpu(), torch.LongTensor)
best_val_acc = -1 # best_val_acc >=0 after the first epoch for classification task
for i in range(num_epochs):
if i == 0:
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
lr=lr, weight_decay=weight_decay, amsgrad=amsgrad)
# Should I create a new torch.optim.Adam instance every time I adjust learning rate?
adjust_learning_rate(optimizer, lr, i, reduce_every=reduce_every)
eval_one_epoch(x_train, y_train, loss_train_his, acc_train_his, i, train=True,
forward_kwargs=forward_kwargs_train)
if i % eval_every == 0:
if len(x_val)>0 and len(y_val)>0:
eval_one_epoch(x_val, y_val, loss_val_his, acc_val_his, i, train=False,
forward_kwargs=forward_kwargs_val) # Setting train to False here is crucial!
if is_classification:
if acc_val_his[-1] > best_val_acc:
best_val_acc = acc_val_his[-1]
best_model = copy.deepcopy(model)
best_epoch = i
print('epoch {}, best_val_acc={:.2f}, train_acc={:.2f}'.format(
best_epoch, best_val_acc, acc_train_his[-1]))
if len(x_test)>0 and len(y_test)>0:
eval_one_epoch(x_test, y_test, loss_test_his, acc_test_his, i, train=False,
forward_kwargs=forward_kwargs_test) # Set train to be False
if is_classification:
if return_best_val and len(x_val)>0 and len(y_val)>0:
return best_model, best_val_acc, best_epoch
else:
return model, acc_train_his[-1], i
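# Illustrative sketch (assumed tensor shapes, not part of the original module): training
# a small classifier with validation-based model selection.
#
#     model = nn.Sequential(nn.Linear(4, 16), nn.ReLU(), nn.Linear(16, 3))
#     best_model, best_val_acc, best_epoch = train_single_loss(
#         model, x_train, y_train, x_val=x_val, y_val=y_val,
#         lr=1e-3, num_epochs=20, batch_size=32, print_every=5,
#         loss_train_his=[], loss_val_his=[], acc_train_his=[], acc_val_his=[])
#
# Passing fresh lists for the *_his arguments avoids accumulating history in the
# mutable default arguments across repeated calls.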
def run_one_epoch_multiloss(model, x, targets, heads=[0,1], loss_fns=[nn.CrossEntropyLoss(), nn.MSELoss()],
loss_weights=[1,0], other_loss_fns=[], other_loss_weights=[], return_loss=True, batch_size=None,
train=True, optimizer=None, epoch=0, print_every=10, verbose=True):
"""Calculate a multi-head model with multiple losses including losses from the outputs and targets (head losses)
and regularizers on model parameters (non-head losses).
Args:
model: A model with multihead; for example, an AutoEncoder classifier, returns classification scores
(or regression target) and decoder output (reconstruction of input)
x: input
targets: a list of targets associated with multi-head output specified by argument heads;
e.g., for an autoencoder with two heads, targets = [y_labels, x]
targets need not be paired one-to-one with every head output;
use the heads argument to specify which heads are paired with targets;
The elements of targets can be None, too;
the length of targets must be compatible with that of loss_weights, loss_fns, and heads
heads: the index for the heads paired with targets for calculating losses;
if None, set heads = list(range(len(targets)))
loss_fns: a list of loss functions for the corresponding head
loss_weights: the (non-negative) weights for the above head-losses;
heads, loss_fns, and loss_weights are closely related to each other; need to handle it carefully
other_loss_fns: a list of loss functions as regularizers on model parameters
other_loss_weights: the corresponding weights for other_loss_fns
return_loss: default True, return all losses
batch_size: default None; split data into batches
train: default True; if False, call model.eval() and torch.set_grad_enabled(False) to save time
optimizer: when train is True, optimizer must be given; default None, do not use for evaluation
epoch: for print only
print_every: print epoch losses if epoch % print_every == 0
verbose: if True, print losses for each batch
"""
is_grad_enabled = torch.is_grad_enabled()
if train:
model.train()
torch.set_grad_enabled(True)
else:
model.eval()
torch.set_grad_enabled(False)
if batch_size is None:
batch_size = len(x)
if len(targets) < len(loss_weights):
# Some losses do not require targets (using 'implicit' targets in the objective)
# Pad targets with None entries so that the indices line up for later use
targets = targets + [None]*(len(loss_weights) - len(targets))
is_classification = [] # record the indices of targets that are for classification
has_unequal_size = [] # record the indices of targets that have a different size from the input
is_none = [] # record the indices of targets that are None
for j, y_true in enumerate(targets):
if y_true is not None:
if len(y_true) == len(x):
if isinstance(y_true.cpu(), torch.LongTensor):
# if targets[j] is LongTensor, treat it as classification task
is_classification.append(j)
else:
# Here is a known bug: len(y_true) != len(x) is used to decide that y_true (target) is not 1-1 paired with input instances;
# however, even when len(y_true) == len(x), y_true may still not be 1-1 paired with the input instances;
# since this rarely happens, the bug has not been addressed
has_unequal_size.append(j)
else:
is_none.append(j)
loss_history = []
if len(is_classification) > 0:
acc_history = []
if heads is None: # If heads is not given, assume the targets are paired with the model outputs in order
heads = list(range(len(targets)))
for i in range(0, len(x), batch_size):
y_pred = model(x[i:i+batch_size])
loss_batch = []
for j, w in enumerate(loss_weights):
if w>0: # only execute when w>0
if j in is_none:
loss_j = loss_fns[j](y_pred[heads[j]]) * w
elif j in has_unequal_size:
loss_j = loss_fns[j](y_pred[heads[j]], targets[j]) * w # targets[j] is the same for all batches
else:
loss_j = loss_fns[j](y_pred[heads[j]], targets[j][i:i+batch_size]) * w
loss_batch.append(loss_j)
for j, w in enumerate(other_loss_weights):
if w>0:
# The implicit 'target' is encoded in the loss function itself
# todo: in addition to argument model, make loss_fns handle other 'dynamic' arguments as well
loss_j = other_loss_fns[j](model) * w
loss_batch.append(loss_j)
loss = sum(loss_batch)
loss_batch = [v.item() for v in loss_batch]
loss_history.append(loss_batch)
# Calculate accuracy
| |
<gh_stars>1-10
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for resumable uploads.
Also supported here are simple (media) uploads and multipart
uploads that contain both metadata and a small file as payload.
"""
from google._async_resumable_media import _upload
from google._async_resumable_media.requests import _request_helpers
class SimpleUpload(_request_helpers.RequestsMixin, _upload.SimpleUpload):
"""Upload a resource to a Google API.
A **simple** media upload sends no metadata and completes the upload
in a single request.
Args:
upload_url (str): The URL where the content will be uploaded.
headers (Optional[Mapping[str, str]]): Extra headers that should
be sent with the request, e.g. headers for encrypted data.
Attributes:
upload_url (str): The URL where the content will be uploaded.
"""
async def transmit(
self,
transport,
data,
content_type,
timeout=_request_helpers._DEFAULT_TIMEOUT,
):
"""Transmit the resource to be uploaded.
Args:
transport (~requests.Session): A ``requests`` object which can
make authenticated requests.
data (bytes): The resource content to be uploaded.
content_type (str): The content type of the resource, e.g. a JPEG
image has content type ``image/jpeg``.
timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
The number of seconds to wait for the server response.
Depending on the retry strategy, a request may be repeated
several times using the same timeout each time.
Can also be passed as an `aiohttp.ClientTimeout` object.
Returns:
~requests.Response: The HTTP response returned by ``transport``.
"""
method, url, payload, headers = self._prepare_request(data, content_type)
response = await _request_helpers.http_request(
transport,
method,
url,
data=payload,
headers=headers,
retry_strategy=self._retry_strategy,
timeout=timeout,
)
self._process_response(response)
return response
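# Illustrative usage sketch (assumed transport and upload_url, mirroring the style of
# the ResumableUpload doctests further below):
#
#     upload = SimpleUpload(upload_url)
#     response = await upload.transmit(
#         transport, b'some bytes!', u'application/octet-stream')
#
# ``transport`` is expected to be an authenticated session object, and the call must
# run inside a coroutine because ``transmit`` is an async method.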
class MultipartUpload(_request_helpers.RequestsMixin, _upload.MultipartUpload):
"""Upload a resource with metadata to a Google API.
A **multipart** upload sends both metadata and the resource in a single
(multipart) request.
Args:
upload_url (str): The URL where the content will be uploaded.
headers (Optional[Mapping[str, str]]): Extra headers that should
be sent with the request, e.g. headers for encrypted data.
checksum (Optional[str]): The type of checksum to compute to verify
the integrity of the object. The request metadata will be amended
to include the computed value. Using this option will override a
manually-set checksum value. Supported values are "md5",
"crc32c" and None. The default is None.
Attributes:
upload_url (str): The URL where the content will be uploaded.
"""
async def transmit(
self,
transport,
data,
metadata,
content_type,
timeout=_request_helpers._DEFAULT_TIMEOUT,
):
"""Transmit the resource to be uploaded.
Args:
transport (~requests.Session): A ``requests`` object which can
make authenticated requests.
data (bytes): The resource content to be uploaded.
metadata (Mapping[str, str]): The resource metadata, such as an
ACL list.
content_type (str): The content type of the resource, e.g. a JPEG
image has content type ``image/jpeg``.
timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
The number of seconds to wait for the server response.
Depending on the retry strategy, a request may be repeated
several times using the same timeout each time.
Can also be passed as an `aiohttp.ClientTimeout` object.
Returns:
~requests.Response: The HTTP response returned by ``transport``.
"""
method, url, payload, headers = self._prepare_request(
data, metadata, content_type
)
response = await _request_helpers.http_request(
transport,
method,
url,
data=payload,
headers=headers,
retry_strategy=self._retry_strategy,
timeout=timeout,
)
self._process_response(response)
return response
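# Illustrative usage sketch (assumed transport and upload_url): a multipart upload
# carries metadata and payload in a single request.
#
#     upload = MultipartUpload(upload_url, checksum='md5')
#     response = await upload.transmit(
#         transport, b'some bytes!', {u'name': u'some-file.jpg'}, u'image/jpeg')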
class ResumableUpload(_request_helpers.RequestsMixin, _upload.ResumableUpload):
"""Initiate and fulfill a resumable upload to a Google API.
A **resumable** upload sends an initial request with the resource metadata
and then gets assigned an upload ID / upload URL to send bytes to.
Using the upload URL, the upload is then done in chunks (determined by
the user) until all bytes have been uploaded.
When constructing a resumable upload, only the resumable upload URL and
the chunk size are required:
.. testsetup:: resumable-constructor
bucket = u'bucket-foo'
.. doctest:: resumable-constructor
>>> from google.resumable_media.requests import ResumableUpload
>>>
>>> url_template = (
... u'https://www.googleapis.com/upload/storage/v1/b/{bucket}/o?'
... u'uploadType=resumable')
>>> upload_url = url_template.format(bucket=bucket)
>>>
>>> chunk_size = 3 * 1024 * 1024 # 3MB
>>> upload = ResumableUpload(upload_url, chunk_size)
When initiating an upload (via :meth:`initiate`), the caller is expected
to pass the resource being uploaded as a file-like ``stream``. If the size
of the resource is explicitly known, it can be passed in directly:
.. testsetup:: resumable-explicit-size
import os
import tempfile
import mock
import requests
from six.moves import http_client
from google.resumable_media.requests import ResumableUpload
upload_url = u'http://test.invalid'
chunk_size = 3 * 1024 * 1024 # 3MB
upload = ResumableUpload(upload_url, chunk_size)
file_desc, filename = tempfile.mkstemp()
os.close(file_desc)
data = b'some bytes!'
with open(filename, u'wb') as file_obj:
file_obj.write(data)
fake_response = requests.Response()
fake_response.status_code = int(http_client.OK)
fake_response._content = b''
resumable_url = u'http://test.invalid?upload_id=7up'
fake_response.headers[u'location'] = resumable_url
post_method = mock.Mock(return_value=fake_response, spec=[])
transport = mock.Mock(request=post_method, spec=['request'])
.. doctest:: resumable-explicit-size
>>> import os
>>>
>>> upload.total_bytes is None
True
>>>
>>> stream = open(filename, u'rb')
>>> total_bytes = os.path.getsize(filename)
>>> metadata = {u'name': filename}
>>> response = upload.initiate(
... transport, stream, metadata, u'text/plain',
... total_bytes=total_bytes)
>>> response
<Response [200]>
>>>
>>> upload.total_bytes == total_bytes
True
.. testcleanup:: resumable-explicit-size
os.remove(filename)
If the stream is in a "final" state (i.e. it won't have any more bytes
written to it), the total number of bytes can be determined implicitly
from the ``stream`` itself:
.. testsetup:: resumable-implicit-size
import io
import mock
import requests
from six.moves import http_client
from google.resumable_media.requests import ResumableUpload
upload_url = u'http://test.invalid'
chunk_size = 3 * 1024 * 1024 # 3MB
upload = ResumableUpload(upload_url, chunk_size)
fake_response = requests.Response()
fake_response.status_code = int(http_client.OK)
fake_response._content = b''
resumable_url = u'http://test.invalid?upload_id=7up'
fake_response.headers[u'location'] = resumable_url
post_method = mock.Mock(return_value=fake_response, spec=[])
transport = mock.Mock(request=post_method, spec=['request'])
data = b'some MOAR bytes!'
metadata = {u'name': u'some-file.jpg'}
content_type = u'image/jpeg'
.. doctest:: resumable-implicit-size
>>> stream = io.BytesIO(data)
>>> response = upload.initiate(
... transport, stream, metadata, content_type)
>>>
>>> upload.total_bytes == len(data)
True
If the size of the resource is **unknown** when the upload is initiated,
the ``stream_final`` argument can be used. This might occur if the
resource is being dynamically created on the client (e.g. application
logs). To use this argument:
.. testsetup:: resumable-unknown-size
import io
import mock
import requests
from six.moves import http_client
from google.resumable_media.requests import ResumableUpload
upload_url = u'http://test.invalid'
chunk_size = 3 * 1024 * 1024 # 3MB
upload = ResumableUpload(upload_url, chunk_size)
fake_response = requests.Response()
fake_response.status_code = int(http_client.OK)
fake_response._content = b''
resumable_url = u'http://test.invalid?upload_id=7up'
fake_response.headers[u'location'] = resumable_url
post_method = mock.Mock(return_value=fake_response, spec=[])
transport = mock.Mock(request=post_method, spec=['request'])
metadata = {u'name': u'some-file.jpg'}
content_type = u'application/octet-stream'
stream = io.BytesIO(b'data')
.. doctest:: resumable-unknown-size
>>> response = upload.initiate(
... transport, stream, metadata, content_type,
... stream_final=False)
>>>
>>> upload.total_bytes is None
True
Args:
upload_url (str): The URL where the resumable upload will be initiated.
chunk_size (int): The size of each chunk used to upload the resource.
headers (Optional[Mapping[str, str]]): Extra headers that should
be sent with the :meth:`initiate` request, e.g. headers for
encrypted data. These **will not** be sent with
:meth:`transmit_next_chunk` or :meth:`recover` requests.
checksum (Optional[str]): The type of checksum to compute to verify
the integrity of the object. After the upload is complete, the
server-computed checksum of the resulting object will be checked
and google.resumable_media.common.DataCorruption will be raised on
a mismatch. The corrupted file will not be deleted from the remote
host automatically. Supported values are "md5", "crc32c" and None.
The default is None.
Attributes:
upload_url (str): The URL where the content will be uploaded.
Raises:
ValueError: If ``chunk_size`` is not a multiple of
:data:`.UPLOAD_CHUNK_SIZE`.
"""
async def initiate(
self,
transport,
stream,
metadata,
content_type,
total_bytes=None,
stream_final=True,
timeout=_request_helpers._DEFAULT_TIMEOUT,
):
"""Initiate a resumable upload.
By default, this method assumes your ``stream`` is in a "final"
state ready to transmit. However, ``stream_final=False`` can be used
to indicate that the size of the resource is not known. This can happen
if bytes are being dynamically fed into ``stream``, e.g. if the stream
is attached to application logs.
If ``stream_final=False`` is used, :attr:`chunk_size` bytes will be
read from the stream every time :meth:`transmit_next_chunk` | |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from .... import dataframe as md
from ....core.operand import OperandStage
from ....tests.core import assert_groupby_equal, require_cudf
from ....utils import arrow_array_to_objects
from ..aggregation import DataFrameGroupByAgg
class MockReduction1(md.CustomReduction):
def agg(self, v1):
return v1.sum()
class MockReduction2(md.CustomReduction):
def pre(self, value):
return value + 1, value * 2
def agg(self, v1, v2):
return v1.sum(), v2.min()
def post(self, v1, v2):
return v1 + v2
def test_groupby(setup):
rs = np.random.RandomState(0)
data_size = 100
data_dict = {'a': rs.randint(0, 10, size=(data_size,)),
'b': rs.randint(0, 10, size=(data_size,)),
'c': rs.choice(list('abcd'), size=(data_size,))}
# test groupby with DataFrames and RangeIndex
df1 = pd.DataFrame(data_dict)
mdf = md.DataFrame(df1, chunk_size=13)
grouped = mdf.groupby('b')
assert_groupby_equal(grouped.execute().fetch(),
df1.groupby('b'))
# test groupby with string index with duplications
df2 = pd.DataFrame(data_dict, index=['i' + str(i % 3) for i in range(data_size)])
mdf = md.DataFrame(df2, chunk_size=13)
grouped = mdf.groupby('b')
assert_groupby_equal(grouped.execute().fetch(),
df2.groupby('b'))
# test groupby with DataFrames by series
grouped = mdf.groupby(mdf['b'])
assert_groupby_equal(grouped.execute().fetch(),
df2.groupby(df2['b']))
# test groupby with DataFrames by multiple series
grouped = mdf.groupby(by=[mdf['b'], mdf['c']])
assert_groupby_equal(grouped.execute().fetch(),
df2.groupby(by=[df2['b'], df2['c']]))
# test groupby with DataFrames with MultiIndex
df3 = pd.DataFrame(data_dict,
index=pd.MultiIndex.from_tuples(
[(i % 3, 'i' + str(i)) for i in range(data_size)]))
mdf = md.DataFrame(df3, chunk_size=13)
grouped = mdf.groupby(level=0)
assert_groupby_equal(grouped.execute().fetch(),
df3.groupby(level=0))
# test groupby with DataFrames by integer columns
df4 = pd.DataFrame(list(data_dict.values())).T
mdf = md.DataFrame(df4, chunk_size=13)
grouped = mdf.groupby(0)
assert_groupby_equal(grouped.execute().fetch(),
df4.groupby(0))
series1 = pd.Series(data_dict['a'])
ms1 = md.Series(series1, chunk_size=13)
grouped = ms1.groupby(lambda x: x % 3)
assert_groupby_equal(grouped.execute().fetch(),
series1.groupby(lambda x: x % 3))
# test groupby series
grouped = ms1.groupby(ms1)
assert_groupby_equal(grouped.execute().fetch(),
series1.groupby(series1))
series2 = pd.Series(data_dict['a'],
index=['i' + str(i) for i in range(data_size)])
ms2 = md.Series(series2, chunk_size=13)
grouped = ms2.groupby(lambda x: int(x[1:]) % 3)
assert_groupby_equal(grouped.execute().fetch(),
series2.groupby(lambda x: int(x[1:]) % 3))
def test_groupby_getitem(setup):
rs = np.random.RandomState(0)
data_size = 100
raw = pd.DataFrame({'a': rs.randint(0, 10, size=(data_size,)),
'b': rs.randint(0, 10, size=(data_size,)),
'c': rs.choice(list('abcd'), size=(data_size,))},
index=pd.MultiIndex.from_tuples([(i % 3, 'i' + str(i)) for i in range(data_size)]))
mdf = md.DataFrame(raw, chunk_size=13)
r = mdf.groupby(level=0)[['a', 'b']]
assert_groupby_equal(r.execute().fetch(),
raw.groupby(level=0)[['a', 'b']], with_selection=True)
for method in ('tree', 'shuffle'):
r = mdf.groupby(level=0)[['a', 'b']].sum(method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby(level=0)[['a', 'b']].sum().sort_index())
r = mdf.groupby(level=0)[['a', 'b']].apply(lambda x: x + 1)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby(level=0)[['a', 'b']].apply(lambda x: x + 1).sort_index())
r = mdf.groupby('b')[['a', 'b']]
assert_groupby_equal(r.execute().fetch(),
raw.groupby('b')[['a', 'b']], with_selection=True)
r = mdf.groupby('b')[['a', 'c']]
assert_groupby_equal(r.execute().fetch(),
raw.groupby('b')[['a', 'c']], with_selection=True)
for method in ('tree', 'shuffle'):
r = mdf.groupby('b')[['a', 'b']].sum(method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b')[['a', 'b']].sum().sort_index())
r = mdf.groupby('b')[['a', 'b']].agg(['sum', 'count'], method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b')[['a', 'b']].agg(['sum', 'count']).sort_index())
r = mdf.groupby('b')[['a', 'c']].agg(['sum', 'count'], method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b')[['a', 'c']].agg(['sum', 'count']).sort_index())
r = mdf.groupby('b')[['a', 'b']].apply(lambda x: x + 1)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b')[['a', 'b']].apply(lambda x: x + 1).sort_index())
r = mdf.groupby('b')[['a', 'b']].transform(lambda x: x + 1)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b')[['a', 'b']].transform(lambda x: x + 1).sort_index())
r = mdf.groupby('b')[['a', 'b']].cumsum()
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b')[['a', 'b']].cumsum().sort_index())
r = mdf.groupby('b').a
assert_groupby_equal(r.execute().fetch(),
raw.groupby('b').a, with_selection=True)
for method in ('shuffle', 'tree'):
r = mdf.groupby('b').a.sum(method=method)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
raw.groupby('b').a.sum().sort_index())
r = mdf.groupby('b').a.agg(['sum', 'mean', 'var'], method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('b').a.agg(['sum', 'mean', 'var']).sort_index())
r = mdf.groupby('b', as_index=False).a.sum(method=method)
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values('b', ignore_index=True),
raw.groupby('b', as_index=False).a.sum().sort_values('b', ignore_index=True))
r = mdf.groupby('b', as_index=False).b.count(method=method)
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values('b', ignore_index=True),
raw.groupby('b', as_index=False).b.count().sort_values('b', ignore_index=True))
r = mdf.groupby('b', as_index=False).b.agg({'cnt': 'count'}, method=method)
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values('b', ignore_index=True),
raw.groupby('b', as_index=False).b.agg({'cnt': 'count'}).sort_values('b', ignore_index=True))
r = mdf.groupby('b').a.apply(lambda x: x + 1)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
raw.groupby('b').a.apply(lambda x: x + 1).sort_index())
r = mdf.groupby('b').a.transform(lambda x: x + 1)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
raw.groupby('b').a.transform(lambda x: x + 1).sort_index())
r = mdf.groupby('b').a.cumsum()
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
raw.groupby('b').a.cumsum().sort_index())
# special test for selection key == 0
raw = pd.DataFrame(rs.rand(data_size, 10))
raw[0] = 0
mdf = md.DataFrame(raw, chunk_size=13)
r = mdf.groupby(0, as_index=False)[0].agg({'cnt': 'count'}, method='tree')
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby(0, as_index=False)[0].agg({'cnt': 'count'}))
def test_dataframe_groupby_agg(setup):
agg_funs = ['std', 'mean', 'var', 'max', 'count', 'size', 'all', 'any', 'skew', 'kurt', 'sem']
rs = np.random.RandomState(0)
raw = pd.DataFrame({'c1': np.arange(100).astype(np.int64),
'c2': rs.choice(['a', 'b', 'c'], (100,)),
'c3': rs.rand(100)})
mdf = md.DataFrame(raw, chunk_size=13)
for method in ['tree', 'shuffle']:
r = mdf.groupby('c2').agg('size', method=method)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg('size').sort_index())
for agg_fun in agg_funs:
if agg_fun == 'size':
continue
r = mdf.groupby('c2').agg(agg_fun, method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg(agg_fun).sort_index())
r = mdf.groupby('c2').agg(agg_funs, method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg(agg_funs).sort_index())
agg = OrderedDict([('c1', ['min', 'mean']), ('c3', 'std')])
r = mdf.groupby('c2').agg(agg, method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg(agg).sort_index())
agg = OrderedDict([('c1', 'min'), ('c3', 'sum')])
r = mdf.groupby('c2').agg(agg, method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg(agg).sort_index())
r = mdf.groupby('c2').agg({'c1': 'min', 'c3': 'min'}, method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg({'c1': 'min', 'c3': 'min'}).sort_index())
r = mdf.groupby('c2').agg({'c1': 'min'}, method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg({'c1': 'min'}).sort_index())
# test groupby series
r = mdf.groupby(mdf['c2']).sum(method=method)
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby(raw['c2']).sum().sort_index())
r = mdf.groupby('c2').size(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
raw.groupby('c2').size())
# test inserted kurt method
r = mdf.groupby('c2').kurtosis(method='tree')
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.groupby('c2').kurtosis())
for agg_fun in agg_funs:
if agg_fun == 'size' or callable(agg_fun):
continue
r = getattr(mdf.groupby('c2'), agg_fun)(method='tree')
pd.testing.assert_frame_equal(r.execute().fetch(),
getattr(raw.groupby('c2'), agg_fun)())
# test as_index=False
for method in ['tree', 'shuffle']:
r = mdf.groupby('c2', as_index=False).agg('mean', method=method)
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values('c2', ignore_index=True),
raw.groupby('c2', as_index=False).agg('mean').sort_values('c2', ignore_index=True))
assert r.op.groupby_params['as_index'] is False
r = mdf.groupby(['c1', 'c2'], as_index=False).agg('mean', method=method)
pd.testing.assert_frame_equal(
r.execute().fetch().sort_values(['c1', 'c2'], ignore_index=True),
raw.groupby(['c1', 'c2'], as_index=False).agg('mean').sort_values(['c1', 'c2'], ignore_index=True))
assert r.op.groupby_params['as_index'] is False
# test as_index=False takes no effect
r = mdf.groupby(['c1', 'c2'], as_index=False).agg(['mean', 'count'])
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.groupby(['c1', 'c2'], as_index=False).agg(['mean', 'count']))
assert r.op.groupby_params['as_index'] is True
r = mdf.groupby('c2').agg(['cumsum', 'cumcount'])
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
raw.groupby('c2').agg(['cumsum', 'cumcount']).sort_index())
r = mdf[['c1', 'c3']].groupby(mdf['c2']).agg(MockReduction2())
pd.testing.assert_frame_equal(r.execute().fetch(),
raw[['c1', 'c3']].groupby(raw['c2']).agg(MockReduction2()))
r = mdf.groupby('c2').agg(sum_c1=md.NamedAgg('c1', 'sum'), min_c1=md.NamedAgg('c1', 'min'),
mean_c3=md.NamedAgg('c3', 'mean'), method='tree')
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.groupby('c2').agg(sum_c1=md.NamedAgg('c1', 'sum'),
min_c1=md.NamedAgg('c1', 'min'),
mean_c3=md.NamedAgg('c3', 'mean')))
def test_series_groupby_agg(setup):
rs = np.random.RandomState(0)
series1 = pd.Series(rs.rand(10))
ms1 = md.Series(series1, chunk_size=3)
agg_funs = ['std', 'mean', 'var', 'max', 'count', 'size', 'all', 'any', 'skew', 'kurt', 'sem']
for method in ['tree', 'shuffle']:
for agg_fun in agg_funs:
r = ms1.groupby(lambda x: x % 2).agg(agg_fun, method=method)
pd.testing.assert_series_equal(r.execute().fetch(),
series1.groupby(lambda x: x % 2).agg(agg_fun))
r = ms1.groupby(lambda x: x % 2).agg(agg_funs, method=method)
pd.testing.assert_frame_equal(r.execute().fetch(),
series1.groupby(lambda x: x % 2).agg(agg_funs))
# test groupby series
r = ms1.groupby(ms1).sum(method=method)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
series1.groupby(series1).sum().sort_index())
r = ms1.groupby(ms1).sum(method=method)
pd.testing.assert_series_equal(r.execute().fetch().sort_index(),
series1.groupby(series1).sum().sort_index())
# test inserted kurt method
r = ms1.groupby(ms1).kurtosis(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
series1.groupby(series1).kurtosis())
for agg_fun in agg_funs:
r = getattr(ms1.groupby(lambda x: x % 2), agg_fun)(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
getattr(series1.groupby(lambda x: x % 2), agg_fun)())
r = ms1.groupby(lambda x: x % 2).agg(['cumsum', 'cumcount'], method='tree')
pd.testing.assert_frame_equal(r.execute().fetch().sort_index(),
series1.groupby(lambda x: x % 2).agg(['cumsum', 'cumcount']).sort_index())
r = ms1.groupby(lambda x: x % 2).agg(MockReduction2(name='custom_r'), method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
series1.groupby(lambda x: x % 2).agg(MockReduction2(name='custom_r')))
r = ms1.groupby(lambda x: x % 2).agg(col_var='var', col_skew='skew', method='tree')
pd.testing.assert_frame_equal(r.execute().fetch(),
series1.groupby(lambda x: x % 2).agg(col_var='var', col_skew='skew'))
def test_groupby_agg_auto_method(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame({'c1': rs.randint(20, size=100),
'c2': rs.choice(['a', 'b', 'c'], (100,)),
'c3': rs.rand(100)})
mdf = md.DataFrame(raw, chunk_size=20)
def _disallow_reduce(ctx, op):
assert op.stage != OperandStage.reduce
op.execute(ctx, op)
r = mdf.groupby('c2').agg('sum')
operand_executors = {DataFrameGroupByAgg: _disallow_reduce}
result = r.execute(extra_config={'operand_executors': operand_executors,
'check_all': False}).fetch()
pd.testing.assert_frame_equal(result.sort_index(),
raw.groupby('c2').agg('sum'))
def _disallow_combine_and_agg(ctx, op):
assert op.stage not in (OperandStage.combine, OperandStage.agg)
op.execute(ctx, op)
r = mdf.groupby('c1').agg('sum')
operand_executors = {DataFrameGroupByAgg: _disallow_combine_and_agg}
result = r.execute(extra_config={'operand_executors': operand_executors,
'check_all': False}).fetch()
pd.testing.assert_frame_equal(result.sort_index(),
raw.groupby('c1').agg('sum'))
def test_groupby_agg_str_cat(setup):
agg_fun = lambda x: x.str.cat(sep='_', na_rep='NA')
rs = np.random.RandomState(0)
raw_df = pd.DataFrame({'a': rs.choice(['A', 'B', 'C'], size=(100,)),
'b': rs.choice([None, 'alfa', 'bravo', 'charlie'], size=(100,))})
mdf = md.DataFrame(raw_df, chunk_size=13)
r = mdf.groupby('a').agg(agg_fun, method='tree')
pd.testing.assert_frame_equal(r.execute().fetch(),
raw_df.groupby('a').agg(agg_fun))
raw_series = pd.Series(rs.choice([None, 'alfa', 'bravo', 'charlie'], size=(100,)))
ms = md.Series(raw_series, chunk_size=13)
r = ms.groupby(lambda x: x % 2).agg(agg_fun, method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
raw_series.groupby(lambda x: x % 2).agg(agg_fun))
@require_cudf
def test_gpu_groupby_agg(setup_gpu):
rs = np.random.RandomState(0)
df1 = pd.DataFrame({'a': rs.choice([2, 3, 4], size=(100,)),
'b': rs.choice([2, 3, 4], size=(100,))})
mdf = md.DataFrame(df1, chunk_size=13).to_gpu()
r = mdf.groupby('a').sum()
pd.testing.assert_frame_equal(r.execute().fetch().to_pandas(),
df1.groupby('a').sum())
r = mdf.groupby('a').kurt()
pd.testing.assert_frame_equal(r.execute().fetch().to_pandas(),
df1.groupby('a').kurt())
r = mdf.groupby('a').agg(['sum', 'var'])
pd.testing.assert_frame_equal(r.execute().fetch().to_pandas(),
df1.groupby('a').agg(['sum', 'var']))
rs = np.random.RandomState(0)
idx = pd.Index(np.where(rs.rand(10) > 0.5, 'A', 'B'))
series1 = pd.Series(rs.rand(10), index=idx)
ms = md.Series(series1, index=idx, chunk_size=3).to_gpu().to_gpu()
r = ms.groupby(level=0).sum()
pd.testing.assert_series_equal(r.execute().fetch().to_pandas(),
series1.groupby(level=0).sum())
r = ms.groupby(level=0).kurt()
pd.testing.assert_series_equal(r.execute().fetch().to_pandas(),
series1.groupby(level=0).kurt())
r = ms.groupby(level=0).agg(['sum', 'var'])
pd.testing.assert_frame_equal(r.execute().fetch().to_pandas(),
series1.groupby(level=0).agg(['sum', 'var']))
def test_groupby_apply(setup):
df1 = pd.DataFrame({'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'b': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'c': list('aabaaddce')})
def apply_df(df, ret_series=False):
df = df.sort_index()
df.a += df.b
if len(df.index) > 0:
if not ret_series:
df = df.iloc[:-1, :]
else:
df = df.iloc[-1, :]
return df
def apply_series(s, | |
0.434294481903252*log(1 + 2007.75081369119*m.b495) + 0.434294481903252*log(1 + 5431.16998851643*
m.b496) + 0.434294481903252*log(1 + 342.359410695155*m.b497) + 0.434294481903252*log(1 +
72.0734036305427*m.b498) + 0.434294481903252*log(1 + 1261.07680796947*m.b499) +
0.434294481903252*log(1 + 4720.72303655132*m.b500) + 0.434294481903252*log(1 + 1854.76744907733*
m.b501) + 0.434294481903252*log(1 + 919.407561641574*m.b502) + 0.434294481903252*log(1 +
864.052363054075*m.b503) + 0.434294481903252*log(1 + 3453.49702574677*m.b504) +
0.434294481903252*log(1 + 15072.8892913823*m.b505) + 0.434294481903252*log(1 + 46516.6917290662*
m.b506) + 0.434294481903252*log(1 + 1752.23229993449*m.b507) + 0.434294481903252*log(1 +
188.699486972999*m.b508) + 0.434294481903252*log(1 + 784.228500949826*m.b509) +
0.434294481903252*log(1 + 30918.3858574513*m.b510) + 0.434294481903252*log(1 + 555.854889843107*
m.b511) + 0.434294481903252*log(1 + 2061.92877584052*m.b512) + 0.434294481903252*log(1 +
4234.76776342235*m.b513) + 0.434294481903252*log(1 + 5757.25154802802*m.b514) +
0.434294481903252*log(1 + 2354.339546998*m.b515) + 0.434294481903252*log(1 + 4041.36955141029*
m.b516) + 0.434294481903252*log(1 + 1283.63774453508*m.b517) + 0.434294481903252*log(1 +
3888.07085355996*m.b518) + 0.434294481903252*log(1 + 72702.2132531148*m.b519) +
0.434294481903252*log(1 + 3591.90870995247*m.b520) + 0.434294481903252*log(1 + 11409.0474505529*
m.b521) + 0.434294481903252*log(1 + 1147.10560663736*m.b522) + 0.434294481903252*log(1 +
404.619573514426*m.b523) + 0.434294481903252*log(1 + 2316.69824093325*m.b524) +
0.434294481903252*log(1 + 93.3484387038088*m.b525) + 0.434294481903252*log(1 + 5961.85974624427*
m.b526) + 0.434294481903252*log(1 + 1855.7959822928*m.b527) + 0.434294481903252*log(1 +
12318.7382271755*m.b528) + 0.434294481903252*log(1 + 2908.34462479479*m.b529) +
0.434294481903252*log(1 + 172.439081909692*m.b530) + 0.434294481903252*log(1 + 2482.90485049701*
m.b531) + 0.434294481903252*log(1 + 1374.68182301604*m.b532) + 0.434294481903252*log(1 +
1362.24491334904*m.b533) + 0.434294481903252*log(1 + 1225.96640103539*m.b534) +
0.434294481903252*log(1 + 33318.4787069776*m.b535) + 0.434294481903252*log(1 + 2770.60123548017*
m.b536) + 0.434294481903252*log(1 + 1442.37332503559*m.b537) + 0.434294481903252*log(1 +
27209.4079007248*m.b538) + 0.434294481903252*log(1 + 471.35017212154*m.b539) + 0.434294481903252
*log(1 + 26526.0221102521*m.b540) + 0.434294481903252*log(1 + 1748.8539246863*m.b541) +
0.434294481903252*log(1 + 1707.29047390884*m.b542) + 0.434294481903252*log(1 + 32968.6099600421*
m.b543) + 0.434294481903252*log(1 + 841.206353739348*m.b544) + 0.434294481903252*log(1 +
125.219626525923*m.b545) + 0.434294481903252*log(1 + 841.284145292934*m.b546) +
0.434294481903252*log(1 + 277.084800679118*m.b547) + 0.434294481903252*log(1 + 8004.14038016155*
m.b548) + 0.434294481903252*log(1 + 1526.96723454076*m.b549) + 0.434294481903252*log(1 +
331.486531766832*m.b550) + 0.434294481903252*log(1 + 327.210090401598*m.b551) +
0.434294481903252*log(1 + 1111.49507115761*m.b552) + 0.434294481903252*log(1 + 48.5022836398045*
m.b553) + 0.434294481903252*log(1 + 568.010559559101*m.b554) + 0.434294481903252*log(1 +
9653.89473911305*m.b555) + 0.434294481903252*log(1 + 11242.1801194069*m.b556) +
0.434294481903252*log(1 + 762.958694759727*m.b557) + 0.434294481903252*log(1 + 805.279693716168*
m.b558) + 0.434294481903252*log(1 + 12300.5070021695*m.b559) + 0.434294481903252*log(1 +
4360.55649612184*m.b560) + 0.434294481903252*log(1 + 4296.92163502603*m.b561) +
0.434294481903252*log(1 + 820.203404494724*m.b562) + 0.434294481903252*log(1 + 5280.84153601824*
m.b563) + 0.434294481903252*log(1 + 2287.03932960134*m.b564) + 0.434294481903252*log(1 +
3561.07342596921*m.b565) + 0.434294481903252*log(1 + 1312.57852517598*m.b566) +
0.434294481903252*log(1 + 336.275187295465*m.b567) + 0.434294481903252*log(1 + 9613.89308088925*
m.b568) + 0.434294481903252*log(1 + 3980.70781891785*m.b569) + 0.434294481903252*log(1 +
576.832307092888*m.b570) + 0.434294481903252*log(1 + 4009.18513398058*m.b571) +
0.434294481903252*log(1 + 13161.572698313*m.b572) + 0.434294481903252*log(1 + 382.901078008441*
m.b573) + 0.434294481903252*log(1 + 568.649861694855*m.b574) + 0.434294481903252*log(1 +
439.429151707237*m.b575) + 0.434294481903252*log(1 + 615.889792081188*m.b576) +
0.434294481903252*log(1 + 434.510318593601*m.b577) + 0.434294481903252*log(1 + 1454.38346815964*
m.b578) + 0.434294481903252*log(1 + 2123.19657974925*m.b579) + 0.434294481903252*log(1 +
34.2100886293496*m.b580) + 0.434294481903252*log(1 + 171.696467459172*m.b581) +
0.434294481903252*log(1 + 13639.6039826893*m.b582) + 0.434294481903252*log(1 + 1027.6760622072*
m.b583) + 0.434294481903252*log(1 + 32175.268616459*m.b584) + 0.434294481903252*log(1 +
1250.46854382339*m.b585) + 0.434294481903252*log(1 + 1300.35698750258*m.b586) +
0.434294481903252*log(1 + 20582.6224130299*m.b587) + 0.434294481903252*log(1 + 2787.41947878493*
m.b588) + 0.434294481903252*log(1 + 82.4845851910256*m.b589) + 0.434294481903252*log(1 +
16284.8032680097*m.b590) + 0.434294481903252*log(1 + 406.238832395841*m.b591) +
0.434294481903252*log(1 + 18483.3257001511*m.b592) + 0.434294481903252*log(1 + 1540.86233011115*
m.b593) + 0.434294481903252*log(1 + 1884.74297582731*m.b594) + 0.434294481903252*log(1 +
248.721067071059*m.b595) + 0.434294481903252*log(1 + 1611.63883480382*m.b596) +
0.434294481903252*log(1 + 427.102003040675*m.b597) + 0.434294481903252*log(1 + 172.34237091695*
m.b598) + 0.434294481903252*log(1 + 4092.8059679855*m.b599) + 0.434294481903252*log(1 +
61712.3783763179*m.b600) >= 7.5257498916)
m.c18 = Constraint(expr=0.434294481903252*log(1 + 474.854729801177*m.b601) + 0.434294481903252*log(1 + 9384.33344740377*
m.b602) + 0.434294481903252*log(1 + 388.414291736935*m.b603) + 0.434294481903252*log(1 +
3547.9302761433*m.b604) + 0.434294481903252*log(1 + 245.455121982139*m.b605) + 0.434294481903252
*log(1 + 1347.7838166433*m.b606) + 0.434294481903252*log(1 + 3194.97142841093*m.b607) +
0.434294481903252*log(1 + 908.158075536971*m.b608) + 0.434294481903252*log(1 + 11.2355549148117*
m.b609) + 0.434294481903252*log(1 + 1665.26836856583*m.b610) + 0.434294481903252*log(1 +
587.060695283374*m.b611) + 0.434294481903252*log(1 + 327.141490806886*m.b612) +
0.434294481903252*log(1 + 60.6118931300699*m.b613) + 0.434294481903252*log(1 + 1600.0079972123*
m.b614) + 0.434294481903252*log(1 + 272.447430621049*m.b615) + 0.434294481903252*log(1 +
1291.98816911262*m.b616) + 0.434294481903252*log(1 + 606.031528354192*m.b617) +
0.434294481903252*log(1 + 166.260720660887*m.b618) + 0.434294481903252*log(1 + 1048.01330064803*
m.b619) + 0.434294481903252*log(1 + 1197.85612853383*m.b620) + 0.434294481903252*log(1 +
1013.91831957275*m.b621) + 0.434294481903252*log(1 + 2221.43909583569*m.b622) +
0.434294481903252*log(1 + 3589.66925544343*m.b623) + 0.434294481903252*log(1 + 3376.48190433742*
m.b624) + 0.434294481903252*log(1 + 748.132244457066*m.b625) + 0.434294481903252*log(1 +
1292.97144077443*m.b626) + 0.434294481903252*log(1 + 2106.77115685493*m.b627) +
0.434294481903252*log(1 + 181.880067472052*m.b628) + 0.434294481903252*log(1 + 940.416186650989*
m.b629) + 0.434294481903252*log(1 + 3924.66536860832*m.b630) + 0.434294481903252*log(1 +
10247.2915537728*m.b631) + 0.434294481903252*log(1 + 1352.00451546486*m.b632) +
0.434294481903252*log(1 + 2412.04209849101*m.b633) + 0.434294481903252*log(1 + 16279.8676730383*
m.b634) + 0.434294481903252*log(1 + 144.1134459271*m.b635) + 0.434294481903252*log(1 +
1273.82712899567*m.b636) + 0.434294481903252*log(1 + 2125.97423010101*m.b637) +
0.434294481903252*log(1 + 13894.8223569297*m.b638) + 0.434294481903252*log(1 + 2159.11408079188*
m.b639) + 0.434294481903252*log(1 + 3621.39819457907*m.b640) + 0.434294481903252*log(1 +
203.644852968811*m.b641) + 0.434294481903252*log(1 + 2492.94744160395*m.b642) +
0.434294481903252*log(1 + 942.034803509322*m.b643) + 0.434294481903252*log(1 + 2079.09525171044*
m.b644) + 0.434294481903252*log(1 + 139.299790832397*m.b645) + 0.434294481903252*log(1 +
1642.33389002954*m.b646) + 0.434294481903252*log(1 + 563.634690673551*m.b647) +
0.434294481903252*log(1 + 1554.32211481697*m.b648) + 0.434294481903252*log(1 + 555.575271178096*
m.b649) + 0.434294481903252*log(1 + 4077.02146584319*m.b650) + 0.434294481903252*log(1 +
1563.60461914423*m.b651) + 0.434294481903252*log(1 + 10620.9928899453*m.b652) +
0.434294481903252*log(1 + 4038.75362583971*m.b653) + 0.434294481903252*log(1 + 2654.20333257619*
m.b654) + 0.434294481903252*log(1 + 3958.13162882149*m.b655) + 0.434294481903252*log(1 +
441.266474172046*m.b656) + 0.434294481903252*log(1 + 505.697059385812*m.b657) +
0.434294481903252*log(1 + 2469.19022358721*m.b658) + 0.434294481903252*log(1 + 11926.7381663649*
m.b659) + 0.434294481903252*log(1 + 320.347401007842*m.b660) + 0.434294481903252*log(1 +
8441.97788643972*m.b661) + 0.434294481903252*log(1 + 3669.0062588045*m.b662) + 0.434294481903252
*log(1 + 1728.25313862289*m.b663) + 0.434294481903252*log(1 + 366.857413016698*m.b664) +
0.434294481903252*log(1 + 275.045964620623*m.b665) + 0.434294481903252*log(1 + 874.39685493719*
m.b666) + 0.434294481903252*log(1 + 274.724031307221*m.b667) + 0.434294481903252*log(1 +
2032.03228111453*m.b668) + 0.434294481903252*log(1 + 4586.34943852718*m.b669) +
0.434294481903252*log(1 + 2448.05420837663*m.b670) + 0.434294481903252*log(1 + 3726.20308138081*
m.b671) + 0.434294481903252*log(1 + 204.420437607011*m.b672) + 0.434294481903252*log(1 +
1547.90861765921*m.b673) + 0.434294481903252*log(1 + 1355.34153429962*m.b674) +
0.434294481903252*log(1 + 1787.19555146659*m.b675) + 0.434294481903252*log(1 + 43.8653522187123*
m.b676) + 0.434294481903252*log(1 + 53.2429813518581*m.b677) + 0.434294481903252*log(1 +
765.161347180929*m.b678) + 0.434294481903252*log(1 + 4308.08602609383*m.b679) +
0.434294481903252*log(1 + 50.3195673756505*m.b680) + 0.434294481903252*log(1 + 6075.60762979023*
m.b681) + 0.434294481903252*log(1 + 1140.07551045648*m.b682) + 0.434294481903252*log(1 +
1210.86931317318*m.b683) + 0.434294481903252*log(1 + 2478.33210748078*m.b684) +
0.434294481903252*log(1 + 869.637018255344*m.b685) + 0.434294481903252*log(1 + 1847.07901335282*
m.b686) + 0.434294481903252*log(1 + 1530.96564775959*m.b687) + 0.434294481903252*log(1 +
894.873522772495*m.b688) + 0.434294481903252*log(1 + 3231.84062782477*m.b689) +
0.434294481903252*log(1 + 267.4256499342*m.b690) + 0.434294481903252*log(1 + 1967.0185175581*
m.b691) + 0.434294481903252*log(1 + 1297.84408042514*m.b692) + 0.434294481903252*log(1 +
1993.34000895583*m.b693) + 0.434294481903252*log(1 + 1810.03617210963*m.b694) +
0.434294481903252*log(1 + 1449.14821431819*m.b695) + 0.434294481903252*log(1 + 1289.28770196515*
m.b696) + 0.434294481903252*log(1 + 3890.78903955186*m.b697) + 0.434294481903252*log(1 +
872.512527591647*m.b698) + 0.434294481903252*log(1 + 2984.54414602961*m.b699) +
0.434294481903252*log(1 + 362.199496920418*m.b700) + 0.434294481903252*log(1 + 4333.11029158424*
m.b701) + 0.434294481903252*log(1 + 896.502436186287*m.b702) + 0.434294481903252*log(1 +
52.8670746947153*m.b703) + 0.434294481903252*log(1 + 298.505339340174*m.b704) +
0.434294481903252*log(1 + 22866.1749259217*m.b705) + 0.434294481903252*log(1 + 188.099722192958*
m.b706) + 0.434294481903252*log(1 + 2114.79079943704*m.b707) + 0.434294481903252*log(1 +
4747.58549267533*m.b708) + 0.434294481903252*log(1 + 12770.1362673734*m.b709) +
0.434294481903252*log(1 + 2662.7515742621*m.b710) + 0.434294481903252*log(1 + 564.292857983069*
m.b711) + 0.434294481903252*log(1 + 2428.04861954581*m.b712) + 0.434294481903252*log(1 +
3254.14090387951*m.b713) + 0.434294481903252*log(1 + 3007.75593836234*m.b714) +
0.434294481903252*log(1 + 4107.36721893299*m.b715) + 0.434294481903252*log(1 + 7709.15450998564*
m.b716) + 0.434294481903252*log(1 + 207.12209520682*m.b717) + 0.434294481903252*log(1 +
3803.15504018403*m.b718) + 0.434294481903252*log(1 + 2875.06414361484*m.b719) +
0.434294481903252*log(1 + 3978.20805268159*m.b720) + 0.434294481903252*log(1 + 5233.97854959426*
m.b721) + 0.434294481903252*log(1 + 2395.19814256711*m.b722) + 0.434294481903252*log(1 +
1339.35706159401*m.b723) + 0.434294481903252*log(1 + 1053.44379715109*m.b724) +
0.434294481903252*log(1 + 316.76349901739*m.b725) + 0.434294481903252*log(1 + 33.383483053259*
m.b726) + 0.434294481903252*log(1 + 7610.52089702266*m.b727) + 0.434294481903252*log(1 +
11151.0938970001*m.b728) + 0.434294481903252*log(1 + 90.0404346492952*m.b729) +
0.434294481903252*log(1 + 867.407703573225*m.b730) + 0.434294481903252*log(1 + 3363.88522859726*
m.b731) + 0.434294481903252*log(1 + 2612.19569814744*m.b732) + 0.434294481903252*log(1 +
3895.09849411277*m.b733) + 0.434294481903252*log(1 + 466.582065808827*m.b734) +
0.434294481903252*log(1 + 2015.98994230465*m.b735) + 0.434294481903252*log(1 + 2651.97686335575*
m.b736) + 0.434294481903252*log(1 + 537.195483191459*m.b737) + 0.434294481903252*log(1 +
3810.08267101771*m.b738) + 0.434294481903252*log(1 + 810.554015478114*m.b739) +
0.434294481903252*log(1 + 1961.43507540024*m.b740) + 0.434294481903252*log(1 + 1700.7199998046*
m.b741) + 0.434294481903252*log(1 + 81.0874221820337*m.b742) + 0.434294481903252*log(1 +
2650.11965771548*m.b743) + 0.434294481903252*log(1 + 334.546034280968*m.b744) +
0.434294481903252*log(1 + 113.209034537858*m.b745) + 0.434294481903252*log(1 + 2.4122008062355*
m.b746) + 0.434294481903252*log(1 + 661.376847638501*m.b747) + 0.434294481903252*log(1 +
358.663132837536*m.b748) + 0.434294481903252*log(1 + 2159.7720981512*m.b749) + 0.434294481903252
*log(1 + 551.290790274395*m.b750) + 0.434294481903252*log(1 + 1786.31832170368*m.b751) +
0.434294481903252*log(1 + 2749.38514906219*m.b752) + 0.434294481903252*log(1 + 1429.63790831519*
m.b753) + 0.434294481903252*log(1 + 2767.49213929833*m.b754) + 0.434294481903252*log(1 +
788.388993853277*m.b755) + 0.434294481903252*log(1 + 9675.04202029705*m.b756) +
0.434294481903252*log(1 + 10692.183243307*m.b757) + 0.434294481903252*log(1 + 7283.66000277224*
m.b758) + 0.434294481903252*log(1 + 729.337254590051*m.b759) + 0.434294481903252*log(1 +
1588.94737873712*m.b760) + 0.434294481903252*log(1 + 524.157032959007*m.b761) +
0.434294481903252*log(1 + 2728.49653018146*m.b762) + 0.434294481903252*log(1 + 3917.61015183502*
m.b763) + 0.434294481903252*log(1 + 911.486468258131*m.b764) + 0.434294481903252*log(1 +
6023.43137266511*m.b765) + 0.434294481903252*log(1 + 1213.78014918656*m.b766) +
0.434294481903252*log(1 + 1262.18427860683*m.b767) + 0.434294481903252*log(1 + 203.305332029499*
m.b768) + 0.434294481903252*log(1 + 1778.49641339958*m.b769) + 0.434294481903252*log(1 +
2911.08789362198*m.b770) + 0.434294481903252*log(1 + 1613.90903555254*m.b771) +
0.434294481903252*log(1
<reponame>FcoJavT/rantanplan<filename>tests/test_core.py
import json
from pathlib import Path
from unittest import mock
import pytest
import spacy
import rantanplan.core
from rantanplan.core import apply_exception_rules
from rantanplan.core import apply_exception_rules_post
from rantanplan.core import clean_phonological_groups
from rantanplan.core import format_stress
from rantanplan.core import generate_liaison_positions
from rantanplan.core import generate_phonological_groups
from rantanplan.core import get_last_syllable
from rantanplan.core import get_orthographic_accent
from rantanplan.core import get_phonological_groups
from rantanplan.core import get_rhythmical_pattern
from rantanplan.core import get_scansion
from rantanplan.core import get_stresses
from rantanplan.core import get_syllables_word_end
from rantanplan.core import get_word_stress
from rantanplan.core import get_words
from rantanplan.core import has_single_liaisons
from rantanplan.core import have_prosodic_liaison
from rantanplan.core import is_paroxytone
from rantanplan.core import spacy_tag_to_dict
from rantanplan.core import syllabify
nlp = spacy.load('es_core_news_md')
@pytest.fixture
def phonological_groups():
return json.loads(
Path("tests/fixtures/phonological_groups.json").read_text())
@pytest.fixture
def rhyme_analysis_sonnet():
return json.loads(
Path("tests/fixtures/rhyme_analysis_sonnet.json").read_text())
@pytest.fixture
def scansion_sonnet():
return json.loads(
Path("tests/fixtures/scansion_sonnet.json").read_text())
@pytest.fixture
def haiku():
return json.loads(Path("tests/fixtures/haiku.json").read_text())
class TokenMock(mock.MagicMock):
_ = property(lambda self: mock.Mock(has_tmesis=self.has_tmesis,
line=self.line))
def __isinstance__(self, token): # noqa
return True
@staticmethod
def is_ancestor(token): # noqa
return True
@staticmethod
def nbor(): # noqa
return TokenMock()
def test_get_scansion_spacy_doc(monkeypatch):
token = TokenMock(text="Agüita", i=0, is_punct=False, has_tmesis=False,
line=1, pos_="NOUN")
def mockreturn(lang=None):
return lambda _: [
token
]
monkeypatch.setattr(rantanplan.core, 'load_pipeline', mockreturn)
enjambment = get_scansion(token)
assert enjambment == [
{'tokens': [
{'word': [
{'syllable': 'A', 'is_stressed': False},
{'syllable': 'güi', 'is_stressed': True},
{'syllable': 'ta', 'is_stressed': False,
'is_word_end': True}
],
'stress_position': -2}
],
'phonological_groups': [
{'syllable': 'A', 'is_stressed': False},
{'syllable': 'güi', 'is_stressed': True},
{'syllable': 'ta', 'is_stressed': False,
'is_word_end': True}
],
'rhythm': {'stress': '-+-', 'type': 'pattern', 'length': 3}
}
]
def test_have_prosodic_liaison():
first_syllable = {'syllable': 'ca', 'is_stressed': True}
second_syllable = {'syllable': 'en', 'is_stressed': False}
assert have_prosodic_liaison(first_syllable, second_syllable) is True
def test_have_prosodic_liaison_second_syllable_y_with_vowel():
first_syllable = {'syllable': 'ca', 'is_stressed': True}
second_syllable = {'syllable': 'yen', 'is_stressed': False}
assert have_prosodic_liaison(first_syllable, second_syllable) is False
def test_syllabify_exceptions_en():
word = "entender"
output = ['en', 'ten', 'der']
assert syllabify(word)[0] == output
def test_syllabify_exceptions_en_2():
word = "desentender"
output = ['de', 'sen', 'ten', 'der']
assert syllabify(word)[0] == output
def test_syllabify_exceptions_en_3():
word = "desenmarañados"
output = ['de', 'sen', 'ma', 'ra', 'ña', 'dos']
assert syllabify(word)[0] == output
def test_syllabify_exceptions_prefix_des_consonant():
word = "destapar"
output = ['des', 'ta', 'par']
assert syllabify(word)[0] == output
def test_syllabify_exceptions_prefix_sin_consonant():
word = "sinhueso"
output = ['sin', 'hue', 'so']
assert syllabify(word)[0] == output
def test_syllabify_tl():
word = "atlante"
output = ['a', 'tlan', 'te']
assert syllabify(word)[0] == output
def test_syllabify_group_1():
word = "antihumano"
output = ['an', 'tihu', 'ma', 'no']
assert syllabify(word)[0] == output
def test_syllabify_group_2():
word = "entrehierro"
output = ['en', 'tre', 'hie', 'rro']
assert syllabify(word)[0] == output
def test_syllabify_group_3():
word = "yihad"
output = ['yi', 'had']
assert syllabify(word)[0] == output
def test_syllabify_group_4():
word = "coche"
output = ['co', 'che']
assert syllabify(word)[0] == output
def test_syllabify_group_4_rl():
word = "abarloar"
output = ['a', 'bar', 'lo', 'ar']
assert syllabify(word)[0] == output
def test_syllabify_group_5():
word = "checo"
output = ['che', 'co']
assert syllabify(word)[0] == output
def test_syllabify_group_6():
word = "año"
output = ['a', 'ño']
assert syllabify(word)[0] == output
def test_syllabify_group_7():
word = "desvirtúe"
output = ['des', 'vir', 'tú', 'e']
assert syllabify(word)[0] == output
def test_syllabify_umlaut_u_e():
word = "güegüecho"
output = ['güe', 'güe', 'cho']
assert syllabify(word)[0] == output
def test_syllabify_umlaut_hyatus_with_consonant_1():
word = "insacïable"
output = ['in', 'sa', 'cï', 'a', 'ble']
assert syllabify(word)[0] == output
def test_syllabify_umlaut_hyatus_with_consonant_2():
word = "ruïdo"
output = ['ru', 'ï', 'do']
assert syllabify(word)[0] == output
def test_syllabify_umlaut_hyatus_with_vowel():
word = "ruëa"
output = ['ru', 'ë', 'a']
assert syllabify(word)[0] == output
def test_syllabify_umlaut_u_i():
word = "güito"
output = ['güi', 'to']
assert syllabify(word)[0] == output
def test_syllabify_umlaut_u_i_tilde():
word = "agüío"
output = ['a', 'güí', 'o']
assert syllabify(word)[0] == output
def test_syllabify_alternatives():
word = "arcaizabas"
output = (['ar', 'ca', 'i', 'za', 'bas'], (1, 2))
assert syllabify(word, alternative_syllabification=True) == output
def test_syllabify_alternatives_2():
word = "puntual"
output = (['pun', 'tu', 'al'], (1, 2))
assert syllabify(word, alternative_syllabification=True) == output
def test_get_orthographic_accent():
syllable_list = ['plá', 'ta', 'no']
output = 0
assert get_orthographic_accent(syllable_list) == output
def test_apply_exception_rules_post_hiatus_first_vowel():
syllabified_word = "hüe-co"
output = "hü-e-co"
assert apply_exception_rules_post(syllabified_word) == output
def test_apply_exception_rules_post_consonant_cluster():
syllabified_word = "c-ne-o-rá-ce-a"
output = "cne-o-rá-ce-a"
assert apply_exception_rules_post(syllabified_word) == output
def test_apply_exception_rules_post_raising_diphthong():
syllabified_word = "a-hi-ja-dor"
output = "ahi-ja-dor"
assert apply_exception_rules_post(syllabified_word) == output
def test_apply_exception_rules_post_lowering_diphthong():
syllabified_word = "bu-hi-ti-ho"
output = "buhi-tiho"
assert apply_exception_rules_post(syllabified_word) == output
def test_get_orthographic_accent_with_no_tilde():
syllable_list = ['pla', 'ta', 'ne', 'ro']
assert get_orthographic_accent(syllable_list) is None
def test_is_paroxytone():
syllable_list = ['pla', 'ta', 'ne', 'ro']
assert is_paroxytone(syllable_list) is True
def test_is_paroxytone_with_tilde():
syllable_list = ['cés', 'ped']
assert is_paroxytone(syllable_list) is False
def test_is_paroxytone_with_proparoxytone():
syllable_list = ['es', 'drú', 'ju', 'la']
assert is_paroxytone(syllable_list) is False
def test_is_paroxytone_with_oxytone_with_tilde():
syllable_list = ['a', 'com', 'pa', 'ñó']
assert is_paroxytone(syllable_list) is False
def test_is_paroxytone_with_oxytone_no_tilde():
syllable_list = ['tam', 'bor']
assert is_paroxytone(syllable_list) is False
def test_get_word_stress():
word = "plátano"
pos = "NOUN"
tag = {'Gender': 'Masc', 'Number': 'Sing'}
output = {
'word': [
{'syllable': 'plá', 'is_stressed': True},
{'syllable': 'ta', 'is_stressed': False},
{'syllable': 'no', 'is_stressed': False}
], 'stress_position': -3}
assert get_word_stress(word, pos, tag) == output
def test_get_word_stress_unstressed():
word = "platano"
pos = "DET"
tag = {'Gender': 'Masc', 'Number': 'Sing'}
output = {
'word': [
{'syllable': 'pla', 'is_stressed': False},
{'syllable': 'ta', 'is_stressed': False},
{'syllable': 'no', 'is_stressed': False}
], 'stress_position': 0}
assert get_word_stress(word, pos, tag) == output
def test_get_word_stress_stressed_monosyllables_without_tilde():
word = "yo"
pos = "PRON"
tag = {'Case': 'Nom', 'Number': 'Sing', 'Person': '1', 'PronType': 'Prs'}
output = {
'word': [
{'syllable': 'yo', 'is_stressed': True}
],
'stress_position': -1}
assert get_word_stress(word, pos, tag) == output
def test_get_word_stress_unstressed_monosyllables_without_tilde():
word = "mi"
pos = "DET"
tag = {'Number': 'Sing', 'Number[psor]': 'Sing', 'Person': '1',
'Poss': 'Yes', 'PronType': 'Prs'}
output = {
'word': [
{'syllable': 'mi', 'is_stressed': False}
],
'stress_position': 0}
assert get_word_stress(word, pos, tag) == output
def test_get_word_stress_no_tilde():
word = "campo"
pos = "NOUN"
tag = {'Gender': 'Masc', 'Number': 'Sing'}
output = {
'word': [
{'syllable': 'cam', 'is_stressed': True},
{'syllable': 'po', 'is_stressed': False}
],
'stress_position': -2}
assert get_word_stress(word, pos, tag) == output
def test_get_word_stress_oxytone():
word = "tambor"
pos = "NOUN"
tag = {'Gender': 'Fem', 'Number': 'Sing'}
output = {
'word': [
{'syllable': 'tam', 'is_stressed': False},
{'syllable': 'bor', 'is_stressed': True}
],
'stress_position': -1}
assert get_word_stress(word, pos, tag) == output
def test_get_words():
word = nlp('físico-químico')
output = [
{
'word': [
{'syllable': 'fí', 'is_stressed': True},
{'syllable': 'si', 'is_stressed': False},
{'syllable': 'co', 'is_stressed': False}
], 'stress_position': -3}, {'symbol': '-'}, {
'word': [
{'syllable': 'quí', 'is_stressed': True},
{'syllable': 'mi', 'is_stressed': False},
{'syllable': 'co', 'is_stressed': False}
], 'stress_position': -3}
]
assert get_words(word) == output
def test_get_scansion_spacy_doc_text():
text = nlp("patata")
output = [
{'tokens': [
{'word': [
{'syllable': 'pa', 'is_stressed': False},
{'syllable': 'ta', 'is_stressed': True},
{'syllable': 'ta', 'is_stressed': False, 'is_word_end': True}
],
'stress_position': -2}
],
'phonological_groups': [
{'syllable': 'pa', 'is_stressed': False},
{'syllable': 'ta', 'is_stressed': True},
{'syllable': 'ta', 'is_stressed': False,
'is_word_end': True}
],
'rhythm': {'stress': '-+-', 'type': 'pattern', 'length': 3}
}
]
assert get_scansion(text) == output
def test_get_scansion_rhyme_analysis_sonnet(rhyme_analysis_sonnet):
text = """Cruel amor, ¿tan fieras sinrazones
tras tanta confusión, tras pena tanta?
¿De qué sirve la argolla a la garganta
a quién jamás huyó de sus prisiones?
¿Hierro por premio das a mis pasiones?
Dueño cruel, tu sinrazón espanta,
el castigo a la pena se adelanta
y cuando sirvo bien hierros me pones.
¡Gentil laurel, amor; buenos despojos!
Y en un sujeto a tus mudanzas firme
hierro, virote, lágrimas y enojos.
Mas pienso que has querido persuadirme
que trayendo los hierros a los ojos
no pueda de la causa arrepentirme."""
assert get_scansion(text, rhyme_analysis=True) == rhyme_analysis_sonnet
def test_get_scansion_structures_length():
text = "casa azul"
output = [
{'tokens': [
{'word': [
{'syllable': 'ca', 'is_stressed': True},
{'syllable': 'sa',
'is_stressed': False,
'has_synalepha': True,
'is_word_end': True}],
'stress_position': -2},
{'word': [
{'syllable': 'a', 'is_stressed': False},
{'syllable': 'zul', 'is_stressed': True,
'is_word_end': True}
],
'stress_position': -1}
],
'phonological_groups': [
{'syllable': 'ca', 'is_stressed': True},
{'syllable': 'sa',
'is_stressed': False,
'has_synalepha': False,
'is_word_end': True},
{'syllable': 'a', 'is_stressed': False},
{'syllable': 'zul', 'is_stressed': True,
'is_word_end': True}],
'rhythm': {'stress': '+--+-', 'type': 'pattern', 'length': 5}
}
]
assert get_scansion(text, rhythmical_lengths=[5]) == output
def test_get_scansion_rhyme_analysis_haiku_no_rhyme(haiku):
text = """Noche sin luna.
La tempestad estruja
los viejos cedros."""
assert get_scansion(text, rhyme_analysis=True) == haiku
def test_get_scansion(scansion_sonnet):
text = """Siempre en octubre comenzaba el año.
¡Y cuántas veces esa luz de otoño
me recordó a <NAME>:
«Ya el tiempo nos convida
A los estudios nobles...»!"""
assert get_scansion(text, rhythm_format="pattern") == scansion_sonnet
def test_get_scansion_stressed_last_syllable():
text = "altavoz"
output = [
{'tokens': [
{'word': [
{'syllable': 'al', 'is_stressed': False},
{'syllable': 'ta', 'is_stressed': False},
{'syllable': 'voz', 'is_stressed': True,
'is_word_end': True}
],
'stress_position': -1}
],
'phonological_groups': [
{'syllable': 'al', 'is_stressed': False},
{'syllable': 'ta', 'is_stressed': False},
{'syllable': 'voz', 'is_stressed': True,
'is_word_end': True}
],
'rhythm': {'stress': '--+-', 'type': 'pattern', 'length': 4}}
]
assert get_scansion(text, rhythm_format="pattern") == output
def test_get_scansion_stressed_last_syllable_index_metrical_pattern():
text = "altavoz"
output = [
{'tokens': [
{'word': [
{'syllable': 'al', 'is_stressed': False},
{'syllable': 'ta', 'is_stressed': False},
<reponame>cjohnson-ctl/platform-client-sdk-python<filename>build/PureCloudPlatformClientV2/models/create_work_plan.py
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class CreateWorkPlan(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
CreateWorkPlan - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'enabled': 'bool',
'constrain_weekly_paid_time': 'bool',
'flexible_weekly_paid_time': 'bool',
'weekly_exact_paid_minutes': 'int',
'weekly_minimum_paid_minutes': 'int',
'weekly_maximum_paid_minutes': 'int',
'constrain_paid_time_granularity': 'bool',
'paid_time_granularity_minutes': 'int',
'constrain_minimum_time_between_shifts': 'bool',
'minimum_time_between_shifts_minutes': 'int',
'maximum_days': 'int',
'minimum_consecutive_non_working_minutes_per_week': 'int',
'constrain_maximum_consecutive_working_weekends': 'bool',
'maximum_consecutive_working_weekends': 'int',
'minimum_working_days_per_week': 'int',
'constrain_maximum_consecutive_working_days': 'bool',
'maximum_consecutive_working_days': 'int',
'minimum_shift_start_distance_minutes': 'int',
'minimum_days_off_per_planning_period': 'int',
'maximum_days_off_per_planning_period': 'int',
'minimum_paid_minutes_per_planning_period': 'int',
'maximum_paid_minutes_per_planning_period': 'int',
'optional_days': 'SetWrapperDayOfWeek',
'shift_start_variance_type': 'str',
'shift_start_variances': 'ListWrapperShiftStartVariance',
'shifts': 'list[CreateWorkPlanShift]',
'agents': 'list[UserReference]'
}
self.attribute_map = {
'name': 'name',
'enabled': 'enabled',
'constrain_weekly_paid_time': 'constrainWeeklyPaidTime',
'flexible_weekly_paid_time': 'flexibleWeeklyPaidTime',
'weekly_exact_paid_minutes': 'weeklyExactPaidMinutes',
'weekly_minimum_paid_minutes': 'weeklyMinimumPaidMinutes',
'weekly_maximum_paid_minutes': 'weeklyMaximumPaidMinutes',
'constrain_paid_time_granularity': 'constrainPaidTimeGranularity',
'paid_time_granularity_minutes': 'paidTimeGranularityMinutes',
'constrain_minimum_time_between_shifts': 'constrainMinimumTimeBetweenShifts',
'minimum_time_between_shifts_minutes': 'minimumTimeBetweenShiftsMinutes',
'maximum_days': 'maximumDays',
'minimum_consecutive_non_working_minutes_per_week': 'minimumConsecutiveNonWorkingMinutesPerWeek',
'constrain_maximum_consecutive_working_weekends': 'constrainMaximumConsecutiveWorkingWeekends',
'maximum_consecutive_working_weekends': 'maximumConsecutiveWorkingWeekends',
'minimum_working_days_per_week': 'minimumWorkingDaysPerWeek',
'constrain_maximum_consecutive_working_days': 'constrainMaximumConsecutiveWorkingDays',
'maximum_consecutive_working_days': 'maximumConsecutiveWorkingDays',
'minimum_shift_start_distance_minutes': 'minimumShiftStartDistanceMinutes',
'minimum_days_off_per_planning_period': 'minimumDaysOffPerPlanningPeriod',
'maximum_days_off_per_planning_period': 'maximumDaysOffPerPlanningPeriod',
'minimum_paid_minutes_per_planning_period': 'minimumPaidMinutesPerPlanningPeriod',
'maximum_paid_minutes_per_planning_period': 'maximumPaidMinutesPerPlanningPeriod',
'optional_days': 'optionalDays',
'shift_start_variance_type': 'shiftStartVarianceType',
'shift_start_variances': 'shiftStartVariances',
'shifts': 'shifts',
'agents': 'agents'
}
self._name = None
self._enabled = None
self._constrain_weekly_paid_time = None
self._flexible_weekly_paid_time = None
self._weekly_exact_paid_minutes = None
self._weekly_minimum_paid_minutes = None
self._weekly_maximum_paid_minutes = None
self._constrain_paid_time_granularity = None
self._paid_time_granularity_minutes = None
self._constrain_minimum_time_between_shifts = None
self._minimum_time_between_shifts_minutes = None
self._maximum_days = None
self._minimum_consecutive_non_working_minutes_per_week = None
self._constrain_maximum_consecutive_working_weekends = None
self._maximum_consecutive_working_weekends = None
self._minimum_working_days_per_week = None
self._constrain_maximum_consecutive_working_days = None
self._maximum_consecutive_working_days = None
self._minimum_shift_start_distance_minutes = None
self._minimum_days_off_per_planning_period = None
self._maximum_days_off_per_planning_period = None
self._minimum_paid_minutes_per_planning_period = None
self._maximum_paid_minutes_per_planning_period = None
self._optional_days = None
self._shift_start_variance_type = None
self._shift_start_variances = None
self._shifts = None
self._agents = None
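# A minimal usage sketch (hypothetical field values, not taken from the source): one way to
# populate an instance is through the generated property setters defined below, e.g.
#   plan = CreateWorkPlan()
#   plan.name = "Weekday full-time"
#   plan.enabled = True
#   plan.constrain_weekly_paid_time = True
#   plan.flexible_weekly_paid_time = False
#   plan.weekly_exact_paid_minutes = 2400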
@property
def name(self):
"""
Gets the name of this CreateWorkPlan.
Name of this work plan
:return: The name of this CreateWorkPlan.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this CreateWorkPlan.
Name of this work plan
:param name: The name of this CreateWorkPlan.
:type: str
"""
self._name = name
@property
def enabled(self):
"""
Gets the enabled of this CreateWorkPlan.
Whether the work plan is enabled for scheduling
:return: The enabled of this CreateWorkPlan.
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""
Sets the enabled of this CreateWorkPlan.
Whether the work plan is enabled for scheduling
:param enabled: The enabled of this CreateWorkPlan.
:type: bool
"""
self._enabled = enabled
@property
def constrain_weekly_paid_time(self):
"""
Gets the constrain_weekly_paid_time of this CreateWorkPlan.
Whether the weekly paid time constraint is enabled for this work plan
:return: The constrain_weekly_paid_time of this CreateWorkPlan.
:rtype: bool
"""
return self._constrain_weekly_paid_time
@constrain_weekly_paid_time.setter
def constrain_weekly_paid_time(self, constrain_weekly_paid_time):
"""
Sets the constrain_weekly_paid_time of this CreateWorkPlan.
Whether the weekly paid time constraint is enabled for this work plan
:param constrain_weekly_paid_time: The constrain_weekly_paid_time of this CreateWorkPlan.
:type: bool
"""
self._constrain_weekly_paid_time = constrain_weekly_paid_time
@property
def flexible_weekly_paid_time(self):
"""
Gets the flexible_weekly_paid_time of this CreateWorkPlan.
Whether the weekly paid time constraint is flexible for this work plan
:return: The flexible_weekly_paid_time of this CreateWorkPlan.
:rtype: bool
"""
return self._flexible_weekly_paid_time
@flexible_weekly_paid_time.setter
def flexible_weekly_paid_time(self, flexible_weekly_paid_time):
"""
Sets the flexible_weekly_paid_time of this CreateWorkPlan.
Whether the weekly paid time constraint is flexible for this work plan
:param flexible_weekly_paid_time: The flexible_weekly_paid_time of this CreateWorkPlan.
:type: bool
"""
self._flexible_weekly_paid_time = flexible_weekly_paid_time
@property
def weekly_exact_paid_minutes(self):
"""
Gets the weekly_exact_paid_minutes of this CreateWorkPlan.
Exact weekly paid time in minutes for this work plan. Used if flexibleWeeklyPaidTime == false
:return: The weekly_exact_paid_minutes of this CreateWorkPlan.
:rtype: int
"""
return self._weekly_exact_paid_minutes
@weekly_exact_paid_minutes.setter
def weekly_exact_paid_minutes(self, weekly_exact_paid_minutes):
"""
Sets the weekly_exact_paid_minutes of this CreateWorkPlan.
Exact weekly paid time in minutes for this work plan. Used if flexibleWeeklyPaidTime == false
:param weekly_exact_paid_minutes: The weekly_exact_paid_minutes of this CreateWorkPlan.
:type: int
"""
self._weekly_exact_paid_minutes = weekly_exact_paid_minutes
@property
def weekly_minimum_paid_minutes(self):
"""
Gets the weekly_minimum_paid_minutes of this CreateWorkPlan.
Minimum weekly paid time in minutes for this work plan. Used if flexibleWeeklyPaidTime == true
:return: The weekly_minimum_paid_minutes of this CreateWorkPlan.
:rtype: int
"""
return self._weekly_minimum_paid_minutes
@weekly_minimum_paid_minutes.setter
def weekly_minimum_paid_minutes(self, weekly_minimum_paid_minutes):
"""
Sets the weekly_minimum_paid_minutes of this CreateWorkPlan.
Minimum weekly paid time in minutes for this work plan. Used if flexibleWeeklyPaidTime == true
:param weekly_minimum_paid_minutes: The weekly_minimum_paid_minutes of this CreateWorkPlan.
:type: int
"""
self._weekly_minimum_paid_minutes = weekly_minimum_paid_minutes
@property
def weekly_maximum_paid_minutes(self):
"""
Gets the weekly_maximum_paid_minutes of this CreateWorkPlan.
Maximum weekly paid time in minutes for this work plan. Used if flexibleWeeklyPaidTime == true
:return: The weekly_maximum_paid_minutes of this CreateWorkPlan.
:rtype: int
"""
return self._weekly_maximum_paid_minutes
@weekly_maximum_paid_minutes.setter
def weekly_maximum_paid_minutes(self, weekly_maximum_paid_minutes):
"""
Sets the weekly_maximum_paid_minutes of this CreateWorkPlan.
Maximum weekly paid time in minutes for this work plan. Used if flexibleWeeklyPaidTime == true
:param weekly_maximum_paid_minutes: The weekly_maximum_paid_minutes of this CreateWorkPlan.
:type: int
"""
self._weekly_maximum_paid_minutes = weekly_maximum_paid_minutes
@property
def constrain_paid_time_granularity(self):
"""
Gets the constrain_paid_time_granularity of this CreateWorkPlan.
Whether paid time granularity should be constrained for this workplan
:return: The constrain_paid_time_granularity of this CreateWorkPlan.
:rtype: bool
"""
return self._constrain_paid_time_granularity
@constrain_paid_time_granularity.setter
def constrain_paid_time_granularity(self, constrain_paid_time_granularity):
"""
Sets the constrain_paid_time_granularity of this CreateWorkPlan.
Whether paid time granularity should be constrained for this workplan
:param constrain_paid_time_granularity: The constrain_paid_time_granularity of this CreateWorkPlan.
:type: bool
"""
self._constrain_paid_time_granularity = constrain_paid_time_granularity
@property
def paid_time_granularity_minutes(self):
"""
Gets the paid_time_granularity_minutes of this CreateWorkPlan.
Granularity in minutes allowed for shift paid time in this work plan. Used if constrainPaidTimeGranularity == true
:return: The paid_time_granularity_minutes of this CreateWorkPlan.
:rtype: int
"""
return self._paid_time_granularity_minutes
@paid_time_granularity_minutes.setter
def paid_time_granularity_minutes(self, paid_time_granularity_minutes):
"""
Sets the paid_time_granularity_minutes of this CreateWorkPlan.
Granularity in minutes allowed for shift paid time in this work plan. Used if constrainPaidTimeGranularity == true
:param paid_time_granularity_minutes: The paid_time_granularity_minutes of this CreateWorkPlan.
:type: int
"""
self._paid_time_granularity_minutes = paid_time_granularity_minutes
@property
def constrain_minimum_time_between_shifts(self):
"""
Gets the constrain_minimum_time_between_shifts of this CreateWorkPlan.
Whether the minimum time between shifts constraint is enabled for this work plan
:return: The constrain_minimum_time_between_shifts of this CreateWorkPlan.
:rtype: bool
"""
return self._constrain_minimum_time_between_shifts
@constrain_minimum_time_between_shifts.setter
def constrain_minimum_time_between_shifts(self, constrain_minimum_time_between_shifts):
"""
Sets the constrain_minimum_time_between_shifts of this CreateWorkPlan.
Whether the minimum time between shifts constraint is enabled for this work plan
:param constrain_minimum_time_between_shifts: The constrain_minimum_time_between_shifts of this CreateWorkPlan.
:type: bool
"""
self._constrain_minimum_time_between_shifts = constrain_minimum_time_between_shifts
@property
def minimum_time_between_shifts_minutes(self):
"""
Gets the minimum_time_between_shifts_minutes of this CreateWorkPlan.
Minimum time between shifts in minutes defined in this work plan. Used if constrainMinimumTimeBetweenShifts == true
:return: The minimum_time_between_shifts_minutes of this CreateWorkPlan.
:rtype: int
"""
return self._minimum_time_between_shifts_minutes
@minimum_time_between_shifts_minutes.setter
def minimum_time_between_shifts_minutes(self, minimum_time_between_shifts_minutes):
"""
Sets the minimum_time_between_shifts_minutes of this CreateWorkPlan.
Minimum time between shifts in minutes defined in this work plan. Used if constrainMinimumTimeBetweenShifts == true
:param minimum_time_between_shifts_minutes: The minimum_time_between_shifts_minutes of this CreateWorkPlan.
:type: int
"""
self._minimum_time_between_shifts_minutes = minimum_time_between_shifts_minutes
@property
def maximum_days(self):
"""
Gets the maximum_days of this CreateWorkPlan.
Maximum number of days in a week allowed to be scheduled for this work plan
:return: The maximum_days of this CreateWorkPlan.
:rtype: int
"""
return self._maximum_days
@maximum_days.setter
def maximum_days(self, maximum_days):
"""
Sets the maximum_days of this CreateWorkPlan.
Maximum number of days in a week allowed to be scheduled for this work plan
:param maximum_days: The maximum_days of this CreateWorkPlan.
:type: int
"""
self._maximum_days = maximum_days
@property
def minimum_consecutive_non_working_minutes_per_week(self):
"""
Gets the minimum_consecutive_non_working_minutes_per_week of this CreateWorkPlan.
Minimum amount of consecutive non working minutes per week that agents who are assigned this work plan are allowed to have off
:return: The minimum_consecutive_non_working_minutes_per_week of this CreateWorkPlan.
:rtype: int
"""
return self._minimum_consecutive_non_working_minutes_per_week
@minimum_consecutive_non_working_minutes_per_week.setter
def minimum_consecutive_non_working_minutes_per_week(self, minimum_consecutive_non_working_minutes_per_week):
"""
Sets the minimum_consecutive_non_working_minutes_per_week of this CreateWorkPlan.
Minimum amount of consecutive non working minutes
<gh_stars>0
# -*- coding: utf-8 -*-
"""
FINAL PROJECT
<NAME> and <NAME>
"""
import cv2
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import VarianceThreshold
from matplotlib import cm
from matplotlib.lines import Line2D
from collections import Counter
from joblib import Parallel, delayed
from math import sqrt
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from RBF import *
# Fix the random seed
np.random.seed(1)
"""# CONSTANTES"""
RESIZE_SHAPE = (27,27) #Target size of an image after rescaling
VISUALIZATION = False #Whether to visualize the data with PCA and t-SNE
TUNING = False #Whether to run hyperparameter tuning for the different models
NPY = True #Whether to load the data already saved on disk as numpy arrays
DOWNSAMPLING = True #Whether to apply downsampling during preprocessing
K_FOLDS = 5 #Number of folds for cross-validation
N_JOBS = -1 #Number of processors to use for parallel execution; -1 uses all processors
# Logistic Regression hyperparameters
C = 0.07435854122563143 # Regularization parameter
# MLP hyperparameters
ACTIVATION = 'relu'
NUM_NEURONS = 100
ALPHA = 0.14729982381618334
EARLY_STOPPING = False
# Random Forest hyperparameters
NUM_ARBOLES=290 #Number of trees for random forest
CCP_ALPHA=0 #Complexity parameter value used for pruning in random forest
# Support Vector Machine hyperparameters
C_SVM= 100 #Regularization parameter for SVM
GAMMA= 0.01 #Gamma parameter value for SVM
# RBF-Network hyperparameters
K=1500 #Number of clusters for the RBF-Network
"""# FUNCIONES"""
def continuar():
input("\n----Presiona Enter para continuar----\n")
def loadDataset(directory_path, shuffle=True):
""" Función para cargar las imágenes en forma de arrays de numpy
Args:
directory_path: ruta donde se encuentran los datos
shuffle: booleano que determina si se baraja o no el conjunto de datos.
"""
import glob, os
current_dir = os.path.abspath(os.getcwd())
os.chdir(directory_path)
imgs, labels = [], []
for class_dir in os.listdir('./'):
print("Class:",class_dir)
class_name = class_dir
for i, file in enumerate(glob.glob("{}/*.png".format(class_dir))):
imgs.append(cv2.imread(file, 0))
labels.append(class_name)
imgs = np.asarray(imgs)
labels = np.asarray(labels)
if shuffle:
indices = np.random.permutation(len(imgs))
imgs = imgs[indices]
labels = labels[indices]
os.chdir(current_dir)
return imgs, labels
def plot2D(x,y,title):
""" Función para visualizar una muesta etiquetada en 2D
Args:
x: muestra de puntos a visualizar
y: vector de etiquetas asociado a la muestra x
title: título para el gráfico generado
"""
num_classes = np.unique(y).size
plt.figure(figsize=(10,10))
plt.scatter(x[:,0],x[:,1],c=y, cmap='tab20', alpha=0.5)
cmap=cm.get_cmap('tab20')
proxys=[]
labels=[]
for l in range(num_classes):
proxys.append(Line2D([0],[0], linestyle='none', c=cmap(l/(num_classes-1)), marker='o'))
labels.append(str(l+1))
plt.legend(proxys, labels, numpoints = 1,framealpha=0.5)
plt.title(title)
plt.show()
def visualization(x,y,title):
""" Función para visualizar una muesta etiquetada en 2D,
tras reducir su dimensionalidad con PCA y posteriormente con T-SNE
Args:
x: muestra de puntos a visualizar
y: vector de etiquetas asociado a la muestra x
title: título para el gráfico generado
"""
#Scale the features so they have mean 0 and variance 1
x = StandardScaler().fit_transform(x)
#Run a principal component analysis, keeping 2 components so the data can be visualized
pca=PCA(n_components=2, random_state=1)
x_pca=pca.fit_transform(x)
#Visualize the resulting data in 2D
plot2D(x_pca, y, 'PCA\n'+title)
#Show the variance explained by each of the two components
print("Varianza explicada: ", pca.explained_variance_ratio_)
#Now reduce the dimensionality with t-SNE, starting from the results obtained with PCA
x_tsne = TSNE(n_components=2, init=x_pca,perplexity=30).fit_transform(x)
#Visualize the resulting data in 2D
plot2D(x_tsne, y, 'TSNE\n'+title)
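# A minimal usage sketch (illustrative variable names, not taken from the source): once the
# images have been flattened by preprocess() below and the labels encoded, the 2D projections
# can be drawn with
#   if VISUALIZATION:
#       visualization(x, y, 'Training set')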
def boundingBox(img):
""" Función para calcular la caja englobante del carácter
representado en la imagen que se pasa como parámetro
Args:
img: imagen en la que se quiere calcular la caja englobante
Returns:
x,y: coordenadas de la esquina superior izquierda de la caja
w: ancho de la caja
h: altura de la caja
"""
#Referencias:
#https://learnopencv.com/otsu-thresholding-with-opencv/
#https://docs.opencv.org/master/d7/d4d/tutorial_py_thresholding.html
#https://docs.opencv.org/master/d9/d61/tutorial_py_morphological_ops.html
kernel = np.ones((3,3),np.uint8) #Kernel de 1's y tamaño 3x3
closing = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernel) #Calculamos el closing de la imagen para rellenar huecos
_,thresh = cv2.threshold(closing,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) #Método de Otsu para calcular el threashold de la imagen automáticamente
x,y,w,h = 2,2,thresh.shape[0]-2,thresh.shape[1]-2 #Le restamos 2 para quitarle el padding que tienen las imágenes en el dataset
#Acotamos los límites del BB hasta encontrar un píxel blanco (que forme parte del trazo del carácter)
while not any(thresh[y:y+h,x]>0):
x += 1
w -= 1
while not any(thresh[y,x:x+w]>0):
y += 1
h -= 1
while not any(thresh[y:y+h,x+w-1]>0):
w -= 1
while not any(thresh[y+h-1,x:x+w]>0):
h -= 1
return x,y,w,h
def meanDimensionsBoundingBox(data):
""" Función para calcular el ancho y alto medio
de las cajas englobantes del conjunto de datos
pasado como parámetro
Args:
data: conjunto de imágenes
Returns:
w_mean: ancho medio de las cajas englobantes
h_mean : altura media de las cajas englobantes
"""
w_mean, h_mean = 0.0, 0.0
for i in range(data.shape[0]):
x,y,w,h = boundingBox(data[i])
w_mean += w
h_mean += h
w_mean /= data.shape[0]
h_mean /= data.shape[0]
return w_mean, h_mean
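# A minimal usage sketch (imgs is an illustrative name for the loaded image array):
#   w_mean, h_mean = meanDimensionsBoundingBox(imgs)
# The averages could be used, for instance, to pick a sensible RESIZE_SHAPE.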
def preprocessImage(img):
""" Función para preprocesar una imagen pasada como parámetro
Args:
img: imagen a preprocesar
Returns:
imagen preprocesada
"""
#Referencias:
#https://docs.opencv.org/master/d4/d86/group__imgproc__filter.html#gaf9bba239dfca11654cb7f50f889fc2ff
x,y,w,h = boundingBox(img) #Determinamos la caja englobante del caracter en la imagen
img_prep = img[y:y+h,x:x+w] #Nos quedamos con los píxeles englobados por la caja ('recortamos' la imagen)
img_prep = cv2.resize(img_prep,RESIZE_SHAPE,interpolation=cv2.INTER_LINEAR) #Reescalamos la imagen recortada, con interpolación bilineal
#Hacemos downsampling de la imagen, para reducir el número de píxeles
if DOWNSAMPLING:
img_prep = cv2.GaussianBlur(img_prep,ksize=(3,3),sigmaX=0.5) #Aplicamos un filtro gaussiano para prevenir el aliasing, con kernel
#de tamaño 3x3, y sigma=0.5
img_prep = img_prep[1:img_prep.shape[0]:2,1:img_prep.shape[1]:2] #Eliminamos las filas y columnas pares
img_prep = np.reshape(img_prep,img_prep.size) #Pasamos la imagen a forma de vector
return img_prep
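# Note (a sketch based on the defaults above, not a claim from the source): with
# RESIZE_SHAPE = (27,27) and DOWNSAMPLING = True, the [1::2] slicing keeps the 13 odd-indexed
# rows and columns, so each image becomes a 13*13 = 169-dimensional feature vector; with
# DOWNSAMPLING = False it stays 27*27 = 729.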
def preprocess(data):
""" Fundión para preprocesar todas las imágenes
del conjunto de datos pasado como parámetro en paralelo
Args:
data:conjunto de imágenes a preprocesar
Returns:
Conjunto de imágenes preprocesado
"""
out = Parallel(n_jobs=-1)(map(delayed(preprocessImage),data)) #Preprocesamos en paralelo cada una de las imágenes
out = np.array(out,np.float32)
return out
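# A minimal end-to-end sketch (the directory path and variable names are illustrative):
#   imgs, labels = loadDataset('data/train')
#   x = preprocess(imgs)
#   y = LabelEncoder().fit_transform(labels)
#   x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)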
def plotMatrix(matrix, title, labels=False):
""" Función para visualizar una matriz como un mapa de calor
Args:
matrix: matriz a visualizar
title: título para el gráfico generado
labels: booleano que determina si se añaden etiquetas a los ejes
"""
plt.figure(figsize=(8,8))
plt.matshow(matrix, cmap='viridis')
plt.colorbar()
plt.title(title,fontsize=15)
if labels:
plt.ylabel('Verdaderos')
plt.xlabel('Predicciones')
plt.show()
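# A minimal usage sketch (y_test/y_pred are illustrative names): plotMatrix can, for example,
# display a confusion matrix computed with sklearn:
#   plotMatrix(metrics.confusion_matrix(y_test, y_pred), 'Confusion matrix', labels=True)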
def findRange(model, param_name, x_train, y_train, params,
color='black', log=True, n_jobs_cv=None):
""" Función que determina un intervalo de valores para un parámetro
entre los cuales el modelo presenta los mejores resultados
Args:
model: modelo cuyo parámetro se quiere ajustar
param_name: parámetro para el que se quiere buscar el intervalo
de mejores valores
x_train: conjunto de entrenamiento
y_train: vector de etiquetas asociado al conjunto de entrenamiento
params: lista de valores para el parámetros en la que se busca
el mejor intervalo
color: color para los puntos en la visualización
log: booleano que determina si se usa escala logarítmica en el eje x en la visualización
n_jobs_cv: número de procesadores usados en paralelo en validación cruzada
Returns:
a, b : extremos del intervalo
a_score, b_score: accuracy del modelo para los valores a y b del parámetro, resp.
"""
scores = []
print("Búsqueda del intervalo con mejores valores para el parámetro "+param_name)
for i in range(len(params)):
model.set_params(**{param_name:params[i]})
#Run cross-validation with K_FOLDS folds to estimate the model's accuracy
# and append the result to the scores vector
scores.append(np.mean(cross_val_score(model,x_train,y_train,cv=K_FOLDS,n_jobs=n_jobs_cv)))
print(params[i],":",scores[i])
params = np.array(params)
scores = np.array(scores)
plotScores(params, scores, color, 'Accuracy media frente a '+param_name, param_name, log=log)
#Index of the parameter value with the highest accuracy
max_index = np.argmax(scores)
#If the best value is the first or the last one provided, that value is returned
a = b = params[max_index]
a_score = b_score = scores[max_index]
if max_index>0 and max_index<scores.size-1:
#Determine whether the interval with the best results lies above or below
#the index of the best value found
if scores[max_index-1] > scores[max_index+1]:
#Set the missing endpoint of the interval and its accuracy value
a = params[max_index-1]
a_score = scores[max_index-1]
else:
#Set the missing endpoint of the interval and its accuracy value
b = params[max_index+1]
b_score = scores[max_index+1]
return a, b, a_score, b_score
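# A minimal usage sketch (x_train/y_train and the grid of C values are illustrative):
#   a, b, a_score, b_score = findRange(LogisticRegression(max_iter=500), 'C',
#                                      x_train, y_train,
#                                      params=[1e-3, 1e-2, 1e-1, 1, 10, 100],
#                                      log=True, n_jobs_cv=N_JOBS)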
def dichotomicSearch(model,
resultado = "0" + resultado
elif num == 1:
resultado = "1" + resultado
decimal += 1
decimal /= 2
tamanho = len(resultado)
while tamanho < 8:
resultado = '0' + resultado
tamanho += 1
tamanho = len(resultado)
nume = list(resultado)
while i < tamanho:
if resultado[i] == '1':
nume[i] = '0'
elif resultado[i] == '0':
nume[i] = '1'
i += 1
resultado = "".join(nume)
Teste1["text"] = "Esse número decimal em binario é"
Teste2["text"] = resultado
Textotipo["text"] = 'Informe um número decimal para ser trans- '
Texttipo2["text"] = 'formado em binario complemento de dois'
dados = Entry(janela, font="14")
dados.place(x=55, y=440, width=200, height=35)
converter = Button(janela, text="Converter",
command=lambda: Converte(), font="14")
converter.place(x=100, y=490, width=100, height=30)
# Binaries #
#......................................................................................................................................#
def octal_Decimal():
def Converte():
decimal = 0
contador = 0
i = 0
octal2 = octal.get()
tamanho = len(octal2)
larg = tamanho
while i < tamanho:
if octal2[larg - 1] == '0':
decimal += 0 * (8 ** contador)
elif octal2[larg - 1] == '1':
decimal += 1 * (8 ** contador)
elif octal2[larg - 1] == '2':
decimal += 2 * (8 ** contador)
elif octal2[larg - 1] == '3':
decimal += 3 * (8 ** contador)
elif octal2[larg - 1] == '4':
decimal += 4 * (8 ** contador)
elif octal2[larg - 1] == '5':
decimal += 5 * (8 ** contador)
elif octal2[larg - 1] == '6':
decimal += 6 * (8 ** contador)
elif octal2[larg - 1] == '7':
decimal += 7 * (8 ** contador)
elif octal2[larg - 1] == '8':
decimal += 8 * (8 ** contador)
elif octal2[larg - 1] == '9':
decimal += 9 * (8 ** contador)
larg -= 1
contador += 1
i += 1
Teste3["text"] = "Esse número octal em decimal é"
Teste4["text"] = decimal
Textotipo3["text"] = 'Informe um número Octal para ser'
Texttipo4["text"] = 'convertido em decimal'
octal = Entry(janela, font="14")
octal.place(x=455, y=370, width=200, height=35)
converter = Button(janela, text="Converter",
command=lambda: Converte(), font="14")
converter.place(x=500, y=420, width=100, height=30)
def decimal_Octal():
def Converte():
resultado = ""
decimal = dados.get()
decimal = int(decimal)
while decimal >= 1:
num = int(decimal % 8)
texto = str(num)
resultado = texto + resultado
if num != 0:
decimal -= num
decimal /= 8
Teste3["text"] = "Esse número decimal em octal é"
Teste4["text"] = resultado
Textotipo3["text"] = 'Informe um número decimal para'
Texttipo4["text"] = 'ser transformado em octal'
dados = Entry(janela, font="14")
dados.place(x=455, y=370, width=200, height=35)
converter = Button(janela, text="Converter",
command=lambda: Converte(), font="14")
converter.place(x=500, y=420, width=100, height=30)
# Octals #
#.......................................................................................................................................#
def hexa_Decimal():
def Converte():
decimal = 0
i = 0
contador = 0
hexa = hexadecimal.get()
tamanho = len(hexa)
larg = tamanho
while i < tamanho:
if hexa[larg - 1] == '0':
decimal += 0 * (16 ** contador)
elif hexa[larg - 1] == '1':
decimal += 1 * (16 ** contador)
elif hexa[larg - 1] == '2':
decimal += 2 * (16 ** contador)
elif hexa[larg - 1] == '3':
decimal += 3 * (16 ** contador)
elif hexa[larg - 1] == '4':
decimal += 4 * (16 ** contador)
elif hexa[larg - 1] == '5':
decimal += 5 * (16 ** contador)
elif hexa[larg - 1] == '6':
decimal += 6 * (16 ** contador)
elif hexa[larg - 1] == '7':
decimal += 7 * (16 ** contador)
elif hexa[larg - 1] == '8':
decimal += 8 * (16 ** contador)
elif hexa[larg - 1] == '9':
decimal += 9 * (16 ** contador)
elif hexa[larg - 1] == 'A':
decimal += 10 * (16 ** contador)
elif hexa[larg - 1] == 'B':
decimal += 11 * (16 ** contador)
elif hexa[larg - 1] == 'C':
decimal += 12 * (16 ** contador)
elif hexa[larg - 1] == 'D':
decimal += 13 * (16 ** contador)
elif hexa[larg - 1] == 'E':
decimal += 14 * (16 ** contador)
elif hexa[larg - 1] == 'F':
decimal += 15 * (16 ** contador)
larg -= 1
contador += 1
i += 1
Teste5["text"] = "Esse número hexadecimal em decimal é"
Teste6["text"] = decimal
Textotipo5["text"] = 'Informe um número Hexadecimal para ser'
Texttipo6["text"] = 'convertido em decimal'
hexadecimal = Entry(janela, font="14")
hexadecimal.place(x=855, y=370, width=200, height=35)
converter = Button(janela, text="Converter",
command=lambda: Converte(), font="14")
converter.place(x=900, y=420, width=100, height=30)
def decimal_Hexa():
def Converte():
decimal = dados.get()
decimal = int(decimal)
resultado = ""
while decimal >= 1:
num = int(decimal % 16)
texto = str(num)
if num == 15:
resultado = 'F' + resultado
elif num == 14:
resultado = 'E' + resultado
elif num == 13:
resultado = 'D' + resultado
elif num == 12:
resultado = 'C' + resultado
elif num == 11:
resultado = 'B' + resultado
elif num == 10:
resultado = 'A' + resultado
else:
resultado = texto + resultado
if num != 0:
decimal -= num
decimal /= 16
Teste5["text"] = "Esse número decimal em hexadecimal é"
Teste6["text"] = resultado
Textotipo5["text"] = 'Informe um número decimal para'
Texttipo6["text"] = 'ser transformado em hexadecimal'
dados = Entry(janela, font="14")
dados.place(x=855, y=370, width=200, height=35)
converter = Button(janela, text="Converter",
command=lambda: Converte(), font="14")
converter.place(x=900, y=420, width=100, height=30)
# Hexadecimals #
#......................................................................................................................................#
def octal_Binario():
def Converte():
i = 0
contador = 0
resultado = ""
octa = dados.get()
tamanho = len(octa)
larg = tamanho
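# Conversion note: each octal digit expands to a fixed 3-bit group ('0' -> '000' ... '7' -> '111'),
# assembled right to left into 'resultado'.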
while i < tamanho:
if octa[larg - 1] == '0':
resultado = '000' + resultado
elif octa[larg - 1] == '1':
resultado = '001' + resultado
elif octa[larg - 1] == '2':
resultado = '010' + resultado
elif octa[larg - 1] == '3':
resultado = '011' + resultado
elif octa[larg - 1] == '4':
resultado = '100' + resultado
elif octa[larg - 1] == '5':
resultado = '101' + resultado
elif octa[larg - 1] == '6':
resultado = '110' + resultado
elif octa[larg - 1] == '7':
resultado = '111' + resultado
larg -= 1
contador += 1
i += 1
print(resultado)
Teste7["text"] = "Esse número octal"
Teste8["text"] = "em binario é"
Teste9["text"] = resultado
Textotipo7["text"] = 'Informe um número octal para'
Texttipo8["text"] = 'ser transformado em binario'
dados = Entry(janela, font="14")
dados.place(x=1255, y=400, width=200, height=35)
converter = Button(janela, text="Converter",
command=lambda: Converte(), font="14")
converter.place(x=1300, y=450, width=100, height=30)
def binario_Octal():
def Converte():
dados2 = dados.get()
tamanho = len(dados2)
i = 1
c = 0
largura = tamanho % 3
ture = largura
vale = tamanho - ture
testar = vale / 3
resposta = ""
if tamanho == 1:
if dados2 == '0':
resposta = "0"
Teste7["text"] = "Esse número binario em octal é"
Teste8["text"] = resposta
elif dados2 == '1':
resposta = "1"
Teste7["text"] = "Esse número binario em octal é"
Teste8["text"] = resposta
elif tamanho == 2:
if dados2 == '00':
resposta = "0"
Teste7["text"] = "Esse número binario em octal é"
Teste8["text"] = resposta
elif dados2 == '01':
resposta = "1"
Teste7["text"] = "Esse número binario em octal é"
Teste8["text"] = resposta
elif dados2 == "10":
resposta = "2"
Teste7["text"] = "Esse número binario em octal é"
Teste8["text"] = resposta
elif dados2 == "11":
resposta = "3"
Teste7["text"] = "Esse número binario em octal é"
Teste8["text"] = resposta
elif testar >= 1:
resposta1 = ""
if ture == 1:
binario = dados2[0]
if binario == '0':
resposta1 = "0"
elif binario == '1':
resposta1 = "1"
elif ture == 2:
binario = dados2[0] + dados2[1]
if binario == '00':
resposta1 = "0"
elif binario == '01':
resposta1 = "1"
elif binario == "10":
resposta1 = "2"
elif binario == "11":
resposta1 = "3"
while i <= testar:
teste1 = tamanho - (3 + c)
teste2 = tamanho - (2 + c)
teste3 = tamanho - (1 + c)
binario = ""
binario = dados2[teste1] + dados2[teste2] + dados2[teste3]
if binario == '000':
resposta = "0" + resposta
elif binario == '001':
resposta = "1" + resposta
elif binario == '010':
resposta = "2" + resposta
elif binario == '011':
resposta = "3" | |
"div",
"rem",
"eqeq",
"neq",
"lt",
"lte",
"gt",
"gte",
]:
def fn(self, items, meta, op=op):
assert len(items) == 2
return E.Apply(sp(self.filename, meta), "_" + op, items)
setattr(_ExprTransformer, op, lark.v_args(meta=True)(classmethod(fn))) # pyre-fixme
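# Illustrative note (not part of the original module): after this loop every listed binary
# operator rule has a transformer method, so a parse-tree node for e.g. "a / b" is rewritten
# to E.Apply(pos, "_div", [<a>, <b>]) when the tree is transformed.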
class _TypeTransformer(lark.Transformer):
# pylint: disable=no-self-use,unused-argument
def __init__(self, file: str) -> None:
self.filename = file
def optional(self, items, meta):
return set(["optional"])
def nonempty(self, items, meta):
return set(["nonempty"])
def optional_nonempty(self, items, meta):
return set(["optional", "nonempty"])
def type(self, items, meta):
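# items holds the type-name token, then up to two type parameters, and possibly a trailing
# set of quantifiers ("optional"/"nonempty") produced by the rules above, which is popped first.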
quantifiers = set()
if len(items) > 1 and isinstance(items[-1], set):
quantifiers = items.pop()
param = items[1] if len(items) > 1 else None
param2 = items[2] if len(items) > 2 else None
if items[0].value == "Array":
if not param or param2:
raise Err.InvalidType(sp(self.filename, meta), "Array must have one type parameter")
if quantifiers - set(["optional", "nonempty"]):
raise Err.ValidationError(
sp(self.filename, meta), "invalid type quantifier(s) for Array"
)
return T.Array(param, "optional" in quantifiers, "nonempty" in quantifiers)
if "nonempty" in quantifiers:
raise Err.InvalidType(
sp(self.filename, meta), "invalid type quantifier(s) for " + items[0].value
)
atomic_types = {
"Int": T.Int,
"Float": T.Float,
"Boolean": T.Boolean,
"String": T.String,
"File": T.File,
}
if items[0].value in atomic_types:
if param or param2:
raise Err.InvalidType(
sp(self.filename, meta), items[0] + " type doesn't accept parameters"
)
return atomic_types[items[0].value]("optional" in quantifiers)
if items[0].value == "Map":
if not (param and param2):
raise Err.InvalidType(sp(self.filename, meta), "Map must have two type parameters")
return T.Map((param, param2), "optional" in quantifiers)
if items[0].value == "Pair":
if not (param and param2):
raise Err.InvalidType(sp(self.filename, meta), "Pair must have two type parameters")
return T.Pair(param, param2, "optional" in quantifiers)
if param or param2:
raise Err.InvalidType(sp(self.filename, meta), "Unexpected type parameter(s)")
return T.StructInstance(items[0].value, "optional" in quantifiers)
def _check_keyword(pos, name):
if name in _keywords:
raise Err.SyntaxError(pos, "unexpected keyword {}".format(name))
class _DocTransformer(_ExprTransformer, _TypeTransformer):
# pylint: disable=no-self-use,unused-argument
def __init__(self, file: str) -> None:
# pylint: disable=super-init-not-called
self.filename = file
def decl(self, items, meta):
_check_keyword(sp(self.filename, meta), items[1].value)
return D.Decl(
sp(self.filename, meta),
items[0],
items[1].value,
(items[2] if len(items) > 2 else None),
)
def input_decls(self, items, meta):
return {"inputs": items}
def noninput_decls(self, items, meta):
return {"decls": items}
def placeholder_option(self, items, meta):
assert len(items) == 2
return (items[0].value, items[1])
def placeholder(self, items, meta):
options = dict(items[:-1])
if len(options.items()) < len(items) - 1:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "duplicate options in expression placeholder"
)
return E.Placeholder(sp(self.filename, meta), options, items[-1])
def command(self, items, meta):
parts = []
for item in items:
if isinstance(item, E.Placeholder):
parts.append(item)
else:
parts.append(item.value)
return {"command": E.String(sp(self.filename, meta), parts)}
def output_decls(self, items, meta):
return {"outputs": items}
def meta_kv(self, items, meta):
return (items[0].value, items[1])
def meta_object(self, items, meta):
d = dict()
for k, v in items:
if k in d:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "duplicate keys in meta object"
)
d[k] = v
return d
def meta_array(self, items, meta):
return items
def meta_section(self, items, meta):
kind = items[0].value
assert kind in ["meta", "parameter_meta"]
d = dict()
d[kind] = items[1]
return d
def runtime_kv(self, items, meta):
return (items[0].value, items[1])
def runtime_section(self, items, meta):
d = dict()
for k, v in items:
# TODO: restore duplicate check, cf. https://github.com/gatk-workflows/five-dollar-genome-analysis-pipeline/blob/89f11befc13abae97ab8fb1b457731f390c8728d/tasks_pipelines/qc.wdl#L288
# if k in d:
# raise Err.MultipleDefinitions(sp(self.filename, meta), "duplicate keys in runtime section")
d[k] = v
return {"runtime": d}
def task(self, items, meta):
d = {}
for item in items:
if isinstance(item, dict):
for k, v in item.items():
if k in d:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "redundant sections in task"
)
d[k] = v
else:
assert isinstance(item, str)
assert "name" not in d
d["name"] = item.value
_check_keyword(sp(self.filename, meta), d["name"])
return D.Task(
sp(self.filename, meta),
d["name"],
d.get("inputs", None),
d.get("decls", []),
d["command"],
d.get("outputs", []),
d.get("parameter_meta", {}),
d.get("runtime", {}),
d.get("meta", {}),
)
def tasks(self, items, meta):
return items
def namespaced_ident(self, items, meta) -> E.Base:
assert items
return [item.value for item in items]
def call_input(self, items, meta):
return (items[0].value, items[1])
def call_inputs(self, items, meta):
d = dict()
for k, v in items:
if k in d:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "duplicate keys in call inputs"
)
d[k] = v
return d
def call(self, items, meta):
return D.Call(
sp(self.filename, meta), items[0], None, items[1] if len(items) > 1 else dict()
)
def call_as(self, items, meta):
_check_keyword(sp(self.filename, meta), items[1].value)
return D.Call(
sp(self.filename, meta),
items[0],
items[1].value,
items[2] if len(items) > 2 else dict(),
)
def scatter(self, items, meta):
_check_keyword(sp(self.filename, meta), items[0].value)
return D.Scatter(sp(self.filename, meta), items[0].value, items[1], items[2:])
def conditional(self, items, meta):
return D.Conditional(sp(self.filename, meta), items[0], items[1:])
def workflow_wildcard_output(self, items, meta):
return items[0] + ["*"]
# return E.Ident(items[0].pos, items[0].namespace + [items[0].name, "*"])
def workflow_output_decls(self, items, meta):
decls = [elt for elt in items if isinstance(elt, D.Decl)]
idents = [elt for elt in items if isinstance(elt, list)]
assert len(decls) + len(idents) == len(items)
return {"outputs": decls, "output_idents": idents, "pos": sp(self.filename, meta)}
def workflow(self, items, meta):
elements = []
inputs = None
outputs = None
output_idents = None
output_idents_pos = None
parameter_meta = None
meta_section = None
for item in items[1:]:
if isinstance(item, dict):
if "inputs" in item:
assert inputs is None
inputs = item["inputs"]
elif "outputs" in item:
if outputs is not None:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "redundant sections in workflow"
)
outputs = item["outputs"]
if "output_idents" in item:
assert output_idents is None
output_idents = item["output_idents"]
output_idents_pos = item["pos"]
elif "meta" in item:
if meta_section is not None:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "redundant sections in workflow"
)
meta_section = item["meta"]
elif "parameter_meta" in item:
if parameter_meta is not None:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "redundant sections in workflow"
)
parameter_meta = item["parameter_meta"]
else:
assert False
elif isinstance(item, (D.Call, D.Conditional, D.Decl, D.Scatter)):
elements.append(item)
else:
assert False
_check_keyword(sp(self.filename, meta), items[0].value)
return D.Workflow(
sp(self.filename, meta),
items[0].value,
inputs,
elements,
outputs,
parameter_meta or dict(),
meta_section or dict(),
output_idents,
output_idents_pos,
)
def struct(self, items, meta):
assert len(items) >= 1
name = items[0]
_check_keyword(sp(self.filename, meta), name)
members = {}
for d in items[1:]:
assert not d.expr
if d.name in members:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "duplicate members in struct"
)
members[d.name] = d.type
return D.StructTypeDef(sp(self.filename, meta), name, members)
def import_alias(self, items, meta):
assert len(items) == 2
_check_keyword(sp(self.filename, meta), items[1].value)
return (items[0].value, items[1].value)
def import_doc(self, items, meta):
uri = items[0]
if len(items) > 1 and isinstance(items[1], str):
namespace = items[1].value
else:
namespace = uri
try:
namespace = namespace[namespace.rindex("/") + 1 :]
except ValueError:
pass
if namespace.endswith(".wdl"):
namespace = namespace[:-4]
_check_keyword(sp(self.filename, meta), namespace)
aliases = [p for p in items[1:] if isinstance(p, tuple)]
return D.DocImport(
pos=sp(self.filename, meta), uri=uri, namespace=namespace, aliases=aliases, doc=None
)
def document(self, items, meta):
imports = []
structs = {}
tasks = []
workflow = None
for item in items:
if isinstance(item, D.Task):
tasks.append(item)
elif isinstance(item, D.Workflow):
if workflow is not None:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "Document has multiple workflows"
)
workflow = item
elif isinstance(item, D.StructTypeDef):
if item.name in structs:
raise Err.MultipleDefinitions(
sp(self.filename, meta), "multiple structs named " + item.name
)
structs[item.name] = item
elif isinstance(item, lark.Tree) and item.data == "version":
pass
elif isinstance(item, D.DocImport):
imports.append(item)
else:
assert False
return D.Document(sp(self.filename, meta), imports, structs, tasks, workflow)
# have lark pass the 'meta' with line/column numbers to each transformer method
for _klass in [_ExprTransformer, _TypeTransformer, _DocTransformer]:
for name, method in inspect.getmembers(_klass, inspect.isfunction):
if not name.startswith("_"):
setattr(_klass, name, lark.v_args(meta=True)(method)) # pyre-fixme
def parse_expr(txt: str, version: Optional[str] = None) -> E.Base:
try:
return _ExprTransformer(txt).transform(parse(txt, "expr", version))
except lark.exceptions.UnexpectedInput as exn:
pos = SourcePosition(
filename="(buffer)",
line=getattr(exn, "line", "?"),
column=getattr(exn, "column", "?"),
end_line=getattr(exn, "line", "?"),
end_column=getattr(exn, "column", "?"),
)
raise Err.SyntaxError(pos, str(exn)) from None
except lark.exceptions.VisitError as exn:
raise exn.__context__
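# Hedged usage sketch (assumes this module's parse() helper and WDL grammar, defined elsewhere
# in the file, are available; the expression literal is only an illustration):
#   expr = parse_expr("1 + 2", version="1.0")
#   # 'expr' is an E.Base tree; lark parse errors are re-raised as Err.SyntaxError above.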
def parse_tasks(txt: str, version: Optional[str] = None) -> List[D.Task]:
try:
return _DocTransformer("").transform(parse(txt, "tasks", version))
except lark.exceptions.VisitError as exn:
raise exn.__context__
def parse_document(txt: str, version: Optional[str] = None, uri: str = "") -> D.Document:
if version is None:
# for now assume the version is 1.0 if the first line is "version <number>"
# otherwise draft-2
version = "draft-2"
for line in txt.split("\n"):
line = line.strip()
if line and line[0] != "#":
if line.startswith("version ") and line[8].isdigit():
version = "1.0"
break
if not txt.strip():
return D.Document(
SourcePosition(filename=uri, line=0, column=0, end_line=0, end_column=0),
[],
{},
[],
None,
)
try:
return _DocTransformer(uri).transform(parse(txt, "document", version))
except lark.exceptions.UnexpectedInput as exn:
pos = SourcePosition(
filename=(uri if uri != "" else "(buffer)"),
not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetCorpConsoleResponse(),
await self.do_roarequest_async('GetCorpConsole', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/corps/consoles', 'json', req, runtime)
)
def get_file_info(
self,
file_id: str,
) -> dingtalkesign__2__0_models.GetFileInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFileInfoHeaders()
return self.get_file_info_with_options(file_id, headers, runtime)
async def get_file_info_async(
self,
file_id: str,
) -> dingtalkesign__2__0_models.GetFileInfoResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFileInfoHeaders()
return await self.get_file_info_with_options_async(file_id, headers, runtime)
def get_file_info_with_options(
self,
file_id: str,
headers: dingtalkesign__2__0_models.GetFileInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFileInfoResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFileInfoResponse(),
self.do_roarequest('GetFileInfo', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/files/{file_id}', 'json', req, runtime)
)
async def get_file_info_with_options_async(
self,
file_id: str,
headers: dingtalkesign__2__0_models.GetFileInfoHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFileInfoResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFileInfoResponse(),
await self.do_roarequest_async('GetFileInfo', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/files/{file_id}', 'json', req, runtime)
)
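# Hedged usage sketch (the client construction and the file id below are assumptions, not part
# of the generated SDK code above):
#   client = Client(open_api_models.Config())  # 'Client' stands for the enclosing class
#   resp = client.get_file_info('hypothetical-file-id')
#   # per-call headers/runtime can be passed via get_file_info_with_options(...) instead.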
def channel_orders(
self,
request: dingtalkesign__2__0_models.ChannelOrdersRequest,
) -> dingtalkesign__2__0_models.ChannelOrdersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.ChannelOrdersHeaders()
return self.channel_orders_with_options(request, headers, runtime)
async def channel_orders_async(
self,
request: dingtalkesign__2__0_models.ChannelOrdersRequest,
) -> dingtalkesign__2__0_models.ChannelOrdersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.ChannelOrdersHeaders()
return await self.channel_orders_with_options_async(request, headers, runtime)
def channel_orders_with_options(
self,
request: dingtalkesign__2__0_models.ChannelOrdersRequest,
headers: dingtalkesign__2__0_models.ChannelOrdersHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.ChannelOrdersResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.order_id):
body['orderId'] = request.order_id
if not UtilClient.is_unset(request.item_code):
body['itemCode'] = request.item_code
if not UtilClient.is_unset(request.item_name):
body['itemName'] = request.item_name
if not UtilClient.is_unset(request.quantity):
body['quantity'] = request.quantity
if not UtilClient.is_unset(request.pay_fee):
body['payFee'] = request.pay_fee
if not UtilClient.is_unset(request.order_create_time):
body['orderCreateTime'] = request.order_create_time
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.ChannelOrdersResponse(),
self.do_roarequest('ChannelOrders', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/orders/channel', 'json', req, runtime)
)
async def channel_orders_with_options_async(
self,
request: dingtalkesign__2__0_models.ChannelOrdersRequest,
headers: dingtalkesign__2__0_models.ChannelOrdersHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.ChannelOrdersResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.order_id):
body['orderId'] = request.order_id
if not UtilClient.is_unset(request.item_code):
body['itemCode'] = request.item_code
if not UtilClient.is_unset(request.item_name):
body['itemName'] = request.item_name
if not UtilClient.is_unset(request.quantity):
body['quantity'] = request.quantity
if not UtilClient.is_unset(request.pay_fee):
body['payFee'] = request.pay_fee
if not UtilClient.is_unset(request.order_create_time):
body['orderCreateTime'] = request.order_create_time
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.ChannelOrdersResponse(),
await self.do_roarequest_async('ChannelOrders', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/orders/channel', 'json', req, runtime)
)
def resale_order(
self,
request: dingtalkesign__2__0_models.ResaleOrderRequest,
) -> dingtalkesign__2__0_models.ResaleOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.ResaleOrderHeaders()
return self.resale_order_with_options(request, headers, runtime)
async def resale_order_async(
self,
request: dingtalkesign__2__0_models.ResaleOrderRequest,
) -> dingtalkesign__2__0_models.ResaleOrderResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.ResaleOrderHeaders()
return await self.resale_order_with_options_async(request, headers, runtime)
def resale_order_with_options(
self,
request: dingtalkesign__2__0_models.ResaleOrderRequest,
headers: dingtalkesign__2__0_models.ResaleOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.ResaleOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.order_id):
body['orderId'] = request.order_id
if not UtilClient.is_unset(request.quantity):
body['quantity'] = request.quantity
if not UtilClient.is_unset(request.order_create_time):
body['orderCreateTime'] = request.order_create_time
if not UtilClient.is_unset(request.service_start_time):
body['serviceStartTime'] = request.service_start_time
if not UtilClient.is_unset(request.service_stop_time):
body['serviceStopTime'] = request.service_stop_time
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.ResaleOrderResponse(),
self.do_roarequest('ResaleOrder', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/orders/resale', 'json', req, runtime)
)
async def resale_order_with_options_async(
self,
request: dingtalkesign__2__0_models.ResaleOrderRequest,
headers: dingtalkesign__2__0_models.ResaleOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.ResaleOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.order_id):
body['orderId'] = request.order_id
if not UtilClient.is_unset(request.quantity):
body['quantity'] = request.quantity
if not UtilClient.is_unset(request.order_create_time):
body['orderCreateTime'] = request.order_create_time
if not UtilClient.is_unset(request.service_start_time):
body['serviceStartTime'] = request.service_start_time
if not UtilClient.is_unset(request.service_stop_time):
body['serviceStopTime'] = request.service_stop_time
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.ResaleOrderResponse(),
await self.do_roarequest_async('ResaleOrder', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/orders/resale', 'json', req, runtime)
)
def cancel_corp_auth(
self,
request: dingtalkesign__2__0_models.CancelCorpAuthRequest,
) -> dingtalkesign__2__0_models.CancelCorpAuthResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.CancelCorpAuthHeaders()
return self.cancel_corp_auth_with_options(request, headers, runtime)
async def cancel_corp_auth_async(
self,
request: dingtalkesign__2__0_models.CancelCorpAuthRequest,
) -> dingtalkesign__2__0_models.CancelCorpAuthResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.CancelCorpAuthHeaders()
return await self.cancel_corp_auth_with_options_async(request, headers, runtime)
def cancel_corp_auth_with_options(
self,
request: dingtalkesign__2__0_models.CancelCorpAuthRequest,
headers: dingtalkesign__2__0_models.CancelCorpAuthHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.CancelCorpAuthResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.CancelCorpAuthResponse(),
self.do_roarequest('CancelCorpAuth', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/auths/cancel', 'json', req, runtime)
)
async def cancel_corp_auth_with_options_async(
self,
request: dingtalkesign__2__0_models.CancelCorpAuthRequest,
headers: dingtalkesign__2__0_models.CancelCorpAuthHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.CancelCorpAuthResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.CancelCorpAuthResponse(),
await self.do_roarequest_async('CancelCorpAuth', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/auths/cancel', 'json', req, runtime)
)
def get_file_upload_url(
self,
request: dingtalkesign__2__0_models.GetFileUploadUrlRequest,
) -> dingtalkesign__2__0_models.GetFileUploadUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFileUploadUrlHeaders()
return self.get_file_upload_url_with_options(request, headers, runtime)
async def get_file_upload_url_async(
self,
request: dingtalkesign__2__0_models.GetFileUploadUrlRequest,
) -> dingtalkesign__2__0_models.GetFileUploadUrlResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFileUploadUrlHeaders()
return await self.get_file_upload_url_with_options_async(request, headers, runtime)
def get_file_upload_url_with_options(
self,
request: dingtalkesign__2__0_models.GetFileUploadUrlRequest,
headers: dingtalkesign__2__0_models.GetFileUploadUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFileUploadUrlResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.content_md_5):
body['contentMd5'] = request.content_md_5
if not UtilClient.is_unset(request.content_type):
body['contentType'] = request.content_type
if not UtilClient.is_unset(request.file_name):
body['fileName'] = request.file_name
if not UtilClient.is_unset(request.file_size):
body['fileSize'] = request.file_size
if not UtilClient.is_unset(request.convert_2pdf):
body['convert2Pdf'] = request.convert_2pdf
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFileUploadUrlResponse(),
self.do_roarequest('GetFileUploadUrl', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/files/uploadUrls', 'json', req, runtime)
)
async def get_file_upload_url_with_options_async(
self,
request: dingtalkesign__2__0_models.GetFileUploadUrlRequest,
headers: dingtalkesign__2__0_models.GetFileUploadUrlHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFileUploadUrlResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.ding_corp_id):
body['dingCorpId'] = request.ding_corp_id
if not UtilClient.is_unset(request.content_md_5):
body['contentMd5'] = request.content_md_5
if not UtilClient.is_unset(request.content_type):
body['contentType'] = request.content_type
if not UtilClient.is_unset(request.file_name):
body['fileName'] = request.file_name
if not UtilClient.is_unset(request.file_size):
body['fileSize'] = request.file_size
if not UtilClient.is_unset(request.convert_2pdf):
body['convert2Pdf'] = request.convert_2pdf
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFileUploadUrlResponse(),
await self.do_roarequest_async('GetFileUploadUrl', 'esign_2.0', 'HTTP', 'POST', 'AK', f'/v2.0/esign/files/uploadUrls', 'json', req, runtime)
)
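# Hedged usage sketch (field values are placeholders; the request fields mirror the body keys
# set above, e.g. file_name -> 'fileName'), reusing the 'client' from the sketch earlier:
#   upload_req = dingtalkesign__2__0_models.GetFileUploadUrlRequest(
#       ding_corp_id='hypothetical-corp-id', file_name='contract.pdf',
#       file_size=1024, content_type='application/pdf', content_md_5='<md5-of-file>')
#   resp = client.get_file_upload_url(upload_req)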
def get_isv_status(self) -> dingtalkesign__2__0_models.GetIsvStatusResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetIsvStatusHeaders()
return self.get_isv_status_with_options(headers, runtime)
async def get_isv_status_async(self) -> dingtalkesign__2__0_models.GetIsvStatusResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetIsvStatusHeaders()
return await self.get_isv_status_with_options_async(headers, runtime)
def get_isv_status_with_options(
self,
headers: dingtalkesign__2__0_models.GetIsvStatusHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetIsvStatusResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetIsvStatusResponse(),
self.do_roarequest('GetIsvStatus', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/corps/appStatus', 'json', req, runtime)
)
async def get_isv_status_with_options_async(
self,
headers: dingtalkesign__2__0_models.GetIsvStatusHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetIsvStatusResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetIsvStatusResponse(),
await self.do_roarequest_async('GetIsvStatus', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/corps/appStatus', 'json', req, runtime)
)
def get_flow_docs(
self,
task_id: str,
) -> dingtalkesign__2__0_models.GetFlowDocsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFlowDocsHeaders()
return self.get_flow_docs_with_options(task_id, headers, runtime)
async def get_flow_docs_async(
self,
task_id: str,
) -> dingtalkesign__2__0_models.GetFlowDocsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkesign__2__0_models.GetFlowDocsHeaders()
return await self.get_flow_docs_with_options_async(task_id, headers, runtime)
def get_flow_docs_with_options(
self,
task_id: str,
headers: dingtalkesign__2__0_models.GetFlowDocsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFlowDocsResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFlowDocsResponse(),
self.do_roarequest('GetFlowDocs', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/flowTasks/{task_id}/docs', 'json', req, runtime)
)
async def get_flow_docs_with_options_async(
self,
task_id: str,
headers: dingtalkesign__2__0_models.GetFlowDocsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkesign__2__0_models.GetFlowDocsResponse:
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.service_group):
real_headers['serviceGroup'] = headers.service_group
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers
)
return TeaCore.from_map(
dingtalkesign__2__0_models.GetFlowDocsResponse(),
await self.do_roarequest_async('GetFlowDocs', 'esign_2.0', 'HTTP', 'GET', 'AK', f'/v2.0/esign/flowTasks/{task_id}/docs', 'json', req, runtime)
)
<reponame>peterlynch/MoneydancePythonScripts
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# stockglance2020.py build:1016 - October 2020 - <NAME>
# Original code StockGlance.java Moneydance Extension Copyright <NAME> - https://github.com/jameslarus/stockglance
#
# Copyright (c) 2020, <NAME> StuWareSoftSystems
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES LOSS OF USE,
# DATA, OR PROFITS OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Modified by waynelloydsmith to run as standalone Python/Jython script StockGlance75.py and to show in which accounts the stocks are held.
# https://github.com/waynelloydsmith/Moneydance-Scripts/blob/master/StockGlance75.py
# Extensively modified/enhanced by <NAME> - StuWareSoftSystems - September 2020 to create stockglance2020.py with these features:
# - This script basically shows all stocks/funds summarised into single stocks/funds per row. I.E. consolidates data across all Accounts
# - Some of the code looks somewhat different to how I would write native Python, but it is as it is as it was converted from pure Java by waynelloydsmith
# - Shows QTY of shares
# - If you are running Windows and get file IO errors (e.g. 13) creating the extract, you likely have a permissions issue. Try a different location (e.g. your standard user directory)
# - Removed all non-functional Java / Python code and general tidy up.
# - Also fixed / replaced the JY/Java code to make JTable and the Scroll panes function properly
# - Addressed bug hiding some securities when not all prices found (by date) - by eliminating % as not needed anyway.
# - Price is taken from Current Price now, and NOT from price history. If you would like price history, let me know!
# - Added Parameter/filter screen/options and export to file....
# - The file will write a utf8 encoded file - so we strip out currency signs - else Excel treats them wrongly. You can specify to leave them included...
# -- We strip out all non-ASCII characters (code 128 and above) and also number separators. Delimiter will flip to a semi-colon if your decimal point is a comma!
# -- USAGE: Just execute and a popup will ask you to set parameters. You can just leave all as default if you wish.
# -- Change the defaults in the rows just below this statement...
# -- WARNING: Cash Balances are per account, not per security/currency.
# -- So the cash balances will always be for the whole account(s) included, not just filtered securities etc...
# -- Added extra columns with raw number that don't get displayed. Planned to use for custom sort,
# -- but found a workaround stripping non numerics out of text... (so not used)
# -- Found that JFileChooser with file extension filters hangs on Macs, so use FileDialog on Mac to also get Mac(ish) GUI LaF
# -- Version 3 - fixed bug so that .moneydance extension test only checks end of filename (not middle)
# -- Version 3c fiddled with the Accounts filter; added extra total (securities + cash balance) where whole account selected
# -- Version 3d eliminated rounding on the totals in base currency (user request)
# -- don't display base currency in local currency where same; make all filters uppercase
# -- V3d Fix small bug on check for whether all securities were same currency (didn't really affect much);
# -- also tried to deal better with LOCALE decimal point and comma....
# -- V4 - Enhanced to use CSV File writer and write raw numbers into the CSV file - let CSV writer handle the special character handling.......;
# -- altered pricing rounding
# -- V4b - tweaked to use Jython syntax (rather than Java)...syntax; Added "copyright to file extract";
# -- added version number - all cosmetic only; replaced AcctFilter()
# -- V4b - enhanced filters to include Currency Name, and Security Name in searches....
# -- V4b - added option to split / securities within account (rather than total across the accounts)... Messy display, but requested
# -- V4c - now saving parameters to file (within MD) so that they persist.
# -- V4d - added script encoding utf-8 (removed again in v4e)
# -- V4e - Added MD Cost Basis and the Unrealised Gain (calculated); also now save export path to disk
# -- V4e - Added trap for file write errors; added parameter to allow user to exclude totals from csv file; cleaned up row highlighting and cell neg colours
# -- V4f - Added file permissions check; added code to display file to stdout if file error. Allows user to copy / paste into Excel...
# -- V4g - Added % to gain calculation (user request); changed default extract location (search for User Home) to avoid internal MD locations
# -- V4g - re-added UTF8 coding; tinkered with display formatting (bold rows); enabled scrolling on footer table (totals) (user request); allow footer to gain focus and CTRL-C (copy)
# -- V4h - format CSV Gain% field as a %string; fixed Gain% on the final total row...
# -- V5 - Released version (from v4h)
# -- V5a - Some users report a problem saving files to some folders on Windows 10. It seems that Anti-malware or Windows Access Control is restricting access
# -- So, changed to FileDialog (from JFileChooser) for Windows as this seems to tell Windows to allow access.
# -- Added some console messages; fixed crash when no investment accounts exist.
# -- V5b - Removed the now redundant lUseMacFileChooser variable; enhanced some console messages...
# -- V5c - Code cleanup only - cosmetic only; Added parameter to allow User to select no rounding of price (useful if their settings on the security are set wrongly); switched to use MD's decimal setting
# -- v5d - Small tweak to parameter field (cosmetic only); Added a File BOM marker to help Excel open UTF-8 files (and changed open() to 'w' instead of 'wb')
# -- v5e - Cosmetic change to display; catch pickle.load() error (when restored dataset); Reverting to open() 'wb' - fix Excel CR/LF double line on Windows issue
# Build: 1000 - Slight change to myParameters; changed __file__ usage; code cleanup for IntelliJ; version_build change; changed versioning; changed export filename; Eliminated wait for script close..
# Build: 1000 - no functional changes; Added code fix for extension runtime to set moneydance variables (if not set);
# Build: 1000 - all print functions changed to work headless; added some popup warnings...; stream-lined common code.....
# Build: 1000 - Column widths now save....; optional parameter whether to write BOM to export file; added date/time to console log
# Build: 1001 - Cosmetic change to console to state when not splitting accounts; Added About menu (cosmetic)
# Build: 1002 - Cosmetic change to put main window in centre of screen; bug fix for text double-decimal point corrupting display in MyGainsRenderer()
# Build: 1002 - Enhanced MyPrint to catch unicode utf-8 encode/decode errors
# Build: 1003 - fixed raise(Exception) clauses ;->
# Build: 1004
legend=False,
linewidth=linewidth,
band_color=band_color,
)
band_up.plot_plain(
ax=ax_band_down,
color=unprojected_band_color,
linewidth=unprojected_linewidth,
)
dos_down.plot_elements(
ax=ax_dos_down,
elements=elements,
fill=fill,
alpha=alpha,
linewidth=linewidth,
sigma=sigma,
energyaxis='y',
color_list=color_list,
legend=legend,
total=True,
erange=erange,
)
dos_up.plot_elements(
ax=ax_dos_down,
elements=elements,
fill=fill,
alpha=0.25 * alpha,
alpha_line=0.25 * alpha,
linewidth=linewidth,
sigma=sigma,
energyaxis='y',
color_list=color_list,
legend=False,
total=True,
erange=erange,
)
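# The draw() call below realizes the tick labels so their count is known; MaxNLocator with
# prune='lower' then drops the lowest DOS x-tick, keeping its label from colliding with the
# band panel where the two axes meet (wspace=0).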
fig.canvas.draw()
nbins = len(ax_dos_down.get_xticklabels())
ax_dos_down.xaxis.set_major_locator(
MaxNLocator(nbins=nbins - 1, prune='lower'))
plt.tight_layout(pad=0.2)
plt.subplots_adjust(wspace=0, hspace=0.05)
if save:
plt.savefig(output)
else:
return fig, ax_band_up, ax_dos_up, ax_band_down, ax_dos_down
def band_dos_element_orbitals_spin_polarized(
band_folder,
dos_folder,
element_orbital_pairs,
output='band_dos_element_orbitals_sp.png',
scale_factor=6,
color_list=None,
legend=True,
linewidth=0.75,
band_color='black',
unprojected_band_color='gray',
unprojected_linewidth=0.6,
figsize=(8, 6),
width_ratios=[7, 3],
erange=[-6, 6],
hse=False,
kpath=None,
n=None,
fontsize=8,
annotations=['$\\uparrow$ ', '$\\downarrow$ '],
annotation_xy=(0.0125, 0.98),
save=True,
fill=True,
alpha=0.3,
sigma=0.05,
):
"""
This function plots a spin polarized band structure projected onto specified [element, orbital] pairs next to a spin
polarized density of states projected onto the same [element, orbital] pairs. The top figure highlights the spin up
bands and the bottom figure highlights the spin down bands.
Parameters:
band_folder (str): This is the folder that contains the VASP files for the band structure
dos_folder (str): This is the folder that contains the VASP files for the density of states
output (str): File name of the resulting plot.
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
element_orbital_pairs (list[list]): Selected orbitals on selected elements to plot.
This should take the form of [[element index, orbital_index], ...].
color_list (list): List of colors of the same length as the element_orbital_pairs
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (string): Color of the plain band structure
unprojected_band_color (str): Color of the unprojected band
unprojected_linewidth (float): Line width of the unprojected bands
figsize (list / tuple): Desired size of the image in inches (width, height)
width_ratios (list / tuple): Width ratio of the band plot and the dos plot.
erange (list / tuple): Range of energy to show in the plot [low, high]
kpath (str): High symmetry k-point path of band structure calculation
Due to the nature of the KPOINTS file for HSE calculations this
information is a required input for proper labeling of the figure
for HSE calculations. This information is extracted from the KPOINTS
files for non-HSE calculations. (G is automatically converted to \\Gamma)
n (int): Number of points between each high symmetry points.
This is also only required for HSE calculations. This number should be
known by the user, as it was used to generate the KPOINTS file.
fontsize (float): Font size of the text in the figure.
annotations (list): Annotations to put on the top and bottom (left and right) figures.
By default it will show the spin up and spin down arrows.
annotation_xy (list / tuple): Fractional (x, y) coordinates of the annotation location
save (bool): Determines whether to automatically save the figure or not. If not,
the figure and axis are returned for further manipulation.
fill (bool): Determines whether or not to fill underneath the plot
alpha (float): Alpha value for the fill
sigma (float): Standard deviation for gaussian filter
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing.
"""
fig, ax = plt.subplots(
nrows=2,
ncols=2,
sharey=True,
figsize=figsize,
dpi=400,
gridspec_kw={'width_ratios': width_ratios}
)
ax_band_up, ax_dos_up, ax_band_down, ax_dos_down = _figure_setup_band_dos_spin_polarized(
ax=ax,
fontsize=fontsize,
ylim=[erange[0], erange[1]]
)
band_up = Band(
folder=band_folder,
spin='up',
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
band_down = Band(
folder=band_folder,
spin='down',
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
dos_up = Dos(folder=dos_folder, spin='up')
dos_down = Dos(folder=dos_folder, spin='down')
bbox = dict(boxstyle='round', fc='white',
edgecolor='gray', alpha=0.95, pad=0.3)
ax_band_up.annotate(
annotations[0],
xy=annotation_xy,
xycoords='axes fraction',
va='top',
ha='left',
bbox=bbox,
fontsize=fontsize + 1,
)
ax_band_down.annotate(
annotations[1],
xy=annotation_xy,
xycoords='axes fraction',
va='top',
ha='left',
bbox=bbox,
fontsize=fontsize + 1,
)
band_up.plot_element_orbitals(
ax=ax_band_up,
scale_factor=scale_factor,
element_orbital_pairs=element_orbital_pairs,
color_list=color_list,
legend=False,
linewidth=linewidth,
band_color=band_color,
)
band_down.plot_plain(
ax=ax_band_up,
color=unprojected_band_color,
linewidth=unprojected_linewidth,
)
dos_up.plot_element_orbitals(
ax=ax_dos_up,
element_orbital_pairs=element_orbital_pairs,
fill=fill,
alpha=alpha,
linewidth=linewidth,
sigma=sigma,
energyaxis='y',
color_list=color_list,
legend=legend,
total=True,
erange=erange,
)
dos_down.plot_element_orbitals(
ax=ax_dos_up,
element_orbital_pairs=element_orbital_pairs,
fill=fill,
alpha=0.25 * alpha,
alpha_line=0.25 * alpha,
linewidth=linewidth,
sigma=sigma,
energyaxis='y',
color_list=color_list,
legend=False,
total=True,
erange=erange,
)
band_down.plot_element_orbitals(
ax=ax_band_down,
scale_factor=scale_factor,
element_orbital_pairs=element_orbital_pairs,
color_list=color_list,
legend=False,
linewidth=linewidth,
band_color=band_color,
)
band_up.plot_plain(
ax=ax_band_down,
color=unprojected_band_color,
linewidth=unprojected_linewidth,
)
dos_down.plot_element_orbitals(
ax=ax_dos_down,
element_orbital_pairs=element_orbital_pairs,
fill=fill,
alpha=alpha,
linewidth=linewidth,
sigma=sigma,
energyaxis='y',
color_list=color_list,
legend=legend,
total=True,
erange=erange,
)
dos_up.plot_element_orbitals(
ax=ax_dos_down,
element_orbital_pairs=element_orbital_pairs,
fill=fill,
alpha=0.25 * alpha,
alpha_line=0.25 * alpha,
linewidth=linewidth,
sigma=sigma,
energyaxis='y',
color_list=color_list,
legend=False,
total=True,
erange=erange,
)
fig.canvas.draw()
nbins = len(ax_dos_down.get_xticklabels())
ax_dos_down.xaxis.set_major_locator(
MaxNLocator(nbins=nbins - 1, prune='lower'))
plt.tight_layout(pad=0.2)
plt.subplots_adjust(wspace=0, hspace=0.05)
if save:
plt.savefig(output)
else:
return fig, ax_band_up, ax_dos_up, ax_band_down, ax_dos_down
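# Hedged usage sketch (folder names and the element/orbital indices are placeholders; see the
# docstring above for the expected [[element_index, orbital_index], ...] format):
#   band_dos_element_orbitals_spin_polarized(
#       band_folder='band_calc', dos_folder='dos_calc',
#       element_orbital_pairs=[[0, 0], [1, 1]],
#       output='band_dos_element_orbitals_sp.png')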
def band_dos_element_spd_spin_polarized(
band_folder,
dos_folder,
elements,
output='band_dos_element_spd_sp.png',
scale_factor=6,
order=['s', 'p', 'd'],
color_dict=None,
legend=True,
linewidth=0.75,
band_color='black',
unprojected_band_color='gray',
unprojected_linewidth=0.6,
figsize=(8, 6),
width_ratios=[7, 3],
erange=[-6, 6],
hse=False,
kpath=None,
n=None,
fontsize=8,
annotations=['$\\uparrow$ ', '$\\downarrow$ '],
annotation_xy=(0.0125, 0.98),
save=True,
fill=True,
alpha=0.3,
sigma=0.05,
):
"""
This function plots a spin polarized s, p, d projected band structure on a given element next to a spin polarized
s, p, d projected density of states on the same element. The top figure highlights the spin up bands and the bottom
figure highlights the spin down bands.
Parameters:
band_folder (str): This is the folder that contains the VASP files for the band structure
dos_folder (str): This is the folder that contains the VASP files for the density of states
elements (list): List of elements to project onto
output (str): File name of the resulting plot.
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
order (list): This determines the order in which the points are plotted on the
graph. This is an option because sometimes certain orbitals can be hidden
under others because they have a larger weight. For example, if the
weights of the d orbitals are greater than that of the s orbitals, it
might be smart to choose ['d', 'p', 's'] as the order so the s orbitals are
plotted over the d orbitals.
color_dict (dict[str][str]): This option allows the colors of the s, p, and d
orbitals to be specified. Should be in the form of:
{'s': <s color>, 'p': <p color>, 'd': <d color>}
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (string): Color of the plain band structure
unprojected_band_color (str): Color of the unprojected band
unprojected_linewidth (float): Line width of the unprojected bands
figsize (list / tuple): Desired size of the image in inches (width, height)
width_ratios (list / tuple): Width ratio of the band plot and the dos plot.
erange (list / tuple): Range of energy to show in the plot [low, high]
kpath (str): High symmetry k-point path of band structure calculation
Due to the nature of the KPOINTS file for HSE calculations this
information is a required input for proper labeling of the figure
for HSE calculations. This information is extracted from the KPOINTS
files for non-HSE calculations. (G is automatically converted to \\Gamma)
n (int): Number of points between each high symmetry points.
This is also only required for HSE calculations. This number should be
known by the user, as it was used to generate the KPOINTS file.
fontsize (float): Font size of the text in the figure.
annotations (list): Annotations to put on the top and bottom (left and right) figures.
By default it will show the spin up and spin down arrows.
annotation_xy (list / tuple): Fractional (x, y) coordinates of the annotation location
save (bool): Determines whether to automatically save the figure or not. If not,
the figure and axis are returned for further manipulation.
fill (bool): Determines whether or not to fill underneath the plot
alpha (float): Alpha value for the fill
sigma (float): Standard deviation for gaussian filter
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing.
"""
or sourceval_id or sourceval:
self.addImportedFrom(repo=repo, claim=new_prop, lang=lang, source_id=source_id, sourceval_id=sourceval_id, sourceval=sourceval, status=1)
# print("Reference added.")
if qualifier_id and qualval_id:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval_id=qualval_id, status=1)
# print("Qualifier added.")
elif qualifier_id and qualval:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval=qualval, status=1)
# print("Qualifier added.")
except:
print('Error in adding monolingual text.')
return 0
def addNumeric(self, prop_id='', prop_value='', lang='', source_id='', sourceval_id='', sourceval='', qualifier_id='', qualval='', qualval_id='', confirm='', overwrite='', append=''):
""" Adds numeric values to Wikidata """
print(self.page.title())
if prop_value:
try:
val = pywikibot.WbQuantity(amount=prop_value, site=enwp)
except:
print('Incorrect property value provided.\n')
return 1
self.page.get()
if prop_id in self.page.claims:
choice = ''
if not append and not overwrite:
choice = input('Property already exists. Select:\n\
1 to skip\n\
2 to over-write the existing property\n\
3 to add another value to the property\n')
if choice == '1':
return
elif choice == '2' or overwrite == 'y':
self.page.removeClaims(self.page.claims[prop_id])
elif choice > '3':
print("Invalid choice.\n")
return 1
try:
new_prop = pywikibot.Claim(repo, prop_id)
# print('hello')
new_prop.setTarget(val)
# print(val)
if confirm.lower() == 'y':
self.page.addClaim(new_prop, summary = u'Adding new numeric value')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
if lang or source_id or sourceval_id or sourceval:
self.addImportedFrom(repo=repo, claim=new_prop, lang=lang, source_id=source_id, sourceval_id=sourceval_id, sourceval=sourceval, status=1)
# print("Reference added.")
if qualifier_id and qualval_id:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval_id=qualval_id, status=1)
# print("Qualifier added.")
elif qualifier_id and qualval:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval=qualval, status=1)
# print("Qualifier added.")
else:
# confirmation
print(new_prop)
text = input("Do you want to save this property? (y/n) ")
if text == 'y':
self.page.addClaim(new_prop, summary = u'Adding new numeric value')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
if lang or source_id or sourceval_id or sourceval:
self.addImportedFrom(repo=repo, claim=new_prop, lang=lang, source_id=source_id, sourceval_id=sourceval_id, sourceval=sourceval, status=1)
# print("Reference added.")
if qualifier_id and qualval_id:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval_id=qualval_id, status=1)
# print("Qualifier added.")
elif qualifier_id and qualval:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval=qualval, status=1)
# print("Qualifier added.")
except:
print('Error in adding numeric value.')
return 0
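# Hedged usage sketch ('item' stands for an instance of the enclosing wrapper class, and the
# property id/value below are placeholders, not taken from the original script):
#   item.addNumeric(prop_id='P2044', prop_value='8848', confirm='y')
#   # with confirm='y' the claim is saved without the interactive prompt above.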
def addCoordinates(self, prop_id='', prop_value='', lang='', source_id='', sourceval_id='', sourceval='', qualifier_id='', qualval='', qualval_id='', confirm='', overwrite='', append=''):
""" Adds coordinates to Wikidata """
print(self.page.title())
if prop_value and '|' in prop_value:
prop_value = prop_value.split('|')
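# calc_coord() is defined elsewhere in this script; per the unpacking below it is expected to
# return (latitude, longitude, precision) derived from the '|'-separated value.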
try:
lat, lon, precision = calc_coord(prop_value)
except:
print('Something went wrong while adding coordinates.')
return
if precision <= 0.0:
print('Incorrect precision value obtained')
return
self.page.get()
if prop_id in self.page.claims:
choice = ''
if not append and not overwrite:
choice = input('Property already exists. Select:\n\
1 to skip\n\
2 to over-write the existing property\n\
3 to add another value to the property\n')
if choice == '1':
return
elif choice == '2' or overwrite == 'y':
self.page.removeClaims(self.page.claims[prop_id])
elif choice not in ('', '3'):
print("Invalid choice.\n")
return 1
try:
new_prop = pywikibot.Claim(repo, prop_id)
coordinate = pywikibot.Coordinate(lat=lat, lon=lon, precision=precision, site=enwp,globe_item=globe_item)
new_prop.setTarget(coordinate)
if confirm.lower() == 'y':
self.page.addClaim(new_prop, summary=u'Importing new coordinate')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
if lang or source_id or sourceval_id or sourceval:
self.addImportedFrom(repo=repo, claim=new_prop, lang=lang, source_id=source_id, sourceval_id=sourceval_id, sourceval=sourceval, status=1)
# print("Reference added.")
if qualifier_id and qualval_id:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval_id=qualval_id, status=1)
# print("Qualifier added.")
elif qualifier_id and qualval:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval=qualval, status=1)
# print("Qualifier added.")
else:
# confirmation
print(new_prop)
text = input("Do you want to save this property? (y/n) ")
if text == 'y':
self.page.addClaim(new_prop, summary = u'Importing new coordinate')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
if lang or source_id or sourceval_id or sourceval:
self.addImportedFrom(repo=repo, claim=new_prop, lang=lang, source_id=source_id, sourceval_id=sourceval_id, sourceval=sourceval, status=1)
# print("Reference added.")
if qualifier_id and qualval_id:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval_id=qualval_id, status=1)
# print("Qualifier added.")
elif qualifier_id and qualval:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval=qualval, status=1)
# print("Qualifier added.")
except:
print('Error in adding coordinates.')
def addDate(self, prop_id='', date='', lang='', source_id='', sourceval_id='', sourceval='', qualifier_id='', qualval='', qualval_id='', confirm='', overwrite='', append=''):
""" Adds numeric values to Wikidata """
print(self.page.title())
if date and not re.search(r'\d-\d-\d', date, re.IGNORECASE):
date = date.split()
try:
if len(date) == 3:
value = dateparser.parse(str(date[0])+' '+str(date[1])+' '+str(date[2]))
date = str(value.year) + '-' + str(value.month) + '-' + str(value.day)
elif len(date) == 2:
value = dateparser.parse(str(date[0])+' '+str(date[1]))
date = str(value.year) + '-' + str(value.month)
elif len(date) == 1:
value = dateparser.parse(str(date[0]))
date = str(value.year)
except:
print('Error in extracting date.\n')
return
if date and date != '0-0-0':
self.page.get()
if prop_id in self.page.claims:
choice = ''
if not append and not overwrite:
choice = input('Property already exists. Select:\n\
1 to skip\n\
2 to over-write the existing property\n\
3 to add another value to the property\n')
if choice == '1':
return
elif choice == '2' or overwrite == 'y':
self.page.removeClaims(self.page.claims[prop_id])
elif choice not in ('', '3'):
print("Invalid choice.\n")
return 1
now = datetime.datetime.now()
check_ok = True
if int(date.split('-')[0]) > now.year:
check_ok = False
try:
if int(date.split('-')[0]) == now.year and int(date.split('-')[1]) > now.month:
check_ok = False
except:
print("Invalid date.\n")
pass
if check_ok:
try:
new_prop = pywikibot.Claim(repo, prop_id)
if len(date.split('-')) == 3:
new_prop.setTarget(pywikibot.WbTime(year=int(date.split('-')[0]), month=int(date.split('-')[1]), day=int(date.split('-')[2])))
elif len(date.split('-')) == 2:
new_prop.setTarget(pywikibot.WbTime(year=int(date.split('-')[0]), month=int(date.split('-')[1])))
elif len(date.split('-')) == 1:
new_prop.setTarget(pywikibot.WbTime(year=int(date.split('-')[0])))
if confirm.lower() == 'y':
self.page.addClaim(new_prop, summary = u'Adding new date')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
if lang or source_id or sourceval_id or sourceval:
self.addImportedFrom(repo=repo, claim=new_prop, lang=lang, source_id=source_id, sourceval_id=sourceval_id, sourceval=sourceval, status=1)
# print("Reference added.")
if qualifier_id and qualval_id:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval_id=qualval_id, status=1)
# print("Qualifier added.")
elif qualifier_id and qualval:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval=qualval, status=1)
# print("Qualifier added.")
else:
# confirmation
print(new_prop)
text = input("Do you want to save this property? (y/n) ")
if text == 'y':
self.page.addClaim(new_prop, summary = u'Adding new date')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
if lang or source_id or sourceval_id or sourceval:
self.addImportedFrom(repo=repo, claim=new_prop, lang=lang, source_id=source_id, sourceval_id=sourceval_id, sourceval=sourceval, status=1)
# print("Reference added.")
if qualifier_id and qualval_id:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval_id=qualval_id, status=1)
# print("Qualifier added.")
elif qualifier_id and qualval:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval=qualval, status=1)
# print("Qualifier added.")
except:
print('Error in adding date.\n')
return 0
def addIdentifiers(self, prop_id='', prop_value='', lang='', source_id='', sourceval_id='', sourceval='', qualifier_id='', qualval='', qualval_id='', confirm='', overwrite='', append=''):
""" Adds numeric values to Wikidata """
print(self.page.title())
if not prop_value:
print('Incorrect property value provided.\n')
return 1
self.page.get()
if prop_id in self.page.claims:
choice = ''
if not append and not overwrite:
choice = input('Property already exists. Select:\n\
1 to skip\n\
2 to over-write the existing property\n\
3 to add another value to the property\n')
if choice == '1':
return
elif choice == '2' or overwrite == 'y':
self.page.removeClaims(self.page.claims[prop_id])
elif choice not in ('', '3'):
print("Invalid choice.\n")
return 1
try:
new_prop = pywikibot.Claim(repo, prop_id)
# print('hello')
new_prop.setTarget(prop_value)
# print(val)
if confirm.lower() == 'y':
self.page.addClaim(new_prop, summary = u'Adding new identifier')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
if lang or source_id or sourceval_id or sourceval:
self.addImportedFrom(repo=repo, claim=new_prop, lang=lang, source_id=source_id, sourceval_id=sourceval_id, sourceval=sourceval, status=1)
# print("Reference added.")
if qualifier_id and qualval_id:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval_id=qualval_id, status=1)
# print("Qualifier added.")
elif qualifier_id and qualval:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval=qualval, status=1)
# print("Qualifier added.")
else:
# confirmation
print(new_prop)
text = input("Do you want to save this property? (y/n) ")
if text == 'y':
self.page.addClaim(new_prop, summary = u'Adding new identifier')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
if lang or source_id or sourceval_id or sourceval:
self.addImportedFrom(repo=repo, claim=new_prop, lang=lang, source_id=source_id, sourceval_id=sourceval_id, sourceval=sourceval, status=1)
# print("Reference added.")
if qualifier_id and qualval_id:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval_id=qualval_id, status=1)
# print("Qualifier added.")
elif qualifier_id and qualval:
self.addQualifiers(repo=repo, claim=new_prop, qualifier_id=qualifier_id, qualval=qualval, status=1)
# print("Qualifier added.")
except:
print('Error in adding identifier.')
return 0
def checkClaimExistence(self, claim=''):
"""
Checks if a claim exists in Wikidata already
@param claim: the property and its value to check for
"""
claim_prop = claim.getID()
claim_target = claim.getTarget()
wd_items = self.page.get()
for props in wd_items['claims']:
if props == claim_prop:
try:
item = wd_items['claims'][props]
for value in item:
try:
value_qid = value.getTarget()
if claim_target.title() == value_qid.title():
return value
except:
pass
except:
print('Error in browsing through items.')
choice = input('Property and value do not exist. Select:\n\
1 to add it to Wikidata\n\
2 to skip\n')
if choice == '1':
self.page.addClaim(claim, summary = u'Adding new property')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
return claim
elif choice == '2':
print('Skipping the addition of property and source.\n')
return 0
else:
print('Invalid choice.\n')
return 0
return 0
def addImportedFrom(self, repo=repo, prop_id='', prop_value='', claim='', lang='', source_id='', sourceval_id='', sourceval='', status=0):
"""
Adds a reference/source
@param repo:
@param prop_id: ID of the property
@param prop_value: ID of the value associated with the property
@param claim: the property and its value that this reference is attached to
@param lang: language of the wiki - must be a value from 'langs' dict
@param status: decide whether to test for claim's existence or not
(0 - method is called directly by user
1 - method is called indirectly by other methods which add a property to Wd)
"""
if prop_id and prop_value:
try:
new_prop_val = pywikibot.ItemPage(enwd, prop_value)
claim = pywikibot.Claim(enwd, prop_id)
claim.setTarget(new_prop_val)
except:
print('Incorrect property id or value provided.\n')
return 1
if status == 0:
claim = self.checkClaimExistence(claim)
if repo and claim and lang and lang in langs.keys():
importedfrom = pywikibot.Claim(repo, 'P143') #imported from
importedwp = pywikibot.ItemPage(repo, langs[lang])
importedfrom.setTarget(importedwp)
claim.addSource(importedfrom, summary='Adding 1 reference: [[Property:P143]]')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
print('Reference/Source added successfully.\n')
if type(source_id) is list:
i = 0
for val_id in source_id:
if val_id == 'P143' and lang:
continue
if sourceval_id:
source_val = pywikibot.ItemPage(repo, sourceval_id[i])
elif sourceval:
source_val = sourceval[i]
source = pywikibot.Claim(repo, val_id)
source.setTarget(source_val)
claim.addSource(source, summary='Adding 1 source')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
print('Source added successfully.\n')
i += 1
else:
if sourceval_id:
source_val = pywikibot.ItemPage(repo, sourceval_id)
elif sourceval:
source_val = sourceval
if repo and claim and source_id:
source = pywikibot.Claim(repo, source_id)
source.setTarget(source_val)
claim.addSource(source, summary='Adding 1 source')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
print('Source added successfully.\n')
return 0
def addQualifiers(self, repo=repo, prop_id='', prop_value='', claim='', qualifier_id='', qualval='', qualval_id='', status=0):
"""
Adds a qualifier
@param qualifier_id: ID of the qualifier
@param qualval_id: ID of the qualifier's value
@type of all (except repo and claim): string
"""
if prop_id and prop_value:
try:
new_prop_val = pywikibot.ItemPage(enwd, prop_value)
claim = pywikibot.Claim(enwd, prop_id)
claim.setTarget(new_prop_val)
except:
print('Incorrect property id or value provided.\n')
return 1
if status == 0:
claim = self.checkClaimExistence(claim)
if type(qualifier_id) is list:
i = 0
for qual_id in qualifier_id:
if qualval_id:
qualifier_val = pywikibot.ItemPage(repo, qualval_id[i])
elif qualval:
qualifier_val = qualval[i]
qualifier = pywikibot.Claim(repo, qual_id)
qualifier.setTarget(qualifier_val)
claim.addQualifier(qualifier, summary='Adding 1 qualifier')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
print('Qualifier added successfully.\n')
i += 1
else:
if qualval_id:
qualifier_val = pywikibot.ItemPage(repo, qualval_id)
elif qualval:
qualifier_val = qualval
if repo and claim and qualifier_id:
qualifier = pywikibot.Claim(repo, qualifier_id)
qualifier.setTarget(qualifier_val)
claim.addQualifier(qualifier, summary='Adding 1 qualifier')
self.page = pywikibot.ItemPage(enwd, self.wd_value)
print('Qualifier added successfully.\n')
return 0
def main():
# page_name = input('Name of article: ')
# page_name = 'Hallock-Bilunas Farmstead'
wd_value = 'Q4115189'
# wp_page = ''
# wd_page = ''
# # Test for Wikipedia page
# try:
# wp_page = WpPage(page_name)
# print(wp_page.searchWpPage(props={'P50': ['<NAME>'], 'P123': ['Bloomsbury']}))
# print('\n')
# except:
# ('Page does not exist.\n')
# return 1
# if wp_page:
# wp_page.printWpContents()
# print('\n')
# info = wp_page.findInfobox()
# for prop in info.keys():
# print(str(prop) + ': ' + str(info[prop]))
# print('\n')
# Test for Wikidata
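# --- Hedged usage sketch (editor's illustration, not part of the original script) ---
# Assuming `wd_page` is an instance of the Wikidata page wrapper class whose
# methods are defined above (the class name is not visible in this excerpt),
# bound to the sandbox item Q4115189, the add* helpers might be combined like
# this; all property IDs and values are illustrative only:
#
#   wd_page.addNumeric(prop_id='P1082', prop_value='1234', confirm='y', append='y')
#   wd_page.addDate(prop_id='P571', date='1 January 2000', confirm='y', append='y')
#   wd_page.addIdentifiers(prop_id='P214', prop_value='113230702', confirm='y', append='y')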
<filename>libraryfree/venv/lib/python3.6/site-packages/pymongo/connection.py
# Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Tools for connecting to MongoDB.
.. warning::
**DEPRECATED:** Please use :mod:`~pymongo.mongo_client` instead.
.. seealso:: Module :mod:`~pymongo.master_slave_connection` for
connecting to master-slave clusters, and
:doc:`/examples/high_availability` for an example of how to connect
to a replica set, or specify a list of mongos instances for automatic
failover.
To get a :class:`~pymongo.database.Database` instance from a
:class:`Connection` use either dictionary-style or attribute-style
access:
.. doctest::
>>> from pymongo import Connection
>>> c = Connection()
>>> c.test_database
Database(Connection('localhost', 27017), u'test_database')
>>> c['test-database']
Database(Connection('localhost', 27017), u'test-database')
"""
from pymongo.mongo_client import MongoClient
from pymongo.errors import ConfigurationError
class Connection(MongoClient):
"""Connection to MongoDB.
"""
def __init__(self, host=None, port=None, max_pool_size=None,
network_timeout=None, document_class=dict,
tz_aware=False, _connect=True, **kwargs):
"""Create a new connection to a single MongoDB instance at *host:port*.
.. warning::
**DEPRECATED:** :class:`Connection` is deprecated. Please
use :class:`~pymongo.mongo_client.MongoClient` instead.
The resultant connection object has connection-pooling built
in. It also performs auto-reconnection when necessary. If an
operation fails because of a connection error,
:class:`~pymongo.errors.ConnectionFailure` is raised. If
auto-reconnection will be performed,
:class:`~pymongo.errors.AutoReconnect` will be
raised. Application code should handle this exception
(recognizing that the operation failed) and then continue to
execute.
Raises :class:`TypeError` if port is not an instance of
``int``. Raises :class:`~pymongo.errors.ConnectionFailure` if
the connection cannot be made.
The `host` parameter can be a full `mongodb URI
<http://dochub.mongodb.org/core/connections>`_, in addition to
a simple hostname. It can also be a list of hostnames or
URIs. Any port specified in the host string(s) will override
the `port` parameter. If multiple mongodb URIs containing
database or auth information are passed, the last database,
username, and password present will be used. For username and
passwords reserved characters like ':', '/', '+' and '@' must be
escaped following RFC 2396.
:Parameters:
- `host` (optional): hostname or IP address of the
instance to connect to, or a mongodb URI, or a list of
hostnames / mongodb URIs. If `host` is an IPv6 literal
it must be enclosed in '[' and ']' characters following
the RFC2732 URL syntax (e.g. '[::1]' for localhost)
- `port` (optional): port number on which to connect
- `max_pool_size` (optional): The maximum number of connections
that the pool will open simultaneously. If this is set, operations
will block if there are `max_pool_size` outstanding connections
from the pool. By default the pool size is unlimited.
- `network_timeout` (optional): timeout (in seconds) to use
for socket operations - default is no timeout
- `document_class` (optional): default class to use for
documents returned from queries on this connection
- `tz_aware` (optional): if ``True``,
:class:`~datetime.datetime` instances returned as values
in a document by this :class:`Connection` will be timezone
aware (otherwise they will be naive)
| **Other optional parameters can be passed as keyword arguments:**
- `socketTimeoutMS`: (integer or None) How long (in milliseconds) a
send or receive on a socket can take before timing out. Defaults to
``None`` (no timeout).
- `connectTimeoutMS`: (integer or None) How long (in milliseconds) a
connection can take to be opened before timing out. Defaults to
``20000``.
- `waitQueueTimeoutMS`: (integer or None) How long (in milliseconds)
a thread will wait for a socket from the pool if the pool has no
free sockets. Defaults to ``None`` (no timeout).
- `waitQueueMultiple`: (integer or None) Multiplied by max_pool_size
to give the number of threads allowed to wait for a socket at one
time. Defaults to ``None`` (no waiters).
- `socketKeepAlive`: (boolean) Whether to send periodic keep-alive
packets on connected sockets. Defaults to ``False`` (do not send
keep-alive packets).
- `auto_start_request`: If ``True`` (the default), each thread that
accesses this Connection has a socket allocated to it for the
thread's lifetime, or until :meth:`end_request` is called.
- `use_greenlets`: if ``True``, :meth:`start_request()` will ensure
that the current greenlet uses the same socket for all operations
until :meth:`end_request()`. Defaults to ``False``.
| **Write Concern options:**
- `safe`: :class:`Connection` **disables** acknowledgement of write
operations. Use ``safe=True`` to enable write acknowledgement.
- `w`: (integer or string) If this is a replica set, write operations
will block until they have been replicated to the specified number
or tagged set of servers. `w=<int>` always includes the replica set
primary (e.g. w=3 means write to the primary and wait until
replicated to **two** secondaries). Implies safe=True.
- `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
in milliseconds to control how long to wait for write propagation
to complete. If replication does not complete in the given
timeframe, a timeout exception is raised. Implies safe=True.
- `j`: If ``True`` block until write operations have been committed
to the journal. Cannot be used in combination with `fsync`. Prior
to MongoDB 2.6 this option was ignored if the server was running
without journaling. Starting with MongoDB 2.6 write operations will
fail with an exception if this option is used when the server is
running without journaling. Implies safe=True.
- `fsync`: If ``True`` and the server is running without journaling,
blocks until the server has synced all data files to disk. If the
server is running with journaling, this acts the same as the `j`
option, blocking until write operations have been committed to the
journal. Cannot be used in combination with `j`. Implies safe=True.
| **Replica-set keyword arguments for connecting with a replica-set
- either directly or via a mongos:**
| (ignored by standalone mongod instances)
- `slave_okay` or `slaveOk` (deprecated): Use `read_preference`
instead.
- `replicaSet`: (string) The name of the replica-set to connect to.
The driver will verify that the replica-set it connects to matches
this name. Implies that the hosts specified are a seed list and the
driver should attempt to find all members of the set. *Ignored by
mongos*. Defaults to ``None``.
- `read_preference`: The read preference for this client. If
connecting to a secondary then a read preference mode *other* than
PRIMARY is required - otherwise all queries will throw a
:class:`~pymongo.errors.AutoReconnect` "not master" error.
See :class:`~pymongo.read_preferences.ReadPreference` for all
available read preference options. Defaults to ``PRIMARY``.
- `tag_sets`: Ignored unless connecting to a replica-set via mongos.
Specify a priority-order for tag sets, provide a list of
tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag
set, ``{}``, means "read from any member that matches the mode,
ignoring tags." Defaults to ``[{}]``, meaning "ignore members'
tags."
| **SSL configuration:**
- `ssl`: If ``True``, create the connection to the server using SSL.
Defaults to ``False``.
- `ssl_keyfile`: The private keyfile used to identify the local
connection against mongod. If included with the ``certfile`` then
only the ``ssl_certfile`` is needed. Implies ``ssl=True``.
Defaults to ``None``.
- `ssl_certfile`: The certificate file used to identify the local
connection against mongod. Implies ``ssl=True``. Defaults to
``None``.
- `ssl_cert_reqs`: The parameter cert_reqs specifies whether a
certificate is required from the other side of the connection,
and whether it will be validated if provided. It must be one of the
three values ``ssl.CERT_NONE`` (certificates ignored),
``ssl.CERT_OPTIONAL`` (not required, but validated if provided), or
``ssl.CERT_REQUIRED`` (required and validated). If the value of
this parameter is not ``ssl.CERT_NONE``, then the ``ssl_ca_certs``
parameter must point to a file of CA certificates.
Implies ``ssl=True``. Defaults to ``ssl.CERT_NONE``.
- `ssl_ca_certs`: The ca_certs file contains a set of concatenated
"certification authority" certificates, which are used to validate
certificates passed from the other end of the connection.
Implies ``ssl=True``. Defaults to ``None``.
.. seealso:: :meth:`end_request`
.. versionchanged:: 2.5
Added additional ssl options
.. versionchanged:: 2.3
Added support for failover between mongos seed list members.
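# --- Hedged usage sketch (editor's illustration, not part of the original module) ---
# A minimal example of the deprecated Connection API documented above, using
# only parameters described in the docstring; the host and replica-set name
# are placeholders:
#
#   from pymongo import Connection
#   conn = Connection('mongodb://localhost:27017', replicaSet='rs0',
#                     w=2, wtimeout=5000, tz_aware=True)
#   db = conn['test-database']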
#
# peepdf is a tool to analyse and modify PDF files
# http://peepdf.eternal-todo.com
# By <NAME> <jesparza AT eternal-todo.com>
#
# Copyright (C) 2011-2017 <NAME>
#
# This file is part of peepdf.
#
# peepdf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# peepdf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with peepdf. If not, see <http://www.gnu.org/licenses/>.
#
'''
Module to manage cryptographic operations with PDF files
'''
import hashlib,struct,random,warnings,aes,sys
from itertools import cycle, izip
warnings.filterwarnings("ignore")
paddingString = '\x28\xBF\x4E\x5E\x4E\x75\x8A\x41\x64\x00\x4E\x56\xFF\xFA\x01\x08\x2E\x2E\x00\xB6\xD0\x68\x3E\x80\x2F\x0C\xA9\xFE\x64\x53\x69\x7A'
def computeEncryptionKey(password, dictOwnerPass, dictUserPass, dictOE, dictUE, fileID, pElement, dictKeyLength = 128, revision = 3, encryptMetadata = False, passwordType = None):
'''
Compute an encryption key to encrypt/decrypt the PDF file
@param password: The password entered by the user
@param dictOwnerPass: The owner password from the standard security handler dictionary
@param dictUserPass: The user password from the standard security handler dictionary
@param dictOE: The owner encrypted string from the standard security handler dictionary
@param dictUE: The user encrypted string from the standard security handler dictionary
@param fileID: The /ID element in the trailer dictionary of the PDF file
@param pElement: The /P element of the Encryption dictionary
@param dictKeyLength: The length of the key
@param revision: The algorithm revision
@param encryptMetadata: A boolean extracted from the standard security handler dictionary to specify if it's necessary to encrypt the document metadata or not
@param passwordType: It specifies the given password type. It can be 'USER', 'OWNER' or None.
@return: A tuple (status,statusContent), where statusContent is the encryption key in case status = 0 or an error message in case status = -1
'''
try:
if revision != 5:
keyLength = dictKeyLength/8
lenPass = len(password)
if lenPass > 32:
password = password[:32]
elif lenPass < 32:
password += paddingString[:32-lenPass]
md5input = password + dictOwnerPass + struct.pack('<i',int(pElement)) + fileID
if revision > 3 and not encryptMetadata:
md5input += '\xFF'*4
key = hashlib.md5(md5input).digest()
if revision > 2:
counter = 0
while counter < 50:
key = hashlib.md5(key[:keyLength]).digest()
counter += 1
key = key[:keyLength]
elif revision == 2:
key = key[:5]
return (0, key)
else:
if passwordType == 'USER':
password = password.encode('utf-8')[:127]
kSalt = dictUserPass[40:48]
intermediateKey = hashlib.sha256(password + kSalt).digest()
ret = aes.decryptData('\0'*16+dictUE, intermediateKey)
elif passwordType == 'OWNER':
password = password.encode('utf-8')[:127]
kSalt = dictOwnerPass[40:48]
intermediateKey = hashlib.sha256(password + kSalt + dictUserPass).digest()
ret = aes.decryptData('\0'*16+dictOE, intermediateKey)
return ret
except:
return (-1, 'ComputeEncryptionKey error: %s %s' % (str(sys.exc_info()[0]),str(sys.exc_info()[1])))
def computeObjectKey(id, generationNum, encryptionKey, keyLengthBytes, algorithm = 'RC4'):
'''
Compute the key necessary to encrypt each object, depending on the id and generation number. Only necessary with /V < 5.
@param id: The object id
@param generationNum: The generation number of the object
@param encryptionKey: The encryption key
@param keyLengthBytes: The length of the encryption key in bytes
@param algorithm: The algorithm used in the encryption/decryption process
@return A tuple (status,statusContent), where statusContent is the computed key in case status = 0 or an error message in case status = -1
'''
try:
key = encryptionKey + struct.pack('<i',id)[:3] + struct.pack('<i',generationNum)[:2]
if algorithm == 'AES':
key += '\<KEY>' # sAlT
key = hashlib.md5(key).digest()
if keyLengthBytes+5 < 16:
key = key[:keyLengthBytes+5]
else:
key = key[:16]
# AES: block size = 16 bytes, initialization vector (16 bytes), random, first bytes encrypted string
return (0, key)
except:
return (-1, 'ComputeObjectKey error: %s %s' % (str(sys.exc_info()[0]),str(sys.exc_info()[1])))
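# --- Hedged usage sketch (editor's illustration, not part of peepdf) ---
# Deriving the per-object RC4 key for object id 5, generation 0 from a
# hypothetical 40-bit (5-byte) file encryption key with computeObjectKey()
# above; keyLengthBytes + 5 = 10 < 16, so the MD5 digest is truncated to
# 10 bytes:
#
#   ret = computeObjectKey(5, 0, '\x01\x02\x03\x04\x05', 5)
#   if ret[0] == 0:
#       objectKey = ret[1]   # 10-byte key used to decrypt that object's strings/streams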
def computeOwnerPass(ownerPassString, userPassString, keyLength = 128, revision = 3):
'''
Compute the owner password necessary to compute the encryption key of the PDF file
@param ownerPassString: The owner password entered by the user
@param userPassString: The user password entered by the user
@param keyLength: The length of the key
@param revision: The algorithm revision
@return A tuple (status,statusContent), where statusContent is the computed password in case status = 0 or an error message in case status = -1
'''
try:
# TODO: revision 5
keyLength = keyLength/8
lenPass = len(ownerPassString)
if lenPass > 32:
ownerPassString = ownerPassString[:32]
elif lenPass < 32:
ownerPassString += paddingString[:32-lenPass]
rc4Key = hashlib.md5(ownerPassString).digest()
if revision > 2:
counter = 0
while counter < 50:
rc4Key = hashlib.md5(rc4Key).digest()
counter += 1
rc4Key = rc4Key[:keyLength]
lenPass = len(userPassString)
if lenPass > 32:
userPassString = userPassString[:32]
elif lenPass < 32:
userPassString += paddingString[:32-lenPass]
ownerPass = RC4(userPassString,rc4Key)
if revision > 2:
counter = 1
while counter <= 19:
newKey = ''
for i in range(len(rc4Key)):
newKey += chr(ord(rc4Key[i]) ^ counter)
ownerPass = RC4(ownerPass,newKey)
counter += 1
return (0, ownerPass)
except:
return (-1, 'ComputeOwnerPass error: %s %s' % (str(sys.exc_info()[0]),str(sys.exc_info()[1])))
def computeUserPass(userPassString, dictO, fileID, pElement, keyLength = 128, revision = 3, encryptMetadata = False):
'''
Compute the user password of the PDF file
@param userPassString: The user password entered by the user
@param dictO: The computed owner password (the /O value of the /Encrypt dictionary)
@param fileID: The /ID element in the trailer dictionary of the PDF file
@param pElement: The /P element of the /Encryption dictionary
@param keyLength: The length of the key
@param revision: The algorithm revision
@param encryptMetadata: A boolean extracted from the standard security handler dictionary to specify if it's necessary to encrypt the document metadata or not
@return: A tuple (status,statusContent), where statusContent is the computed password in case status = 0 or an error message in case status = -1
'''
# TODO: revision 5
userPass = ''
dictU = ''
dictOE = ''
dictUE = ''
ret = computeEncryptionKey(userPassString, dictO, dictU, dictOE, dictUE, fileID, pElement, keyLength, revision, encryptMetadata)
if ret[0] != -1:
rc4Key = ret[1]
else:
return ret
try:
if revision == 2:
userPass = RC4(paddingString,rc4Key)
elif revision > 2:
counter = 1
md5Input = paddingString + fileID
hashResult = hashlib.md5(md5Input).digest()
userPass = RC4(hashResult,rc4Key)
while counter <= 19:
newKey = ''
for i in range(len(rc4Key)):
newKey += chr(ord(rc4Key[i]) ^ counter)
userPass = RC4(userPass,newKey)
counter += 1
counter = 0
while counter < 16:
userPass += chr(random.randint(32,255))
counter += 1
else:
# This should not be possible or the PDF specification does not say anything about it
return (-1, 'ComputeUserPass error: revision number is < 2 (%d)' % revision)
return (0, userPass)
except:
return (-1, 'ComputeUserPass error: %s %s' % (str(sys.exc_info()[0]),str(sys.exc_info()[1])))
def isUserPass(password, computedUserPass, dictU, revision):
'''
Checks if the given password is the User password of the file
@param password: The given password or the empty password
@param computedUserPass: The computed user password of the file
@param dictU: The /U element of the /Encrypt dictionary
@param revision: The number of revision of the standard security handler
@return The boolean telling if the given password is the user password or not
'''
if revision == 5:
vSalt = dictU[32:40]
inputHash = hashlib.sha256(password + vSalt).digest()
if inputHash == dictU[:32]:
return True
else:
return False
elif revision == 3 or revision == 4:
if computedUserPass[:16] == dictU[:16]:
return True
else:
return False
elif revision < 3:
if computedUserPass == dictU:
return True
else:
return False
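# --- Hedged usage sketch (editor's illustration, not part of peepdf) ---
# For revisions 2-4 a typical user-password check combines computeUserPass()
# and isUserPass(): the candidate password is expanded into a /U value and
# compared with the one stored in the /Encrypt dictionary:
#
#   ret = computeUserPass(password, dictO, fileID, pElement, keyLength, revision, encryptMetadata)
#   if ret[0] == 0 and isUserPass(password, ret[1], dictU, revision):
#       pass   # the supplied password is the user password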
def isOwnerPass(password, dictO, dictU, computedUserPass, keyLength, revision):
'''
Checks if the given password is the owner password of the file
@param password: The given password or the empty password
@param dictO: The /O element of the /Encrypt dictionary
@param dictU: The /U element of the /Encrypt dictionary
@param computedUserPass: The computed user password of the file
@param keyLength: The length of the key
@param revision: The algorithm revision
@return The boolean telling if the given password is the owner password or not
'''
if revision == 5:
vSalt = dictO[32:40]
inputHash = hashlib.sha256(password + vSalt + dictU).digest()
if inputHash == dictO[:32]:
return True
else:
return False
import os
import logging
import requests
import json
class NewsArticle(object):
def __init__(self):
self.title = None
self.description = None
self.publishedAt = None
self.author = None
self.url = None
self.urlToImage = None
def _get_json_attribute(self, data, name, def_value=None):
if name in data:
return data[name]
else:
logging.debug ("Attribute [%s] missing from New API Article data"%name)
return def_value
def parse_json(self, data):
self.title = self._get_json_attribute(data, "title")
self.description = self._get_json_attribute(data, "description")
self.publishedAt = self._get_json_attribute(data, "publishedAt")
self.author = self._get_json_attribute(data, "author")
self.url = self._get_json_attribute(data, "url")
self.urlToImage = self._get_json_attribute(data, "urlToImage")
def to_json(self):
data = {}
data["title"] = self.title
data["description"] = self.description
data["publishedAt"] = self.publishedAt
data["author"] = self.author
data["url"] = self.url
data["urlToImage"] = self.urlToImage
return data
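# --- Hedged usage sketch (editor's illustration, not part of the original module) ---
# parse_json() and to_json() are symmetric over the six attributes above, so an
# article dictionary from a News API response can be round-tripped like this
# (field values are placeholders):
#
#   article = NewsArticle()
#   article.parse_json({"title": "Example headline", "author": "Someone",
#                       "url": "http://example.com", "publishedAt": "2017-01-01T00:00:00Z"})
#   assert article.to_json()["title"] == "Example headline"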
# https://newsapi.org/bbc-news-api
class NewsAPI(object):
BASE_URL = "https://newsapi.org/v1/articles?source=%s&sortBy=%s&apiKey=%s"
# Single news feeds
ABC_NEWS_AU = "abc-news-au"
AL_JAZEERA_ENGLISH = "al-jazeera-english"
ARS_TECHNICA = "ars-technica"
ASSOCIATED_PRESS = "associated-press"
BBC_NEWS = "bbc-news"
BBC_SPORT = "bbc-sport"
BLOOMBERG = "bloomberg"
BUSINESS_INSIDER = "business-insider"
BUSINESS_INSIDER_UK = "business-insider-uk"
BUZZFEED = "buzzfeed"
CNBC = "cnbc"
CNN = "cnn"
DAILY_MAIL = "daily-mail"
ENGADGET = "engadget"
ENTERTAINMENT_WEEKLY = "entertainment-weekly"
ESPN = "espn"
ESPN_CRIC_INFO = "espn-cric-info"
FINANCIAL_TIMES = "financial-times"
FOOTBALL_ITALIA = "football-italia"
FORTUNE = "fortune"
FOUR_FOUR_TWO = "four-four-two"
FOX_SPORTS = "fox-sports"
GOOGLE_NEWS = "google-news"
HACKER_NEWS = "hacker-news"
IGN = "ign"
INDEPENDENT = "independent"
MASHABLE = "mashable"
METRO = "metro"
MIRROR = "mirror"
MTV_NEWS = "mtv-news"
MTV_NEWS_UK = "mtv-news-uk"
NATIONAL_GEOGRAPHIC = "national-geographic"
NEW_SCIENTIST = "new-scientist"
NEWSWEEK = "newsweek"
NEW_YORK_MAGAZINE = "new-york-magazine"
NFL_NEWS = "nfl-news"
POLYGON = "polygon"
RECODE = "recode"
REDDIT_R_ALL = "reddit-r-all"
REUTERS = "reuters"
TALKSPORT = "talksport"
TECHCRUNCH = "techcrunch"
TECHRADAR = "techradar"
THE_ECONOMIST = "the-economist"
THE_GUARDIAN_AU = "the-guardian-au"
THE_GUARDIAN_UK = "the-guardian-uk"
THE_HUFFINGTON_POST = "the-huffington-post"
THE_NEW_YORK_TIMES = "the-new-york-times"
THE_NEXT_WEB = "the-next-web"
THE_SPORT_BIBLE = "the-sport-bible"
THE_TELEGRAPH = "the-telegraph"
THE_VERGE = "the-verge"
THE_WALL_STREET_JOURNAL = "the-wall-street-journal"
THE_WASHINGTON_POST = "the-washington-post"
TIME = "time"
USA_TODAY = "usa-today"
# Collections
BUSINESS = "business"
ENTERTAINMENT = " entertainment"
GAMING = "gaming"
MUSIC = "music"
SCIENCE_AND_NATURE = "science_and_nature"
SPORT = "sport"
TECHNOLOGY = "technology"
UK_NEWS = "uk_news"
UK_NEWSPAPERS = "uk_newspapers"
def __init__(self, license_keys):
self.function_mapping = {
NewsAPI.ABC_NEWS_AU: NewsAPI.abc_news_au,
NewsAPI.AL_JAZEERA_ENGLISH: NewsAPI.al_jazeera_english,
NewsAPI.ARS_TECHNICA: NewsAPI.ars_technica,
NewsAPI.ASSOCIATED_PRESS: NewsAPI.associated_press,
NewsAPI.BBC_NEWS: NewsAPI.bbc_news,
NewsAPI.BBC_SPORT: NewsAPI.bbc_sport,
NewsAPI.BLOOMBERG: NewsAPI.bloomberg,
NewsAPI.BUSINESS_INSIDER: NewsAPI.business_insider,
NewsAPI.BUSINESS_INSIDER_UK: NewsAPI.business_insider_uk,
NewsAPI.BUZZFEED: NewsAPI.buzzfeed,
NewsAPI.CNBC: NewsAPI.cnbc,
NewsAPI.CNN: NewsAPI.cnn,
NewsAPI.DAILY_MAIL: NewsAPI.daily_mail,
NewsAPI.ENGADGET: NewsAPI.engadget,
NewsAPI.ENTERTAINMENT_WEEKLY: NewsAPI.entertainment_weekly,
NewsAPI.ESPN: NewsAPI.espn,
NewsAPI.ESPN_CRIC_INFO: NewsAPI.espn_cric_info,
NewsAPI.FINANCIAL_TIMES: NewsAPI.financial_times,
NewsAPI.FOOTBALL_ITALIA: NewsAPI.football_italia,
NewsAPI.FORTUNE: NewsAPI.fortune,
NewsAPI.FOUR_FOUR_TWO: NewsAPI.four_four_two,
NewsAPI.FOX_SPORTS: NewsAPI.fox_sports,
NewsAPI.GOOGLE_NEWS: NewsAPI.google_news,
NewsAPI.HACKER_NEWS: NewsAPI.hacker_news,
NewsAPI.IGN: NewsAPI.ign,
NewsAPI.INDEPENDENT: NewsAPI.independent,
NewsAPI.MASHABLE: NewsAPI.mashable,
NewsAPI.METRO: NewsAPI.metro,
NewsAPI.MIRROR: NewsAPI.mirror,
NewsAPI.MTV_NEWS: NewsAPI.mtv_news,
NewsAPI.MTV_NEWS_UK: NewsAPI.mtv_news_uk,
NewsAPI.NATIONAL_GEOGRAPHIC: NewsAPI.national_geographic,
NewsAPI.NEW_SCIENTIST: NewsAPI.new_scientist,
NewsAPI.NEWSWEEK: NewsAPI.newsweek,
NewsAPI.NEW_YORK_MAGAZINE: NewsAPI.new_york_magazine,
NewsAPI.NFL_NEWS: NewsAPI.nfl_news,
NewsAPI.POLYGON: NewsAPI.polygon,
NewsAPI.RECODE: NewsAPI.recode,
NewsAPI.REDDIT_R_ALL: NewsAPI.reddit,
NewsAPI.REUTERS: NewsAPI.reuters,
NewsAPI.TALKSPORT: NewsAPI.talksport,
NewsAPI.TECHCRUNCH: NewsAPI.techcrunch,
NewsAPI.TECHRADAR: NewsAPI.techradar,
NewsAPI.THE_ECONOMIST: NewsAPI.the_economist,
NewsAPI.THE_GUARDIAN_AU: NewsAPI.the_guardian_au,
NewsAPI.THE_GUARDIAN_UK: NewsAPI.the_guardian_uk,
NewsAPI.THE_HUFFINGTON_POST: NewsAPI.the_huffington_post,
NewsAPI.THE_NEW_YORK_TIMES: NewsAPI.the_new_york_times,
NewsAPI.THE_NEXT_WEB: NewsAPI.the_next_web,
NewsAPI.THE_SPORT_BIBLE: NewsAPI.the_sport_bible,
NewsAPI.THE_TELEGRAPH: NewsAPI.the_telegraph,
NewsAPI.THE_VERGE: NewsAPI.the_verge,
NewsAPI.THE_WALL_STREET_JOURNAL: NewsAPI.the_wall_street_journal,
NewsAPI.THE_WASHINGTON_POST: NewsAPI.the_washington_post,
NewsAPI.TIME: NewsAPI.time,
NewsAPI.USA_TODAY: NewsAPI.usa_today,
NewsAPI.BUSINESS: NewsAPI.business,
NewsAPI.ENTERTAINMENT: NewsAPI.entertainment,
NewsAPI.GAMING: NewsAPI.gaming,
NewsAPI.MUSIC: NewsAPI.music,
NewsAPI.SCIENCE_AND_NATURE: NewsAPI.science_and_nature,
NewsAPI.SPORT: NewsAPI.sport,
NewsAPI.TECHNOLOGY: NewsAPI.technology,
NewsAPI.UK_NEWS: NewsAPI.uk_news,
NewsAPI.UK_NEWSPAPERS: NewsAPI.uk_newspapers,
}
if license_keys.has_key('NEWSAPI_API_KEY'):
self.api_key = license_keys.get_key('NEWSAPI_API_KEY')
else:
raise Exception ("No valid license key METOFFICE_API_KEY found")
@staticmethod
def _format_url(service, api_key, sortBy="top"):
return NewsAPI.BASE_URL%(service, sortBy, api_key)
@staticmethod
def _get_data(url_str, api_key, max, sort, reverse):
url = NewsAPI._format_url(url_str, api_key)
return NewsAPI._get_news_feed_articles(url, max, sort, reverse)
@staticmethod
def _get_news_feed_articles(url, max, sort, reverse):
logging.debug("News API URL: [%s]"%url)
response = requests.get(url)
articles = []
if response.status_code == 200:
header_splits = response.headers['content-type'].split(";")
if header_splits[0] == 'application/json':
json = response.json()
for article_data in json['articles']:
article = NewsArticle()
article.parse_json(article_data)
articles.append(article)
logging.debug(article.description)
if sort is True:
logging.debug("Sorting articles,, reverse=%s" % str(reverse))
articles.sort(key=lambda article: article.publishedAt, reverse=reverse)
if max != 0:
logging.debug("Returning max %d articles" % max)
articles = articles[:max]
else:
logging.debug("Returning all articles")
else:
logging.error("NewsAPI request none JSON object")
else:
logging.error("NewsAPI request returned error code %d"%response.status_code)
return articles
@staticmethod
def abc_news_au(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.ABC_NEWS_AU, api_key, max, sort, reverse)
@staticmethod
def al_jazeera_english(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.AL_JAZEERA_ENGLISH, api_key, max, sort, reverse)
@staticmethod
def ars_technica(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.ARS_TECHNICA, api_key, max, sort, reverse)
@staticmethod
def associated_press(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.ASSOCIATED_PRESS, api_key, max, sort, reverse)
@staticmethod
def bbc_news(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.BBC_NEWS, api_key, max, sort, reverse)
@staticmethod
def bbc_sport(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.BBC_SPORT, api_key, max, sort, reverse)
@staticmethod
def bloomberg(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.BLOOMBERG, api_key, max, sort, reverse)
@staticmethod
def business_insider(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.BUSINESS_INSIDER, api_key, max, sort, reverse)
@staticmethod
def business_insider_uk(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.BUSINESS_INSIDER_UK, api_key, max, sort, reverse)
@staticmethod
def buzzfeed(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.BUZZFEED, api_key, max, sort, reverse)
@staticmethod
def cnbc(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.CNBC, api_key, max, sort, reverse)
@staticmethod
def cnn(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.CNN, api_key, max, sort, reverse)
@staticmethod
def daily_mail(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.DAILY_MAIL, api_key, max, sort, reverse)
@staticmethod
def engadget(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.ENGADGET, api_key, max, sort, reverse)
@staticmethod
def entertainment_weekly(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.ENTERTAINMENT_WEEKLY, api_key, max, sort, reverse)
@staticmethod
def espn(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.ESPN, api_key, max, sort, reverse)
@staticmethod
def espn_cric_info(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.ESPN_CRIC_INFO, api_key, max, sort, reverse)
@staticmethod
def financial_times(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.FINANCIAL_TIMES, api_key, max, sort, reverse)
@staticmethod
def football_italia(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.FOOTBALL_ITALIA, api_key, max, sort, reverse)
@staticmethod
def fortune(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.FORTUNE, api_key, max, sort, reverse)
@staticmethod
def four_four_two(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.FOUR_FOUR_TWO, api_key, max, sort, reverse)
@staticmethod
def fox_sports(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.FOX_SPORTS, api_key, max, sort, reverse)
@staticmethod
def google_news(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.GOOGLE_NEWS, api_key, max, sort, reverse)
@staticmethod
def hacker_news(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.HACKER_NEWS, api_key, max, sort, reverse)
@staticmethod
def ign(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.IGN, api_key, max, sort, reverse)
@staticmethod
def independent(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.INDEPENDENT, api_key, max, sort, reverse)
@staticmethod
def mashable(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.MASHABLE, api_key, max, sort, reverse)
@staticmethod
def metro(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.METRO, api_key, max, sort, reverse)
@staticmethod
def mirror(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.MIRROR, api_key, max, sort, reverse)
@staticmethod
def mtv_news(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.MTV_NEWS, api_key, max, sort, reverse)
@staticmethod
def mtv_news_uk(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.MTV_NEWS_UK, api_key, max, sort, reverse)
@staticmethod
def national_geographic(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.NATIONAL_GEOGRAPHIC, api_key, max, sort, reverse)
@staticmethod
def new_scientist(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.NEW_SCIENTIST, api_key, max, sort, reverse)
@staticmethod
def newsweek(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.NEWSWEEK, api_key, max, sort, reverse)
@staticmethod
def new_york_magazine(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.NEW_YORK_MAGAZINE, api_key, max, sort, reverse)
@staticmethod
def nfl_news(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.NFL_NEWS, api_key, max, sort, reverse)
@staticmethod
def polygon(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.POLYGON, api_key, max, sort, reverse)
@staticmethod
def recode(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.RECODE, api_key, max, sort, reverse)
@staticmethod
def reddit(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.REDDIT_R_ALL, api_key, max, sort, reverse)
@staticmethod
def reuters(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.REUTERS, api_key, max, sort, reverse)
@staticmethod
def talksport(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.TALKSPORT, api_key, max, sort, reverse)
@staticmethod
def techcrunch(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.TECHCRUNCH, api_key, max, sort, reverse)
@staticmethod
def techradar(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.TECHRADAR, api_key, max, sort, reverse)
@staticmethod
def the_economist(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_ECONOMIST, api_key, max, sort, reverse)
@staticmethod
def the_guardian_au(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_GUARDIAN_AU, api_key, max, sort, reverse)
@staticmethod
def the_guardian_uk(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_GUARDIAN_UK, api_key, max, sort, reverse)
@staticmethod
def the_huffington_post(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_HUFFINGTON_POST, api_key, max, sort, reverse)
@staticmethod
def the_new_york_times(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_NEW_YORK_TIMES, api_key, max, sort, reverse)
@staticmethod
def the_next_web(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_NEXT_WEB, api_key, max, sort, reverse)
@staticmethod
def the_sport_bible(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_SPORT_BIBLE, api_key, max, sort, reverse)
@staticmethod
def the_telegraph(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_TELEGRAPH, api_key, max, sort, reverse)
@staticmethod
def the_verge(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_VERGE, api_key, max, sort, reverse)
@staticmethod
def the_wall_street_journal(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_WALL_STREET_JOURNAL, api_key, max, sort, reverse)
@staticmethod
def the_washington_post(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.THE_WASHINGTON_POST, api_key, max, sort, reverse)
@staticmethod
def time(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.TIME, api_key, max, sort, reverse)
@staticmethod
def usa_today(api_key, max, sort, reverse):
return NewsAPI._get_data(NewsAPI.USA_TODAY, api_key, max, sort, reverse)
@staticmethod
def business(api_key, max, sort, reverse):
articles = []
articles.extend(NewsAPI.bloomberg(api_key, max, sort, reverse))
articles.extend(NewsAPI.business_insider(api_key, max, sort, reverse))
articles.extend(NewsAPI.business_insider_uk(api_key, max, sort, reverse))
articles.extend(NewsAPI.cnbc(api_key, max, sort, reverse))
articles.extend(NewsAPI.financial_times(api_key, max, sort, reverse))
articles.extend(NewsAPI.fortune(api_key, max, sort, reverse))
articles.extend(NewsAPI.the_economist(api_key, max, sort, reverse))
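# --- Hedged usage sketch (editor's illustration, not part of the original module) ---
# Each static feed helper above delegates to _get_data()/_get_news_feed_articles(),
# so fetching the five most recent BBC News headlines might look like this
# (the API key is a placeholder):
#
#   articles = NewsAPI.bbc_news("YOUR_NEWSAPI_KEY", 5, True, True)
#   for article in articles:
#       print(article.title)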
is not None and '_subtype' not in already_processed:
already_processed.add('_subtype')
if value in ('true', '1'):
self._subtype = True
elif value in ('false', '0'):
self._subtype = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('Source', node)
if value is not None and 'Source' not in already_processed:
already_processed.add('Source')
self.Source = value
value = find_attr_value_('_instances', node)
if value is not None and '_instances' not in already_processed:
already_processed.add('_instances')
self._instances = value
value = find_attr_value_('_archetype', node)
if value is not None and '_archetype' not in already_processed:
already_processed.add('_archetype')
self._archetype = value
value = find_attr_value_('Units', node)
if value is not None and 'Units' not in already_processed:
already_processed.add('Units')
self.Units = value
value = find_attr_value_('_id', node)
if value is not None and '_id' not in already_processed:
already_processed.add('_id')
self._id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class AllowableShearStressType
class AllowableTensileStressType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, _derived=None, _real_archetype=None, _desynched_atts=None, Value=None, _subtype=None, Source=None, _instances=None, _archetype=None, Units=None, _id=None):
self.original_tagname_ = None
self._derived = _cast(None, _derived)
self._real_archetype = _cast(bool, _real_archetype)
self._desynched_atts = _cast(None, _desynched_atts)
self.Value = _cast(float, Value)
self._subtype = _cast(bool, _subtype)
self.Source = _cast(None, Source)
self._instances = _cast(None, _instances)
self._archetype = _cast(None, _archetype)
self.Units = _cast(None, Units)
self._id = _cast(None, _id)
def factory(*args_, **kwargs_):
if AllowableTensileStressType.subclass:
return AllowableTensileStressType.subclass(*args_, **kwargs_)
else:
return AllowableTensileStressType(*args_, **kwargs_)
factory = staticmethod(factory)
def get__derived(self): return self._derived
def set__derived(self, _derived): self._derived = _derived
def get__real_archetype(self): return self._real_archetype
def set__real_archetype(self, _real_archetype): self._real_archetype = _real_archetype
def get__desynched_atts(self): return self._desynched_atts
def set__desynched_atts(self, _desynched_atts): self._desynched_atts = _desynched_atts
def get_Value(self): return self.Value
def set_Value(self, Value): self.Value = Value
def get__subtype(self): return self._subtype
def set__subtype(self, _subtype): self._subtype = _subtype
def get_Source(self): return self.Source
def set_Source(self, Source): self.Source = Source
def get__instances(self): return self._instances
def set__instances(self, _instances): self._instances = _instances
def get__archetype(self): return self._archetype
def set__archetype(self, _archetype): self._archetype = _archetype
def get_Units(self): return self.Units
def set_Units(self, Units): self.Units = Units
def get__id(self): return self._id
def set__id(self, _id): self._id = _id
def hasContent_(self):
if (
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='AllowableTensileStressType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AllowableTensileStressType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='AllowableTensileStressType', pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AllowableTensileStressType'):
if self._derived is not None and '_derived' not in already_processed:
already_processed.add('_derived')
outfile.write(' _derived=%s' % (self.gds_format_string(quote_attrib(self._derived).encode(ExternalEncoding), input_name='_derived'), ))
if self._real_archetype is not None and '_real_archetype' not in already_processed:
already_processed.add('_real_archetype')
outfile.write(' _real_archetype="%s"' % self.gds_format_boolean(self._real_archetype, input_name='_real_archetype'))
if self._desynched_atts is not None and '_desynched_atts' not in already_processed:
already_processed.add('_desynched_atts')
outfile.write(' _desynched_atts=%s' % (self.gds_format_string(quote_attrib(self._desynched_atts).encode(ExternalEncoding), input_name='_desynched_atts'), ))
if self.Value is not None and 'Value' not in already_processed:
already_processed.add('Value')
outfile.write(' Value="%s"' % self.gds_format_double(self.Value, input_name='Value'))
if self._subtype is not None and '_subtype' not in already_processed:
already_processed.add('_subtype')
outfile.write(' _subtype="%s"' % self.gds_format_boolean(self._subtype, input_name='_subtype'))
if self.Source is not None and 'Source' not in already_processed:
already_processed.add('Source')
outfile.write(' Source=%s' % (self.gds_format_string(quote_attrib(self.Source).encode(ExternalEncoding), input_name='Source'), ))
if self._instances is not None and '_instances' not in already_processed:
already_processed.add('_instances')
outfile.write(' _instances=%s' % (self.gds_format_string(quote_attrib(self._instances).encode(ExternalEncoding), input_name='_instances'), ))
if self._archetype is not None and '_archetype' not in already_processed:
already_processed.add('_archetype')
outfile.write(' _archetype=%s' % (self.gds_format_string(quote_attrib(self._archetype).encode(ExternalEncoding), input_name='_archetype'), ))
if self.Units is not None and 'Units' not in already_processed:
already_processed.add('Units')
outfile.write(' Units=%s' % (self.gds_format_string(quote_attrib(self.Units).encode(ExternalEncoding), input_name='Units'), ))
if self._id is not None and '_id' not in already_processed:
already_processed.add('_id')
outfile.write(' _id=%s' % (self.gds_format_string(quote_attrib(self._id).encode(ExternalEncoding), input_name='_id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='AllowableTensileStressType', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='AllowableTensileStressType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self._derived is not None and '_derived' not in already_processed:
already_processed.add('_derived')
showIndent(outfile, level)
outfile.write('_derived="%s",\n' % (self._derived,))
if self._real_archetype is not None and '_real_archetype' not in already_processed:
already_processed.add('_real_archetype')
showIndent(outfile, level)
outfile.write('_real_archetype=%s,\n' % (self._real_archetype,))
if self._desynched_atts is not None and '_desynched_atts' not in already_processed:
already_processed.add('_desynched_atts')
showIndent(outfile, level)
outfile.write('_desynched_atts="%s",\n' % (self._desynched_atts,))
if self.Value is not None and 'Value' not in already_processed:
already_processed.add('Value')
showIndent(outfile, level)
outfile.write('Value=%e,\n' % (self.Value,))
if self._subtype is not None and '_subtype' not in already_processed:
already_processed.add('_subtype')
showIndent(outfile, level)
outfile.write('_subtype=%s,\n' % (self._subtype,))
if self.Source is not None and 'Source' not in already_processed:
already_processed.add('Source')
showIndent(outfile, level)
outfile.write('Source="%s",\n' % (self.Source,))
if self._instances is not None and '_instances' not in already_processed:
already_processed.add('_instances')
showIndent(outfile, level)
outfile.write('_instances="%s",\n' % (self._instances,))
if self._archetype is not None and '_archetype' not in already_processed:
already_processed.add('_archetype')
showIndent(outfile, level)
outfile.write('_archetype="%s",\n' % (self._archetype,))
if self.Units is not None and 'Units' not in already_processed:
already_processed.add('Units')
showIndent(outfile, level)
outfile.write('Units="%s",\n' % (self.Units,))
if self._id is not None and '_id' not in already_processed:
already_processed.add('_id')
showIndent(outfile, level)
outfile.write('_id="%s",\n' % (self._id,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('_derived', node)
if value is not None and '_derived' not in already_processed:
already_processed.add('_derived')
self._derived = value
value = find_attr_value_('_real_archetype', node)
if value is not None and '_real_archetype' not in already_processed:
already_processed.add('_real_archetype')
if value in ('true', '1'):
self._real_archetype = True
elif value in ('false', '0'):
self._real_archetype = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('_desynched_atts', node)
if value is not None and '_desynched_atts' not in already_processed:
already_processed.add('_desynched_atts')
self._desynched_atts = value
value = find_attr_value_('Value', node)
if value is not None and 'Value' not in already_processed:
already_processed.add('Value')
try:
self.Value = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (Value): %s' % exp)
value = find_attr_value_('_subtype', node)
if value is not None and '_subtype' not in already_processed:
already_processed.add('_subtype')
if value in ('true', '1'):
self._subtype = True
elif value in ('false', '0'):
self._subtype = False
else:
raise_parse_error(node, 'Bad boolean attribute')
value = find_attr_value_('Source', node)
if value is not None and 'Source' not in already_processed:
already_processed.add('Source')
self.Source = value
value = find_attr_value_('_instances', node)
if value is not None and '_instances' not in already_processed:
already_processed.add('_instances')
self._instances = value
value = find_attr_value_('_archetype', node)
if value is not None and '_archetype' not in already_processed:
already_processed.add('_archetype')
self._archetype = value
value = find_attr_value_('Units', node)
if value is not None and 'Units' not in already_processed:
already_processed.add('Units')
self.Units = value
value = find_attr_value_('_id', node)
if value is not None and '_id' not in already_processed:
already_processed.add('_id')
self._id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class AllowableTensileStressType
class CADAnalysisMetaDataType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, _derived=None, _real_archetype=None, _archetype=None, _subtype=None, _instances=None, _desynched_atts=None, _id=None, _libname=None, AnalysisSupportingData=None, Assemblies=None, Materials=None, CADAnalysisMetaData=None):
self.original_tagname_ = None
self._derived = _cast(None, _derived)
self._real_archetype = _cast(bool, _real_archetype)
self._archetype = _cast(None, _archetype)
self._subtype = _cast(bool, _subtype)
self._instances = _cast(None, _instances)
self._desynched_atts = _cast(None, _desynched_atts)
self._id = _cast(None, _id)
self._libname = _cast(None, _libname)
self.AnalysisSupportingData = AnalysisSupportingData
self.Assemblies = Assemblies
self.Materials = Materials
if CADAnalysisMetaData is None:
self.CADAnalysisMetaData = []
else:
self.CADAnalysisMetaData = CADAnalysisMetaData
def factory(*args_, **kwargs_):
if CADAnalysisMetaDataType.subclass:
return CADAnalysisMetaDataType.subclass(*args_, **kwargs_)
else:
return CADAnalysisMetaDataType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_AnalysisSupportingData(self): return self.AnalysisSupportingData
def set_AnalysisSupportingData(self, AnalysisSupportingData): self.AnalysisSupportingData = AnalysisSupportingData
def get_Assemblies(self): return self.Assemblies
def set_Assemblies(self, Assemblies): self.Assemblies = Assemblies
def get_Materials(self): return self.Materials
def set_Materials(self, Materials): self.Materials = Materials
def get_CADAnalysisMetaData(self): return self.CADAnalysisMetaData
def set_CADAnalysisMetaData(self, CADAnalysisMetaData): self.CADAnalysisMetaData = CADAnalysisMetaData
def add_CADAnalysisMetaData(self, value): self.CADAnalysisMetaData.append(value)
def insert_CADAnalysisMetaData(self, index, value): self.CADAnalysisMetaData[index] = value
def get__derived(self): return self._derived
def set__derived(self, _derived): self._derived = _derived
def get__real_archetype(self): return self._real_archetype
def set__real_archetype(self, _real_archetype): self._real_archetype = _real_archetype
def get__archetype(self): return self._archetype
def set__archetype(self, _archetype): self._archetype = _archetype
def get__subtype(self): return self._subtype
def set__subtype(self, _subtype): self._subtype = _subtype
def get__instances(self): return self._instances
def set__instances(self, _instances): self._instances = _instances
def get__desynched_atts(self): return self._desynched_atts
def set__desynched_atts(self, _desynched_atts): self._desynched_atts = _desynched_atts
def get__id(self): return self._id
def set__id(self, _id): self._id = _id
def get__libname(self): return self._libname
def set__libname(self, _libname): self._libname = _libname
def hasContent_(self):
if (
self.AnalysisSupportingData is not None or
self.Assemblies is not None or
self.Materials is not None or
self.CADAnalysisMetaData
):
return True
else:
return False
def export(self, outfile, level,
import shutil
import os
import time
import multiprocessing
import pdb
import torch
from torch.nn.utils import clip_grad_norm_
import torch.distributed as dist
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
from tqdm import tqdm
from .video_transforms import (GroupRandomHorizontalFlip,
GroupMultiScaleCrop, GroupScale, GroupCenterCrop, GroupRandomCrop,
GroupNormalize, Stack, ToTorchFormatTensor, GroupRandomScale)
from torch.cuda.amp import GradScaler,autocast
import random
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1, 5)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
# correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
correct_k=correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
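# Illustrative sketch (not part of the original training code): a minimal, uncalled
# helper showing the expected call pattern for `accuracy`. The shapes and labels
# below are assumptions chosen only for the example.
def _accuracy_example():
    logits = torch.randn(4, 10)           # 4 samples, 10 classes
    labels = torch.tensor([1, 0, 3, 9])   # ground-truth class indices
    prec1, prec5 = accuracy(logits, labels)
    # Each value is a 1-element tensor holding a percentage in [0, 100].
    return prec1.item(), prec5.item()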
def save_checkpoint(state, is_best, filepath=''):
torch.save(state, os.path.join(filepath, 'checkpoint.pth.tar'))
if is_best:
shutil.copyfile(os.path.join(filepath, 'checkpoint.pth.tar'),
os.path.join(filepath, 'model_best.pth.tar'))
def pack_pathway_output(cfg, frames):
"""
    Prepare output as a list of tensors. Each tensor corresponds to a
unique pathway.
Args:
frames (tensor): frames of images sampled from the video. The
dimension is `channel` x `num frames` x `height` x `width`.
Returns:
frame_list (list): list of tensors with the dimension of
`channel` x `num frames` x `height` x `width`.
"""
if cfg.DATA.REVERSE_INPUT_CHANNEL:
frames = frames[[2, 1, 0], :, :, :]
if cfg.MODEL.ARCH in cfg.MODEL.SINGLE_PATHWAY_ARCH:
frame_list = [frames]
elif cfg.MODEL.ARCH in cfg.MODEL.MULTI_PATHWAY_ARCH:
fast_pathway = frames
# Perform temporal sampling from the fast pathway.
slow_pathway = torch.index_select(
frames,
1,
torch.linspace(
0, frames.shape[1] - 1, frames.shape[1] // cfg.SLOWFAST.ALPHA
).long(),
)
frame_list = [slow_pathway, fast_pathway]
else:
raise NotImplementedError(
"Model arch {} is not in {}".format(
cfg.MODEL.ARCH,
cfg.MODEL.SINGLE_PATHWAY_ARCH + cfg.MODEL.MULTI_PATHWAY_ARCH,
)
)
return frame_list
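# Illustrative sketch (assumptions: the config fields below are the only ones this
# helper reads; real configs come from the project's config system, not this file).
# For a SlowFast-style arch with ALPHA=4, all 32 input frames stay on the fast
# pathway while the slow pathway is subsampled to 8 frames.
def _pack_pathway_example():
    from types import SimpleNamespace
    cfg = SimpleNamespace(
        DATA=SimpleNamespace(REVERSE_INPUT_CHANNEL=False),
        MODEL=SimpleNamespace(ARCH="slowfast",
                              SINGLE_PATHWAY_ARCH=["i3d"],
                              MULTI_PATHWAY_ARCH=["slowfast"]),
        SLOWFAST=SimpleNamespace(ALPHA=4),
    )
    frames = torch.randn(3, 32, 224, 224)  # channel x num_frames x height x width
    slow, fast = pack_pathway_output(cfg, frames)
    return slow.shape, fast.shape           # (3, 8, 224, 224) and (3, 32, 224, 224)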
def get_augmentor(is_train, image_size, mean=None,
std=None, disable_scaleup=False, is_flow=False,
threed_data=False, version='v1', scale_range=None):
mean = [0.485, 0.456, 0.406] if mean is None else mean
std = [0.229, 0.224, 0.225] if std is None else std
scale_range = [256, 320] if scale_range is None else scale_range
augments = []
if is_train:
if version == 'v1':
augments += [
GroupMultiScaleCrop(image_size, [1, .875, .75, .66])
]
augments += [GroupRandomHorizontalFlip(is_flow=is_flow)]
elif version == 'v2':
augments += [
GroupRandomScale(scale_range),
GroupRandomCrop(image_size),
]
augments += [GroupRandomHorizontalFlip(is_flow=is_flow)]
elif version == 'v3':
augments += [
GroupRandomScale([224,256]),
GroupRandomCrop(image_size),
]
augments += [GroupRandomHorizontalFlip(is_flow=is_flow)]
else:
augments += [
GroupRandomScale([224,256]),
GroupRandomCrop(image_size),
]
else:
scaled_size = image_size if disable_scaleup else int(image_size / 0.875 + 0.5)
augments += [
GroupScale(scaled_size),
GroupCenterCrop(image_size)
]
augments += [
Stack(threed_data=threed_data),
ToTorchFormatTensor(),
GroupNormalize(mean=mean, std=std, threed_data=threed_data)
]
augmentor = transforms.Compose(augments)
return augmentor
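# Illustrative sketch (uncalled): building train/val pipelines with this file's
# defaults. The 224 crop size is an assumption for the example; the ImageNet
# mean/std fallbacks are the ones already hard-coded above.
def _augmentor_example():
    train_aug = get_augmentor(is_train=True, image_size=224, version='v1')
    val_aug = get_augmentor(is_train=False, image_size=224)
    return train_aug, val_aug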
def get_augmentor_withGaussianNoise(is_train, image_size, mean=None,
std=None,g_std=0.2, disable_scaleup=False, is_flow=False,
threed_data=False, version='v1', scale_range=None):
def Gaussian_Noise(x,std):
return x+torch.zeros_like(x).data.normal_(0,std)
mean = [0.485, 0.456, 0.406] if mean is None else mean
std = [0.229, 0.224, 0.225] if std is None else std
scale_range = [256, 320] if scale_range is None else scale_range
augments = []
if is_train:
if version == 'v1':
augments += [
GroupMultiScaleCrop(image_size, [1, .875, .75, .66])
]
augments += [GroupRandomHorizontalFlip(is_flow=is_flow)]
elif version == 'v2':
augments += [
GroupRandomScale(scale_range),
GroupRandomCrop(image_size),
]
augments += [GroupRandomHorizontalFlip(is_flow=is_flow)]
elif version == 'v3':
augments += [
GroupRandomScale([224,256]),
GroupRandomCrop(image_size),
]
augments += [GroupRandomHorizontalFlip(is_flow=is_flow)]
else:
augments += [
GroupRandomScale([224,256]),
GroupRandomCrop(image_size),
]
else:
scaled_size = image_size if disable_scaleup else int(image_size / 0.875 + 0.5)
augments += [
GroupScale(scaled_size),
GroupCenterCrop(image_size)
]
augments += [
Stack(threed_data=threed_data),
ToTorchFormatTensor(),
transforms.Lambda(lambda x:Gaussian_Noise(x,g_std)),
GroupNormalize(mean=mean, std=std, threed_data=threed_data)
]
augmentor = transforms.Compose(augments)
return augmentor
def build_dataflow(dataset, is_train, batch_size, workers=36, is_distributed=False):
workers = min(workers, multiprocessing.cpu_count())
shuffle = False
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if is_distributed else None
if is_train:
shuffle = sampler is None
data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
num_workers=workers, pin_memory=True, sampler=sampler)
return data_loader
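# Illustrative sketch (assumption: the caller supplies any torch.utils.data.Dataset):
# typical train/val loaders built through build_dataflow. Distributed sampling stays
# disabled here, so the train loader simply shuffles.
def _dataflow_example(dataset, batch_size=32):
    train_loader = build_dataflow(dataset, is_train=True, batch_size=batch_size, workers=4)
    val_loader = build_dataflow(dataset, is_train=False, batch_size=batch_size, workers=4)
    return train_loader, val_loader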
def train_slowfast(data_loader, model, criterion, optimizer, epoch, display=100,
steps_per_epoch=99999999999, clip_gradient=None, gpu_id=None, rank=0):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
    # set a different random seed (sampler epoch) every epoch
if dist.is_initialized():
data_loader.sampler.set_epoch(epoch)
# switch to train mode
model.train()
end = time.time()
num_batch = 0
with tqdm(total=len(data_loader)) as t_bar:
for i, (images, target) in enumerate(data_loader):
# pdb.set_trace()
# measure data loading time
data_time.update(time.time() - end)
# compute output
if gpu_id is not None:
images = images.cuda(gpu_id, non_blocking=True)
output = model(images)
target = target.cuda(gpu_id, non_blocking=True)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target)
if dist.is_initialized():
world_size = dist.get_world_size()
dist.all_reduce(prec1)
dist.all_reduce(prec5)
prec1 /= world_size
prec5 /= world_size
losses.update(loss.item(), images[0].size(0))
top1.update(prec1[0], images[0].size(0))
top5.update(prec5[0], images[0].size(0))
# compute gradient and do SGD step
loss.backward()
if clip_gradient is not None:
_ = clip_grad_norm_(model.parameters(), clip_gradient)
optimizer.step()
optimizer.zero_grad()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % display == 0 and rank == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(data_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5), flush=True)
num_batch += 1
t_bar.update(1)
if i > steps_per_epoch:
break
return top1.avg, top5.avg, losses.avg, batch_time.avg, data_time.avg, num_batch
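# Illustrative sketch of how the loop above is usually driven (assumption: `model`,
# `train_loader`, `criterion`, `optimizer` and the epoch range are created elsewhere;
# none of those names exist in this file):
#   for epoch in range(start_epoch, num_epochs):
#       top1, top5, loss, _, _, _ = train_slowfast(
#           train_loader, model, criterion, optimizer, epoch,
#           clip_gradient=5.0, gpu_id=0, rank=0)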
def train_withNoise(data_loader, model, criterion, optimizer, epoch, std=0.2, display=100,
steps_per_epoch=99999999999, clip_gradient=None, gpu_id=None, rank=0):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
    # set a different random seed (sampler epoch) every epoch
if dist.is_initialized():
data_loader.sampler.set_epoch(epoch)
# switch to train mode
model.train()
end = time.time()
num_batch = 0
def gaussian_noise(data):
batch_size = data.size()[0]
t_samples = data.size()[1]
        # pick a random-sized subset of frame indices in [0, t_samples) to corrupt
randomlist = random.sample(range(0, t_samples), random.randint(0, t_samples))
# print(data.size())
for index in randomlist:
data[:, :, index, :, :] += torch.zeros(batch_size, 3, 224, 224).data.normal_(0, std).cuda()
return data
with tqdm(total=len(data_loader)) as t_bar:
for i, (images, target) in enumerate(data_loader):
# measure data loading time
# pdb.set_trace()
data_time.update(time.time() - end)
# compute output
if gpu_id is not None:
images = images.cuda(gpu_id, non_blocking=True)
corrupted_images = gaussian_noise(images.cuda())
concat_images = torch.cat((images.cuda(), corrupted_images), 0)
concat_output = model(concat_images)
            logits_clean, logits_corrupted = torch.split(concat_output, images.size()[0])
target = target.cuda(gpu_id, non_blocking=True)
loss = criterion(concat_output, torch.cat((target,target),0))
# measure accuracy and record loss
prec1, prec5 = accuracy(logits_clean, target)
if dist.is_initialized():
world_size = dist.get_world_size()
dist.all_reduce(prec1)
dist.all_reduce(prec5)
prec1 /= world_size
prec5 /= world_size
losses.update(loss.item(), images.size(0))
top1.update(prec1[0], images.size(0))
top5.update(prec5[0], images.size(0))
# compute gradient and do SGD step
loss.backward()
if clip_gradient is not None:
_ = clip_grad_norm_(model.parameters(), clip_gradient)
optimizer.step()
optimizer.zero_grad()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % display == 0 and rank == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(data_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5), flush=True)
num_batch += 1
t_bar.update(1)
if i > steps_per_epoch:
break
return top1.avg, top5.avg, losses.avg, batch_time.avg, data_time.avg, num_batch
def info_nce_loss(features,batch_size,n_views,gpu_id,temperature=0.07):
labels = torch.cat([torch.arange(batch_size) for i in range(n_views)], dim=0)
labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()
labels = labels.cuda(gpu_id, non_blocking=True)
features = F.normalize(features, dim=1)
similarity_matrix = torch.matmul(features, features.T)
# assert similarity_matrix.shape == (
# n_views * batch_size, n_views * batch_size)
# assert similarity_matrix.shape == labels.shape
# discard the main diagonal from both: labels and similarities matrix
mask = torch.eye(labels.shape[0], dtype=torch.bool).cuda(gpu_id, non_blocking=True)
labels = labels[~mask].view(labels.shape[0], -1)
similarity_matrix = similarity_matrix[~mask].view(similarity_matrix.shape[0], -1)
# pdb.set_trace()
# assert similarity_matrix.shape == labels.shape
# select and combine multiple positives
positives = similarity_matrix[labels.bool()].view(labels.shape[0], -1)
    # select only the negatives
negatives = similarity_matrix[~labels.bool()].view(similarity_matrix.shape[0], -1)
logits = torch.cat([positives, negatives], dim=1)
labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda(gpu_id, non_blocking=True)
logits = logits / temperature
return logits, labels
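# Illustrative sketch (uncalled; needs a CUDA device because info_nce_loss moves its
# labels and mask to the GPU): the SimCLR-style call stacks the two augmented views
# of each sample along the batch dimension before the projection-head features reach
# this loss. Batch size, view count and feature width below are assumptions.
def _info_nce_example(gpu_id=0):
    batch_size, n_views, dim = 8, 2, 128
    features = torch.randn(n_views * batch_size, dim).cuda(gpu_id)
    logits, labels = info_nce_loss(features, batch_size, n_views, gpu_id)
    # logits: (16, 15); column 0 holds the positive pair, so labels are all zeros and
    # the caller applies a plain cross-entropy over the rows.
    return F.cross_entropy(logits, labels)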
def simclr_train(data_loader, model, criterion, optimizer, epoch, std=0.2, display=100,
steps_per_epoch=99999999999, clip_gradient=None, gpu_id=None, rank=0):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
kld_losses = AverageMeter()
ss_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
    # set a different random seed (sampler epoch) every epoch
if dist.is_initialized():
data_loader.sampler.set_epoch(epoch)
# switch to train mode
model.train()
end = time.time()
num_batch = 0
def gaussian_noise(data):
batch_size = data.size()[0]
t_samples = data.size()[1]
        # pick a random-sized subset of frame indices in [0, t_samples) to corrupt
randomlist = random.sample(range(0, t_samples), random.randint(0, t_samples))
# print(data.size())
for index in randomlist:
data[:, :, index, :, :] += torch.zeros(batch_size, 3, 224, 224).data.normal_(0, std).cuda()
return data
with tqdm(total=len(data_loader)) as t_bar:
for i, (images, target) in enumerate(data_loader):
# measure data loading time
# pdb.set_trace()
data_time.update(time.time() - end)
# compute output
if gpu_id is not None:
images = images.cuda(gpu_id, non_blocking=True)
corrupted_images = gaussian_noise(images.cuda())
# src/googletranslate.py
# Keypirinha: a fast launcher for Windows (keypirinha.com)
import keypirinha as kp
import keypirinha_util as kpu
import keypirinha_net as kpnet
import re
import json
import traceback
import urllib.error
import urllib.parse
import os
class GoogleTranslate(kp.Plugin):
"""Suggest translations using Google Translate online service"""
API_URL = "https://translate.google.com/translate_a/single"
API_QUERY = (
("client", "gtx"), # gtx, t
("hl", "en"),
("sl", "LANGIN"), # placeholder
("ssel", 0),
("tl", "LANGOUT"), # placeholder
("tsel", 0),
("q", "TERMS"), # placeholder
("ie", "UTF-8"),
("oe", "UTF-8"),
("otf", 1),
("dt", "at")) # bd, ex, ld, md, qca, rw, rm, ss, t, at
API_USER_AGENT = "Mozilla/5.0"
BROWSE_URL = "https://translate.google.com/#{lang_in}/{lang_out}/{terms}"
ITEMCAT_TRANSLATE = kp.ItemCategory.USER_BASE + 1
ITEMCAT_RESULT = kp.ItemCategory.USER_BASE + 2
ITEM_ARGS_SEP = ":"
ACTION_COPY_RESULT = "copy_result"
ACTION_BROWSE = "browse"
ACTION_BROWSE_PRIVATE = "browse_private"
ACTION_COPY_URL = "copy_url"
CONFIG_SECTION_DEFAULTS = "defaults"
CONFIG_SECTION_CUSTOM_ITEM = "custom_item"
DEFAULT_ITEM_ENABLED = True
DEFAULT_ITEM_LABEL = "CosmosAutoComplete"
DEFAULT_LANG_IN = "auto"
DEFAULT_LANG_OUT = "en"
DEFAULT_IDLE_TIME = 0.25
lang = {'in': {}, 'out': {}}
default_item_enabled = DEFAULT_ITEM_ENABLED
default_item_label = DEFAULT_ITEM_LABEL
default_lang_in = DEFAULT_LANG_IN
default_lang_out = DEFAULT_LANG_OUT
idle_time = DEFAULT_IDLE_TIME
DEFAULT_VC_CONFIG = "cosmos08|Bingads.algo.prod.IntentMatching;cosmos08|bingads.algo.IntentMatching"
def __init__(self):
super().__init__()
def _get_VC_List(self, vc_config):
cluster_vcs = vc_config.split(";")
self.vc_list = []
for cluster_vc in cluster_vcs:
cluster, vc = cluster_vc.split('|')
self.vc_list.append([cluster, vc])
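        # Illustrative note (not executed): DEFAULT_VC_CONFIG above is a ";"-separated
        # list of "cluster|vc" pairs, so feeding it through this method would yield
        #   self.vc_list == [["cosmos08", "Bingads.algo.prod.IntentMatching"],
        #                    ["cosmos08", "bingads.algo.IntentMatching"]]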
def on_start(self):
self._read_config()
# register actions
actions = [
self.create_action(
name=self.ACTION_BROWSE,
label="Open in browser",
short_desc="Open your query in Google Translate"),
self.create_action(
name=self.ACTION_COPY_URL,
label="Copy URL",
short_desc="Copy resulting URL to clipboard")]
self.set_actions(self.ITEMCAT_RESULT, actions)
def on_catalog(self):
catalog = self._read_config()
self.set_catalog(catalog)
def _create_result_item_cosmos(self, vc, finalurl):
short_desc = "Dest:" + finalurl
item = self.create_item(
category=self.ITEMCAT_RESULT,
label=vc if vc else "Bingads.algo.prod.IntentMatching",
short_desc=short_desc,
target=finalurl if finalurl else "",
args_hint=kp.ItemArgsHint.FORBIDDEN,
hit_hint=kp.ItemHitHint.IGNORE)
# data_bag = lang_in + self.ITEM_ARGS_SEP + lang_out + self.ITEM_ARGS_SEP
# if search_terms:
# data_bag += search_terms
# item.set_data_bag(data_bag)
return item
def _complete_cosmos_url(self, url, cluster, vc):
if not url.startswith("https"):
prefix = "https://%s.osdinfra.net/cosmos/%s" % (cluster, vc)
if url.startswith('/'):
url = prefix + url
else:
url = prefix + '/' + url
if not url.endswith("/"):
url = url + "?property=info"
return url
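        # Illustrative note (hypothetical input, not executed): a relative stream path
        # such as "my/data/stream.ss" on cluster "cosmos08" and VC
        # "Bingads.algo.prod.IntentMatching" becomes
        #   https://cosmos08.osdinfra.net/cosmos/Bingads.algo.prod.IntentMatching/my/data/stream.ss?property=info
        # Full "https://..." inputs pass through unchanged apart from gaining the
        # "?property=info" suffix when they do not end with "/".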
def on_suggest(self, user_input, items_chain):
if not items_chain or items_chain[-1].category() != self.ITEMCAT_TRANSLATE:
return
current_item = items_chain[-1]
suggestions = []
vc_list = [["cosmos08", "Bingads.algo.prod.IntentMatching"]
]
for cluster_vc in vc_list:
cluster, vc = cluster_vc
final_url = self._complete_cosmos_url(user_input,cluster,vc)
suggestions.append(
self._create_result_item_cosmos(vc, final_url))
# suggestions.append(
# self._create_result_item_cosmos(None, None, None, "https://google.com"))
# # read query args from current item, if any
# # then override item's query args with user_input if needed
# query = self._extract_search_info(current_item, user_input)
# # query google translate if needed
# if query['lang_in'] and query['lang_out'] and len(query['terms']):
# # avoid doing too much network requests in case user is still typing
# if self.should_terminate(self.idle_time):
# return
# results = []
# try:
# # get translated version of terms
# opener = kpnet.build_urllib_opener()
# opener.addheaders = [("User-agent", self.API_USER_AGENT)]
# url = self._build_api_url(query['lang_in'], query['lang_out'],
# query['terms'])
# with opener.open(url) as conn:
# response = conn.read()
# if self.should_terminate():
# return
# # parse response from the api
# results = self._parse_api_response(response, query['lang_in'])
# except urllib.error.HTTPError as exc:
# suggestions.append(self.create_error_item(
# label=user_input, short_desc=str(exc)))
# except Exception as exc:
# suggestions.append(self.create_error_item(
# label=user_input, short_desc="Error: " + str(exc)))
# traceback.print_exc()
# # create a suggestion from api's response, if any
# for res in results:
# suggestions.append(self._create_result_item(
# lang_in=res['lang_in'],
# lang_out=query['lang_out'],
# search_terms=query['terms'],
# search_result=res['result']))
# push suggestions if any
if suggestions:
self.set_suggestions(suggestions, kp.Match.ANY, kp.Sort.NONE)
def _extract_autocomplete_url(self, item, user_input=None):
if item and item.category() == self.ITEMCAT_RESULT:
url = item.target()
return url
return None
def on_execute(self, item, action):
if item.category() != self.ITEMCAT_RESULT:
return
# browse or copy url
if action and action.name() in (self.ACTION_BROWSE,
self.ACTION_COPY_URL):
# build the url and its arguments
url = self._extract_autocomplete_url(item)
# copy url
if action.name() == self.ACTION_COPY_URL:
kpu.set_clipboard(url)
# launch browser
elif action.name() == self.ACTION_BROWSE:
kpu.web_browser_command(private_mode=None, url=url,
execute=True)
# default action: copy result (ACTION_COPY_RESULT)
else:
url = self._extract_autocomplete_url(item)
kpu.set_clipboard(url)
def on_events(self, flags):
if flags & (kp.Events.APPCONFIG | kp.Events.PACKCONFIG |
kp.Events.NETOPTIONS):
self._read_config()
self.on_catalog()
def _read_config(self):
def _warn_lang_code(name, section, fallback):
fmt = (
"Invalid {} value in [{}] config section. " +
"Falling back to default: {}")
self.warn(fmt.format(name, section, fallback))
def _warn_skip_custitem(name, section):
fmt = (
"Invalid {} value in [{}] config section. " +
"Skipping custom item.")
self.warn(fmt.format(name, section))
catalog = []
settings = self.load_settings()
        # [defaults]
self.default_item_enabled = settings.get_bool(
"enable",
section=self.CONFIG_SECTION_DEFAULTS,
fallback=self.DEFAULT_ITEM_ENABLED)
self.default_item_label = settings.get_stripped(
"item_label",
section=self.CONFIG_SECTION_DEFAULTS,
fallback=self.DEFAULT_ITEM_LABEL)
# default input language
self.default_lang_in = settings.get_stripped(
"input_lang",
section=self.CONFIG_SECTION_DEFAULTS,
fallback=self.DEFAULT_LANG_IN)
validated_lang_code = self._match_lang_code("in", self.default_lang_in)
if validated_lang_code is None:
_warn_lang_code("input_lang", self.CONFIG_SECTION_DEFAULTS,
self.DEFAULT_LANG_IN)
self.default_lang_in = self.DEFAULT_LANG_IN
else:
self.default_lang_in = validated_lang_code
# default output language
self.default_lang_out = settings.get_stripped(
"output_lang",
section=self.CONFIG_SECTION_DEFAULTS,
fallback=self.DEFAULT_LANG_OUT)
validated_lang_code = self._match_lang_code("out", self.default_lang_out)
if validated_lang_code is None:
_warn_lang_code("output_lang", self.CONFIG_SECTION_DEFAULTS,
self.DEFAULT_LANG_OUT)
self.default_lang_out = self.DEFAULT_LANG_OUT
else:
self.default_lang_out = validated_lang_code
self.idle_time = settings.get_float(
"idle_time", self.CONFIG_SECTION_DEFAULTS,
fallback=self.DEFAULT_IDLE_TIME, min=0.25, max=3)
if self.default_item_enabled:
catalog.insert(0, self._create_translate_item())
        # [custom_item/*] optional sections
for section in settings.sections():
if not section.lower().startswith(self.CONFIG_SECTION_CUSTOM_ITEM + "/"):
continue
section_name = section[len(self.CONFIG_SECTION_CUSTOM_ITEM) + 1:].strip()
if not len(section_name):
self.warn('Invalid section name: "{}". Skipping section.'.format(section))
continue
# input_lang
custitem_lang_in = settings.get_stripped(
"input_lang", section=section, fallback=None)
if custitem_lang_in is not None:
custitem_lang_in = self._match_lang_code("in", custitem_lang_in)
if not custitem_lang_in:
_warn_skip_custitem("input_lang", section)
continue
# output_lang
custitem_lang_out = settings.get_stripped(
"output_lang", section=section, fallback=None)
if custitem_lang_out is not None:
custitem_lang_out = self._match_lang_code("out", custitem_lang_out)
if not custitem_lang_out:
_warn_skip_custitem("output_lang", section)
continue
# enabled?
custitem_enabled = settings.get_bool(
"enable", section=section, fallback=True)
if not custitem_enabled:
continue
# item_label
custitem_label = settings.get_stripped(
"item_label",
section=section,
fallback=self.default_item_label)
custitem_label = custitem_label.format(
section_name=section_name,
default_item_label=self.default_item_label,
input_lang_code=custitem_lang_in,
input_lang_label=self._lang_name("in", custitem_lang_in),
output_lang_code=custitem_lang_out,
output_lang_label=self._lang_name("out", custitem_lang_out))
custitem_label = custitem_label.strip()
if not len(custitem_label):
_warn_skip_custitem("item_label", section)
continue
# create item
catalog.append(self._create_translate_item())
return catalog
def _parse_api_response(self, response, query_lang_in):
# example:
# * https://translate.google.com/translate_a/single?client=gtx&hl=en&sl=auto&ssel=0&tl=en&tsel=0&q=meilleur+definition&ie=UTF-8&oe=UTF-8&otf=0&dt=t
# [[["Best definition","meilleur definition",,,3]],,"fr",,,,0.34457824,,[["fr"],,[0.34457824],["fr"]]]
# * https://translate.google.com/translate_a/single?client=gtx&hl=en&sl=auto&ssel=0&tl=en&tsel=0&q=meilleur+definition&ie=UTF-8&oe=UTF-8&otf=0&dt=at
# [,,"fr",,,[["meilleur definition",,[["Best definition",0,true,false],["better definition",0,true,false]],[[0,19]],"meilleur definition",0,0]],0.34457824,,[["fr"],,[0.34457824],["fr"]]]
response = response.decode(encoding="utf-8", errors="strict")
response = response.replace(",,", ",null,").replace("[,", "[null,")
data = json.loads(response)
lang_in = data[2]
sentences = data[5]
# note: data[5] (sentences) may be None when there is no translation to be done
# (i.e. target lang is "en" and text to translate is already in English)
translated = []
if sentences:
# extract the translation options for each sentence
sentences = ((variant[0] for variant in s[2]) for s in sentences)
# join variants in one text - line
for sentence in zip(*sentences):
translated.append(' '.join(sentence))
# in case google's api support a new language that is not in our local
# database yet, this ensures we don't create items with an unknown
# lang_in value (catalog file, history file, ...)
lang_in = self._match_lang_code("in", lang_in, fallback=query_lang_in)
return ({'result': res, 'lang_in': lang_in} for res in translated)
def _extract_search_info(self, item, user_input=None):
info = {
'lang_in': self.default_lang_in,
'lang_out': self.default_lang_out,
'terms': "",
'result': ""}
# parse item's target
if item and item.category() == self.ITEMCAT_TRANSLATE:
item_props = item.target().split(self.ITEM_ARGS_SEP)
# lang_in
if len(item_props) >= 1:
info['lang_in'] = self._match_lang_code(
"in", item_props[0], fallback=info['lang_in'])
# lang_out
if len(item_props) >= 2:
info['lang_out'] = self._match_lang_code(
"out", item_props[1], fallback=info['lang_out'])
# search terms
if len(item.raw_args()):
info['terms'] = item.raw_args()
# parse user input
# * supported formats:
# [[lang_in]:[lang_out]] <terms>
# <terms> [[lang_in]:[lang_out]]
# * in the unlikely case the [[lang_in]:[lang_out]] part is
# specified at both ends, the one at the right end prevails
if user_input:
user_input = user_input.lstrip()
info['terms'] = user_input.rstrip()
# match: <terms> [[lang_in]:[lang_out]]
m = re.match(
(r"^(?P<terms>.*)\s+" +
r"(?P<lang_in>[a-zA-Z\-]+)?" +
re.escape(self.ITEM_ARGS_SEP) +
r"(?P<lang_out>[a-zA-Z\-]+)?$"),
user_input)
# match: [[lang_in]:[lang_out]] <terms>
if not m:
m = re.match(
(r"^(?P<lang_in>[a-zA-Z\-]+)?" +
re.escape(self.ITEM_ARGS_SEP) +
r"(?P<lang_out>[a-zA-Z\-]+)?" +
r"\s+(?P<terms>.*)$"),
user_input)
if m:
if m.group("lang_in") or m.group("lang_out"):
lang_in = self._match_lang_code("in", m.group("lang_in"))
lang_out = self._match_lang_code("out", m.group("lang_out"))
if lang_in or lang_out:
if lang_in:
info['lang_in'] = lang_in
if lang_out:
info['lang_out'] = lang_out
info['terms'] = m.group("terms").rstrip()
elif item and item.category() == self.ITEMCAT_RESULT:
item_props = item.data_bag().split(self.ITEM_ARGS_SEP, maxsplit=2)
# lang_in
if len(item_props) >= 1:
info['lang_in'] = self._match_lang_code(
"in", item_props[0], fallback=info['lang_in'])
# lang_out
if len(item_props) >= 2:
info['lang_out'] = self._match_lang_code(
"out", item_props[1], fallback=info['lang_out'])
# search terms
if len(item_props) >= 3:
info['terms'] = item_props[2]
# search result
info['result'] = item.target()
return info
def _lang_name(self, inout, lang_code):
match_code = self._match_lang_code(inout, lang_code)
if match_code is not None:
return self.lang[inout][match_code]
return lang_code
def _match_lang_code(self, inout, lang_code, fallback=None):
if lang_code:
lang_code = lang_code.strip().upper()
if lang_code and lang_code == "-" and inout == "in":
            lang_code =
# %%
import os
import logging
import random
from enum import Enum, auto
import re
import sentry_sdk
from dff import dialogflow_extension
import common.dialogflow_framework.utils.state as state_utils
import common.dialogflow_framework.utils.condition as condition_utils
import common.utils as common_utils
import common.constants as common_constants
import common.news as general_this_news
from common.gossip import talk_about_gossip, skill_trigger_phrases
from common.fact_random import get_fact
import dialogflows.scenarios.gossip as this_gossip
import common.gossip as common_gossip
import dialogflows.scenarios.news as this_news
import dialogflows.scopes as scopes
from dialogflows.flows import utils
sentry_sdk.init(dsn=os.getenv("SENTRY_DSN"))
NEWS_API_ANNOTATOR_URL = os.environ.get("NEWS_API_ANNOTATOR_URL")
assert NEWS_API_ANNOTATOR_URL
logger = logging.getLogger(__name__)
class State(Enum):
USR_START = auto()
SYS_TOPIC_TO_EVENT = auto()
USR_TOPIC_TO_EVENT = auto()
SYS_NO_OR_YES = auto()
USR_NO_OR_YES = auto()
SYS_EVENT_TO_PERSON = auto()
USR_EVENT_TO_PERSON = auto()
# BEGIN: USR_NOT_INTERESTED_IN_PERSON
SYS_NOT_INTERESTED_IN_PERSON = auto()
USR_NOT_INTERESTED_IN_PERSON = auto()
SYS_CHANGE_TO_PERSON = auto()
USR_CHANGE_TO_PERSON = auto()
# transitions back to:
# NOT_INTERESTED_IN_PERSON
# AGREES_ABT_PERSON
# DISAGREES_ABT_PERSON
# SAYS_OPINION_ABT_PERSON
# END
# BEGIN: USR_AGREES_ABT_PERSON
SYS_AGREES_ABT_PERSON = auto()
USR_AGREES_ABT_PERSON = auto()
SYS_PERSON_AGREE = auto()
USR_PERSON_AGREE = auto()
SYS_SAYS_SOMETHING_AFTER_AGREE = auto()
USR_SAYS_SOMETHING_AFTER_AGREE = auto()
# transitions back to:
# NOT_INTERESTED_IN_PERSON
# AGREES_ABT_PERSON
# DISAGREES_ABT_PERSON
# SAYS_OPINION_ABT_PERSON
# END
# BEGIN
SYS_DISAGREES_ABT_PERSON = auto()
USR_DISAGREES_ABT_PERSON = auto()
SYS_PERSON_DISAGREE = auto()
USR_PERSON_DISAGREE = auto()
SYS_SAYS_SOMETHING_AFTER_DISAGREE = auto()
USR_SAYS_SOMETHING_AFTER_DISAGREE = auto()
# transitions back to:
# NOT_INTERESTED_IN_PERSON
# AGREES_ABT_PERSON
# DISAGREES_ABT_PERSON
# SAYS_OPINION_ABT_PERSON
# END
# BEGIN: USR_SAYS_OPINION_ABT_PERSON
SYS_SAYS_OPINION_ABT_PERSON = auto()
USR_SAYS_OPINION_ABT_PERSON = auto()
SYS_PERSON_OPINION = auto()
USR_PERSON_OPINION = auto()
SYS_SAYS_SOMETHING_AFTER_OPINION = auto()
USR_SAYS_SOMETHING_AFTER_OPINION = auto()
# transitions back to:
# NOT_INTERESTED_IN_PERSON
# AGREES_ABT_PERSON
# DISAGREES_ABT_PERSON
# SAYS_OPINION_ABT_PERSON
# END
SYS_MENTIONS_ANOTHER_PERSON = auto()
USR_MENTIONS_ANOTHER_PERSON = auto()
# Helpers: Error
SYS_ERR = auto()
USR_ERR = auto()
# Helpers: End?
SYS_END = auto()
USR_END = auto()
# endregion
# region CONFIDENCES
DIALOG_BEGINNING_START_CONFIDENCE = 0.98
DIALOG_BEGINNING_CONTINUE_CONFIDENCE = 0.9
DIALOG_BEGINNING_SHORT_ANSWER_CONFIDENCE = 0.98
MIDDLE_DIALOG_START_CONFIDENCE = 0.7
SUPER_CONFIDENCE = 1.0
HIGH_CONFIDENCE = 0.98
MUST_CONTINUE_CONFIDENCE = 0.98
CANNOT_CONTINUE_CONFIDENCE = 0.0
# endregion
# endregion
################################################################################
# %%
##################################################################################################################
# Init DialogFlow
##################################################################################################################
simplified_dialogflow = dialogflow_extension.DFEasyFilling(State.USR_START)
##################################################################################################################
##################################################################################################################
# Design DialogFlow.
##################################################################################################################
##################################################################################################################
##################################################################################################################
# utils
def get_people_for_topic(cobot_topic):
# human-curated list (top 10-20 for 2010s)
peoples = [
list(x.get("People", [])) for x in common_gossip.TOPICS_TO_PEOPLE_MAPPINGS if x.get("Topic", "") == cobot_topic
]
peoples = list(set(sum(peoples, [])))
# wikidata-based list
top_people_from_wiki = utils.get_top_people_from_wiki_for_cobot_topic(cobot_topic, peoples)
return top_people_from_wiki + peoples
def get_phrase_about_person_in_content(person, content):
# TODO: "." for what?
sentences_list = content.split(".")
for sentence in sentences_list:
if sentence.lower().count(person.lower()) > 0:
return sentence
def save_mentioned_person(vars, person, judgement, share_memory_key):
if person:
shared_memory = state_utils.get_shared_memory(vars)
# "people_mentioned_by_bot"
# obtaining a list of previously mentioned people
all_mentioned_people = shared_memory.get(share_memory_key, [])
are_judgements = [x.get("Judgement", "") == judgement for x in all_mentioned_people]
if sum(are_judgements) == 0:
all_mentioned_people.append({"Judgement": judgement, "People": []})
are_judgements.append(True)
judgement_index = are_judgements.index(True)
people_list = all_mentioned_people[judgement_index]["People"]
is_this_person = people_list and people_list[-1] == person
if not is_this_person:
all_mentioned_people[judgement_index]["People"].append(person)
state_utils.save_to_shared_memory(vars, **{share_memory_key: all_mentioned_people})
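# Illustrative note (assumed example values): after
#   save_mentioned_person(vars, "Adele", "Liked", "people_mentioned_by_bot")
# the shared-memory entry has the shape
#   [{"Judgement": "Liked", "People": ["Adele"]}]
# get_mentioned_people() below filters these dicts by judgement and flattens the lists.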
def get_mentioned_people(vars, share_memory_key="", judgements=["Liked", "Disliked", "Not Interested", "Other"]):
shared_memory = state_utils.get_shared_memory(vars)
# obtaining a list of previously mentioned people
all_mentioned_people = shared_memory.get(share_memory_key, [])
if all_mentioned_people:
peoples = [list(x.get("People", [])) for x in all_mentioned_people if x.get("Judgement", "") in judgements]
peoples = list(set(sum(peoples, [])))
return peoples
else:
return []
# inefficient if number of people is finite
def get_fresh_person_for_topic(vars, cobot_topic):
all_mentioned_people = set(get_mentioned_people(vars, share_memory_key="people_mentioned_by_bot"))
topic_people = [
list(i.get("People", [])) for i in common_gossip.TOPICS_TO_PEOPLE_MAPPINGS if i.get("Topic", "") == cobot_topic
]
topic_people = sum(topic_people, [])
topic_people = set(topic_people)
topic_people = topic_people - all_mentioned_people
if topic_people:
return random.choice(list(topic_people))
def mark_news_as_mentioned_by_bot(vars, news_title):
shared_memory = state_utils.get_shared_memory(vars)
# obtaining a list of previously mentioned news
all_mentioned_news = shared_memory.get("news_mentioned_by_bot", [])
all_mentioned_news.append(news_title)
# saving
state_utils.save_to_shared_memory(vars, all_mentioned_news=all_mentioned_news)
def get_people_related_to_bot_mentioned_ones(vars, user_mentioned_person):
# for the time being, we support only one user
related_people = []
if not user_mentioned_person:
return related_people
# user_mentioned_person = user_mentioned_people[0]
people_mentioned_and_liked_by_bot = get_mentioned_people(vars, "people_mentioned_by_bot", ["Liked", "Disliked"])
for person in people_mentioned_and_liked_by_bot:
relationship = utils.get_relationship_between_two_people(user_mentioned_person, person)
if relationship:
related_people.append([person, relationship])
return related_people
def get_people_related_to_user_mentioned_ones(vars, user_mentioned_person):
# for the time being, we support only one user
related_people = []
if not user_mentioned_person:
return related_people
# user_mentioned_person = user_mentioned_people[0]
people_mentioned_and_liked_by_user = get_mentioned_people(vars, "people_mentioned_by_user", ["Liked", "Disliked"])
for person in people_mentioned_and_liked_by_user:
relationship = utils.get_relationship_between_two_people(user_mentioned_person, person)
if relationship:
related_people.append([person, relationship])
return related_people
def get_news_for_topic(vars, cobot_topic):
people = get_people_for_topic(cobot_topic)
mentioned_people = get_mentioned_people(vars, share_memory_key="people_mentioned_by_bot")
people = [person for person in people if person not in mentioned_people]
if people:
person = random.choice(people)
curr_news = general_this_news.get_news_about_topic(person, NEWS_API_ANNOTATOR_URL)
logger.debug(f"news = {curr_news}")
if curr_news and "content" in curr_news and "title" in curr_news:
content = curr_news["content"].split("[")[0]
title = curr_news["title"]
if person.lower() in content.lower():
logger.debug("random_person was mentioned in content")
filtered_content = get_phrase_about_person_in_content(person.lower(), content.lower())
return person, title, filtered_content
elif person.lower() in title.lower():
logger.debug("random_person was mentioned in title")
return person, title, content
topic_news = [
list(i["News"]) for i in this_news.TEMPORARY_NEWS_FOR_COBOT_TOPICS if i.get("Topic", "") == cobot_topic
]
topic_news = sum(topic_news, [])
logger.debug(f"topic_news={topic_news}")
if topic_news:
random_news = random.choice(topic_news)
person = random_news["Person"]
title = random_news["Title"]
content = random_news["Content"]
else:
person = ""
title = ""
content = ""
return person, title, content
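# Illustrative note on the contract above: the helper always returns a
# (person, title, content) triple; when neither the news annotator nor the
# hard-coded TEMPORARY_NEWS_FOR_COBOT_TOPICS fallback yields anything, all three
# are empty strings, so callers must handle that case.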
def get_random_judgement_for_emotion(emotion):
judgements = [
list(x.get("People", [])) for x in this_gossip.TARGET_JUDGEMENTS_FOR_EMOTION if x["Emotion"] in emotion
]
judgements = list(set(sum(judgements, [])))
return random.choice(judgements) if judgements else "Great"
supported_cobot_topics = [
"Entertainment_Movies",
"Entertainment_Music",
"Entertainment_Books",
"Sports",
"Politics",
"Entertainment_General",
"Science_and_Technology",
]
def get_supported_cobot_topics(vars):
topics = common_utils.get_topics(state_utils.get_last_human_utterance(vars), which="cobot_dialogact_topics")
selected_topics = set(topics) & set(supported_cobot_topics)
selected_topics = selected_topics if selected_topics else supported_cobot_topics
return selected_topics
##################################################################################################################
# error
##################################################################################################################
def error_response(vars):
logger.debug("exec error_response")
state_utils.set_confidence(vars, CANNOT_CONTINUE_CONFIDENCE)
state_utils.set_confidence(vars, 0)
return ""
##################################################################################################################
# Handlers
##################################################################################################################
def talk_about_gossip_request(ngrams, vars):
human_utterance = state_utils.get_last_human_utterance(vars)
bot_utterance = state_utils.get_last_bot_utterance(vars)
flag = talk_about_gossip(human_utterance, bot_utterance)
logger.info(f"talk_about_gossip_request: {flag}")
return flag
# region CELEBRITY
##################################################################################################################
def set_people_jobs(vars, celebrity_name, core_jobs, other_jobs, mentioned_jobs):
shared_memory = state_utils.get_shared_memory(vars)
people_jobs = shared_memory.get("people_jobs", {})
mentioned_jobs = people_jobs.get(celebrity_name, {}).get("Mentioned_Jobs", []) + mentioned_jobs
mentioned_jobs = list(set(mentioned_jobs))
people_jobs[celebrity_name] = {"Jobs": core_jobs, "Other_Jobs": other_jobs, "Mentioned_Jobs": mentioned_jobs}
state_utils.save_to_shared_memory(vars, people_jobs=people_jobs)
def get_people_jobs(vars, celebrity_name):
shared_memory = state_utils.get_shared_memory(vars)
people_jobs = shared_memory.get("people_jobs", {})
celebrity_jobs = people_jobs.get(celebrity_name, {})
argument_names = ["Jobs", "Other_Jobs", "Mentioned_Jobs"]
return [celebrity_jobs.get(argument_name, []) for argument_name in argument_names]
def get_mentioned_jobs(vars, celebrity_name):
jobs = get_people_jobs(vars, celebrity_name)[-1]
return jobs if jobs else []
def get_celebrity_from_uttr(vars, exclude_types=False, use_only_last_utt=False):
# Look only at one turn
# shared_memory = state_utils.get_shared_memory(vars)
human_utterance = state_utils.get_last_human_utterance(vars)
logger.debug(f'Calling get_celebrity_from_uttr on {human_utterance["text"]} {exclude_types} {use_only_last_utt}')
# we need to get all supported occupations
celebrity_name, matching_types, mismatching_types = common_gossip.celebrity_from_uttr(human_utterance)
logger.warning(f"Relations {celebrity_name} {matching_types} {mismatching_types}")
if not celebrity_name or not matching_types:
return None, None
mentioned_jobs = get_mentioned_jobs(vars, celebrity_name)
mismatching_types = [type_ for type_ in mismatching_types if type_ not in mentioned_jobs]
if exclude_types and mismatching_types:
mentioned_job = random.choice(mismatching_types)
else:
mentioned_job = random.choice(matching_types)
set_people_jobs(vars, celebrity_name, matching_types, mismatching_types, mentioned_jobs + [mentioned_job])
logger.debug(f"Answer for get_celebrity exclude_types {exclude_types} : {celebrity_name} {mentioned_job}")
return celebrity_name, mentioned_job
def sys_celebrity_found_request(ngrams, vars, use_only_last_utt=True):
shared_memory = state_utils.get_shared_memory(vars)
asked_celebrities = shared_memory.get("asked_celebrities", [])
person, occupation = get_celebrity_from_uttr(vars, use_only_last_utt=use_only_last_utt)
flag = person and person not in asked_celebrities
logger.info(f"celebrity_in_phrase_request : {flag}")
return flag
# get occupations for current_person
# build phrase about
def get_celebrity_prompt(vars, person):
logger.debug("Celebrity branch")
shared_memory = state_utils.get_shared_memory(vars)
# persons = get_mentioned_people(vars, share_memory_key="people_mentioned_by_user")
just_was_celebrity_prompt = shared_memory.get("celebrity_prompt", False)
used_celeb_prompts = shared_memory.get("used_celeb_prompts", [])
last_bot_uttr = state_utils.get_last_bot_utterance(vars)["text"]
prompt = None
if person:
# logger.info(f"get_celebrity_prompt for people: {persons}")
# logger.debug(str(persons[-1]))
# person = persons[-1]
logger.info(f"get_celebrity_prompt for person: {person}")
matching_jobs, mismatching_jobs, mentioned_jobs = get_people_jobs(vars, person)
logger.info(f"{person} {matching_jobs} {mismatching_jobs}")
logger.info(f"Mismatching_jobs len {len(mismatching_jobs)}")
if matching_jobs:
logger.info(f"get_celebrity_prompt: matching jobs! just was celebrity prompt? {just_was_celebrity_prompt}")
is_actor = "actor" in " ".join(matching_jobs)
prompt_candidate = (
f"{person} is an amazing {matching_jobs[0]}! " "Would you like to learn more about this person?"
)
we_not_repeat_start_prompt = prompt_candidate not in used_celeb_prompts
actor_asking = "What is your favourite film with this actor?"
if not just_was_celebrity_prompt and matching_jobs and we_not_repeat_start_prompt:
logger.debug("start prompt")
prompt = prompt_candidate
elif just_was_celebrity_prompt and matching_jobs and mismatching_jobs and actor_asking not in last_bot_uttr:
logger.info("get_celebrity_prompt: just was celebrity prompt and actor:")
rand_job = random.choice(mismatching_jobs)
prompt = f"{person} is also a {rand_job}. "
if "actor" in rand_job:
asking = actor_asking
else:
questions = this_gossip.WANT_TO_HEAR_ANOTHER_FACT
asking = random.choice(questions)
prompt = f"{prompt} {asking}"
mismatching_jobs.remove(rand_job)
mentioned_jobs.append(rand_job)
save_mentioned_person(vars, person, "Other", "people_mentioned_by_user")
set_people_jobs(vars, person, matching_jobs, mismatching_jobs, mentioned_jobs)
elif just_was_celebrity_prompt and matching_jobs and actor_asking not in last_bot_uttr:
logger.info("get_celebrity_prompt: just was celebrity prompt and actor:")
prompt = get_cobot_fact(person, used_celeb_prompts)
logger.debug(f"Cobot prompt {prompt}")
elif just_was_celebrity_prompt and not mismatching_jobs and not is_actor:
logger.info("get_celebrity_prompt: just was celebrity prompt and non-actor:")
prompt = get_cobot_fact(person, used_celeb_prompts)
logger.debug(f"Cobot prompt {prompt}")
if prompt:
state_utils.save_to_shared_memory(
vars, celebrity_prompt=True, used_celeb_prompts=used_celeb_prompts + [prompt]
)
return prompt
# region TOPIC_TO_EVENT
##################################################################################################################
def sys_topic_to_event_request(ngrams, vars):
# we get here because user mentioned a topic, or we've got a topic
# ok so let's for the time being believe that we are here by default - just because a topic was mentioned
bot_utterance = state_utils.get_last_bot_utterance(vars)
flag = (bool(get_supported_cobot_topics(vars)) and talk_about_gossip_request(ngrams, vars)) or (
any([phrase in bot_utterance["text"] for phrase in skill_trigger_phrases()])
        and
= DeferredForeignKey("Image", null=True, backref="children")
class Meta:
database = db
read_only_config = read_only_config
indexes = (
# we don't really want duplicates
(("repository", "docker_image_id"), True),
(("security_indexed_engine", "security_indexed"), False),
)
def ancestor_id_list(self):
""" Returns an integer list of ancestor ids, ordered chronologically from
root to direct parent.
"""
return map(int, self.ancestors.split("/")[1:-1])
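    # Illustrative note (assumed example value): an `ancestors` string of "/3/7/12/"
    # parses to the ids 3, 7, 12, ordered from root to direct parent. In Python 3
    # `map` returns an iterator, so callers wanting an actual list must wrap it.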
class DerivedStorageForImage(BaseModel):
source_image = ForeignKeyField(Image)
derivative = ForeignKeyField(ImageStorage)
transformation = ForeignKeyField(ImageStorageTransformation)
uniqueness_hash = CharField(null=True)
class Meta:
database = db
read_only_config = read_only_config
indexes = ((("source_image", "transformation", "uniqueness_hash"), True),)
class RepositoryTag(BaseModel):
name = CharField()
image = ForeignKeyField(Image)
repository = ForeignKeyField(Repository)
lifetime_start_ts = IntegerField(default=get_epoch_timestamp)
lifetime_end_ts = IntegerField(null=True, index=True)
hidden = BooleanField(default=False)
reversion = BooleanField(default=False)
class Meta:
database = db
read_only_config = read_only_config
indexes = (
(("repository", "name"), False),
(("repository", "lifetime_start_ts"), False),
(("repository", "lifetime_end_ts"), False),
# This unique index prevents deadlocks when concurrently moving and deleting tags
(("repository", "name", "lifetime_end_ts"), True),
)
class BUILD_PHASE(object):
""" Build phases enum """
ERROR = "error"
INTERNAL_ERROR = "internalerror"
BUILD_SCHEDULED = "build-scheduled"
UNPACKING = "unpacking"
PULLING = "pulling"
BUILDING = "building"
PUSHING = "pushing"
WAITING = "waiting"
COMPLETE = "complete"
CANCELLED = "cancelled"
@classmethod
def is_terminal_phase(cls, phase):
return (
phase == cls.COMPLETE
or phase == cls.ERROR
or phase == cls.INTERNAL_ERROR
or phase == cls.CANCELLED
)
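    # Illustrative usage (not executed): BUILD_PHASE.is_terminal_phase("complete") and
    # BUILD_PHASE.is_terminal_phase("cancelled") are True, while in-flight phases such
    # as "building" or "pulling" return False.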
class TRIGGER_DISABLE_REASON(object):
""" Build trigger disable reason enum """
BUILD_FALURES = "successive_build_failures"
INTERNAL_ERRORS = "successive_build_internal_errors"
USER_TOGGLED = "user_toggled"
class QueueItem(BaseModel):
queue_name = CharField(index=True, max_length=1024)
body = TextField()
available_after = DateTimeField(default=datetime.utcnow)
available = BooleanField(default=True)
processing_expires = DateTimeField(null=True)
retries_remaining = IntegerField(default=5)
state_id = CharField(default=uuid_generator, index=True, unique=True)
class Meta:
database = db
read_only_config = read_only_config
only_save_dirty = True
indexes = (
(("processing_expires", "available"), False),
(("processing_expires", "queue_name", "available"), False),
(("processing_expires", "available_after", "retries_remaining", "available"), False),
(
(
"processing_expires",
"available_after",
"queue_name",
"retries_remaining",
"available",
),
False,
),
)
def save(self, *args, **kwargs):
# Always change the queue item's state ID when we update it.
self.state_id = str(uuid.uuid4())
super(QueueItem, self).save(*args, **kwargs)
class RepositoryBuild(BaseModel):
uuid = CharField(default=uuid_generator, index=True)
repository = ForeignKeyField(Repository)
access_token = ForeignKeyField(AccessToken)
resource_key = CharField(index=True, null=True)
job_config = TextField()
phase = CharField(default=BUILD_PHASE.WAITING)
started = DateTimeField(default=datetime.now, index=True)
display_name = CharField()
trigger = ForeignKeyField(RepositoryBuildTrigger, null=True)
pull_robot = QuayUserField(
null=True, backref="buildpullrobot", allows_robots=True, robot_null_delete=True
)
logs_archived = BooleanField(default=False, index=True)
queue_id = CharField(null=True, index=True)
class Meta:
database = db
read_only_config = read_only_config
indexes = (
(("repository", "started", "phase"), False),
(("started", "logs_archived", "phase"), False),
)
class LogEntryKind(BaseModel):
name = CharField(index=True, unique=True)
class LogEntry(BaseModel):
id = BigAutoField()
kind = ForeignKeyField(LogEntryKind)
account = IntegerField(index=True, column_name="account_id")
performer = IntegerField(index=True, null=True, column_name="performer_id")
repository = IntegerField(index=True, null=True, column_name="repository_id")
datetime = DateTimeField(default=datetime.now, index=True)
ip = CharField(null=True)
metadata_json = TextField(default="{}")
class Meta:
database = db
read_only_config = read_only_config
indexes = (
(("account", "datetime"), False),
(("performer", "datetime"), False),
(("repository", "datetime"), False),
(("repository", "datetime", "kind"), False),
)
class LogEntry2(BaseModel):
""" TEMP FOR QUAY.IO ONLY. DO NOT RELEASE INTO QUAY ENTERPRISE. """
kind = ForeignKeyField(LogEntryKind)
account = IntegerField(index=True, db_column="account_id")
performer = IntegerField(index=True, null=True, db_column="performer_id")
repository = IntegerField(index=True, null=True, db_column="repository_id")
datetime = DateTimeField(default=datetime.now, index=True)
ip = CharField(null=True)
metadata_json = TextField(default="{}")
class Meta:
database = db
read_only_config = read_only_config
indexes = (
(("account", "datetime"), False),
(("performer", "datetime"), False),
(("repository", "datetime"), False),
(("repository", "datetime", "kind"), False),
)
class LogEntry3(BaseModel):
id = BigAutoField()
kind = IntegerField(db_column="kind_id")
account = IntegerField(db_column="account_id")
performer = IntegerField(null=True, db_column="performer_id")
repository = IntegerField(null=True, db_column="repository_id")
datetime = DateTimeField(default=datetime.now, index=True)
ip = CharField(null=True)
metadata_json = TextField(default="{}")
class Meta:
database = db
read_only_config = read_only_config
indexes = (
(("account", "datetime"), False),
(("performer", "datetime"), False),
(("repository", "datetime", "kind"), False),
)
class RepositoryActionCount(BaseModel):
repository = ForeignKeyField(Repository)
count = IntegerField()
date = DateField(index=True)
class Meta:
database = db
read_only_config = read_only_config
indexes = (
# create a unique index on repository and date
(("repository", "date"), True),
)
class OAuthApplication(BaseModel):
client_id = CharField(index=True, default=random_string_generator(length=20))
secure_client_secret = EncryptedCharField(default_token_length=40, null=True)
fully_migrated = BooleanField(default=False)
# TODO(remove-unenc): This field is deprecated and should be removed soon.
client_secret = deprecated_field(
CharField(default=random_string_generator(length=40), null=True),
ERTMigrationFlags.WRITE_OLD_FIELDS,
)
redirect_uri = CharField()
application_uri = CharField()
organization = QuayUserField()
name = CharField()
description = TextField(default="")
avatar_email = CharField(null=True, column_name="gravatar_email")
class OAuthAuthorizationCode(BaseModel):
application = ForeignKeyField(OAuthApplication)
# TODO(remove-unenc): This field is deprecated and should be removed soon.
code = deprecated_field(
CharField(index=True, unique=True, null=True), ERTMigrationFlags.WRITE_OLD_FIELDS
)
code_name = CharField(index=True, unique=True)
code_credential = CredentialField()
scope = CharField()
data = TextField() # Context for the code, such as the user
class OAuthAccessToken(BaseModel):
uuid = CharField(default=uuid_generator, index=True)
application = ForeignKeyField(OAuthApplication)
authorized_user = QuayUserField()
scope = CharField()
token_name = CharField(index=True, unique=True)
token_code = CredentialField()
# TODO(remove-unenc): This field is deprecated and should be removed soon.
access_token = deprecated_field(
CharField(index=True, null=True), ERTMigrationFlags.WRITE_OLD_FIELDS
)
token_type = CharField(default="Bearer")
expires_at = DateTimeField()
data = TextField() # This is context for which this token was generated, such as the user
class NotificationKind(BaseModel):
name = CharField(index=True, unique=True)
class Notification(BaseModel):
uuid = CharField(default=uuid_generator, index=True)
kind = ForeignKeyField(NotificationKind)
target = QuayUserField(index=True, allows_robots=True)
metadata_json = TextField(default="{}")
created = DateTimeField(default=datetime.now, index=True)
dismissed = BooleanField(default=False)
lookup_path = CharField(null=True, index=True)
class ExternalNotificationEvent(BaseModel):
name = CharField(index=True, unique=True)
class ExternalNotificationMethod(BaseModel):
name = CharField(index=True, unique=True)
class RepositoryNotification(BaseModel):
uuid = CharField(default=uuid_generator, index=True)
repository = ForeignKeyField(Repository)
event = ForeignKeyField(ExternalNotificationEvent)
method = ForeignKeyField(ExternalNotificationMethod)
title = CharField(null=True)
config_json = TextField()
event_config_json = TextField(default="{}")
number_of_failures = IntegerField(default=0)
class RepositoryAuthorizedEmail(BaseModel):
repository = ForeignKeyField(Repository)
email = CharField()
code = CharField(default=random_string_generator(), unique=True, index=True)
confirmed = BooleanField(default=False)
class Meta:
database = db
read_only_config = read_only_config
indexes = (
# create a unique index on email and repository
(("email", "repository"), True),
)
class BlobUpload(BaseModel):
repository = ForeignKeyField(Repository)
uuid = CharField(index=True, unique=True)
byte_count = BigIntegerField(default=0)
sha_state = ResumableSHA256Field(null=True, default=resumablehashlib.sha256)
location = ForeignKeyField(ImageStorageLocation)
storage_metadata = JSONField(null=True, default={})
chunk_count = IntegerField(default=0)
uncompressed_byte_count = BigIntegerField(null=True)
created = DateTimeField(default=datetime.now, index=True)
piece_sha_state = ResumableSHA1Field(null=True)
piece_hashes = Base64BinaryField(null=True)
class Meta:
database = db
read_only_config = read_only_config
indexes = (
            # create a unique index on repository and uuid
(("repository", "uuid"), True),
)
class QuayService(BaseModel):
name = CharField(index=True, unique=True)
class QuayRegion(BaseModel):
name = CharField(index=True, unique=True)
class QuayRelease(BaseModel):
service = ForeignKeyField(QuayService)
version = CharField()
region = ForeignKeyField(QuayRegion)
reverted = BooleanField(default=False)
created = DateTimeField(default=datetime.now, index=True)
class Meta:
database = db
read_only_config = read_only_config
indexes = (
# unique release per region
(("service", "version", "region"), True),
# get recent releases
(("service", "region", "created"), False),
)
class TorrentInfo(BaseModel):
storage = ForeignKeyField(ImageStorage)
piece_length = IntegerField()
pieces = Base64BinaryField()
class Meta:
database = db
read_only_config = read_only_config
indexes = (
# we may want to compute the piece hashes multiple times with different piece lengths
(("storage", "piece_length"), True),
)
class ServiceKeyApprovalType(Enum):
SUPERUSER = "Super User API"
KEY_ROTATION = "Key Rotation"
AUTOMATIC = "Automatic"
class ServiceKeyApproval(BaseModel):
approver = QuayUserField(null=True)
approval_type = CharField(index=True)
approved_date = DateTimeField(default=datetime.utcnow)
notes = TextField(default="")
class ServiceKey(BaseModel):
name = CharField()
kid = CharField(unique=True, index=True)
service = CharField(index=True)
jwk = JSONField()
metadata = JSONField()
created_date = DateTimeField(default=datetime.utcnow)
expiration_date = DateTimeField(null=True)
rotation_duration = IntegerField(null=True)
approval = ForeignKeyField(ServiceKeyApproval, null=True)
class MediaType(BaseModel):
""" MediaType is an enumeration of the possible formats of various objects in the data model.
"""
name = CharField(index=True, unique=True)
class Messages(BaseModel):
content = TextField()
uuid = CharField(default=uuid_generator, max_length=36, index=True)
severity = CharField(default="info", index=True)
media_type = ForeignKeyField(MediaType)
class LabelSourceType(BaseModel):
""" LabelSourceType is an enumeration of the possible sources for a label.
"""
name = CharField(index=True, unique=True)
mutable = BooleanField(default=False)
class Label(BaseModel):
""" Label represents user-facing metadata associated with another entry in the database (e.g. a
Manifest).
"""
uuid = CharField(default=uuid_generator, index=True, unique=True)
key = CharField(index=True)
value = TextField()
media_type = EnumField(MediaType)
source_type = EnumField(LabelSourceType)
class ApprBlob(BaseModel):
""" ApprBlob represents a content-addressable object stored outside of the database.
"""
digest = CharField(index=True, unique=True)
media_type = EnumField(MediaType)
size = BigIntegerField()
uncompressed_size = BigIntegerField(null=True)
class ApprBlobPlacementLocation(BaseModel):
""" ApprBlobPlacementLocation is an enumeration of the possible storage locations for ApprBlobs.
"""
name = CharField(index=True, unique=True)
class ApprBlobPlacement(BaseModel):
""" ApprBlobPlacement represents the location of a Blob.
"""
blob = ForeignKeyField(ApprBlob)
location = EnumField(ApprBlobPlacementLocation)
class Meta:
database = db
read_only_config = read_only_config
indexes = ((("blob", "location"), True),)
class ApprManifest(BaseModel):
""" ApprManifest represents the metadata and collection of blobs that comprise an Appr image.
"""
digest = CharField(index=True, unique=True)
media_type = EnumField(MediaType)
manifest_json = JSONField()
class ApprManifestBlob(BaseModel):
""" ApprManifestBlob is a many-to-many relation table linking ApprManifests and ApprBlobs.
"""
manifest = ForeignKeyField(ApprManifest, index=True)
blob = ForeignKeyField(ApprBlob, index=True)
class Meta:
database = db
read_only_config = read_only_config
indexes = ((("manifest", "blob"), True),)
class ApprManifestList(BaseModel):
""" ApprManifestList represents all | |
Primary: Whether this is the primary ENI.
:type Primary: bool
:param MacAddress: MAC address.
:type MacAddress: str
:param State: Value range: PENDING|AVAILABLE|ATTACHING|DETACHING|DELETING.
:type State: str
:param PrivateIpAddressSet: Private IP information.
:type PrivateIpAddressSet: list of PrivateIpAddressSpecification
:param Attachment: The CVM instance bound to the ENI.
:type Attachment: :class:`tencentcloud.vpc.v20170312.models.NetworkInterfaceAttachment`
:param Zone: Availability zone.
:type Zone: str
:param CreatedTime: Creation time.
:type CreatedTime: str
"""
self.NetworkInterfaceId = None
self.NetworkInterfaceName = None
self.NetworkInterfaceDescription = None
self.SubnetId = None
self.VpcId = None
self.GroupSet = None
self.Primary = None
self.MacAddress = None
self.State = None
self.PrivateIpAddressSet = None
self.Attachment = None
self.Zone = None
self.CreatedTime = None
def _deserialize(self, params):
self.NetworkInterfaceId = params.get("NetworkInterfaceId")
self.NetworkInterfaceName = params.get("NetworkInterfaceName")
self.NetworkInterfaceDescription = params.get("NetworkInterfaceDescription")
self.SubnetId = params.get("SubnetId")
self.VpcId = params.get("VpcId")
self.GroupSet = params.get("GroupSet")
self.Primary = params.get("Primary")
self.MacAddress = params.get("MacAddress")
self.State = params.get("State")
if params.get("PrivateIpAddressSet") is not None:
self.PrivateIpAddressSet = []
for item in params.get("PrivateIpAddressSet"):
obj = PrivateIpAddressSpecification()
obj._deserialize(item)
self.PrivateIpAddressSet.append(obj)
if params.get("Attachment") is not None:
self.Attachment = NetworkInterfaceAttachment()
self.Attachment._deserialize(params.get("Attachment"))
self.Zone = params.get("Zone")
self.CreatedTime = params.get("CreatedTime")
class NetworkInterfaceAttachment(AbstractModel):
"""弹性网卡绑定关系
"""
def __init__(self):
"""
:param InstanceId: CVM instance ID.
:type InstanceId: str
:param DeviceIndex: Index of the ENI within the CVM instance.
:type DeviceIndex: int
:param InstanceAccountId: Account information of the CVM instance owner.
:type InstanceAccountId: str
:param AttachTime: Binding time.
:type AttachTime: str
"""
self.InstanceId = None
self.DeviceIndex = None
self.InstanceAccountId = None
self.AttachTime = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.DeviceIndex = params.get("DeviceIndex")
self.InstanceAccountId = params.get("InstanceAccountId")
self.AttachTime = params.get("AttachTime")
class Price(AbstractModel):
"""价格
"""
def __init__(self):
"""
:param InstancePrice: Instance price.
:type InstancePrice: :class:`tencentcloud.vpc.v20170312.models.ItemPrice`
:param BandwidthPrice: Bandwidth price.
:type BandwidthPrice: :class:`tencentcloud.vpc.v20170312.models.ItemPrice`
"""
self.InstancePrice = None
self.BandwidthPrice = None
def _deserialize(self, params):
if params.get("InstancePrice") is not None:
self.InstancePrice = ItemPrice()
self.InstancePrice._deserialize(params.get("InstancePrice"))
if params.get("BandwidthPrice") is not None:
self.BandwidthPrice = ItemPrice()
self.BandwidthPrice._deserialize(params.get("BandwidthPrice"))
class PrivateIpAddressSpecification(AbstractModel):
"""内网IP信息
"""
def __init__(self):
"""
:param PrivateIpAddress: Private IP address.
:type PrivateIpAddress: str
:param Primary: Whether this is the primary IP.
:type Primary: bool
:param PublicIpAddress: Public IP address.
:type PublicIpAddress: str
:param AddressId: EIP instance ID, e.g. eip-11112222.
:type AddressId: str
:param Description: Description of the private IP.
:type Description: str
:param IsWanIpBlocked: Whether the public IP is blocked.
:type IsWanIpBlocked: bool
"""
self.PrivateIpAddress = None
self.Primary = None
self.PublicIpAddress = None
self.AddressId = None
self.Description = None
self.IsWanIpBlocked = None
def _deserialize(self, params):
self.PrivateIpAddress = params.get("PrivateIpAddress")
self.Primary = params.get("Primary")
self.PublicIpAddress = params.get("PublicIpAddress")
self.AddressId = params.get("AddressId")
self.Description = params.get("Description")
self.IsWanIpBlocked = params.get("IsWanIpBlocked")
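# Illustrative sketch, not part of the SDK source: every model in this module follows the
# same _deserialize contract -- feed it the raw API response dict and nested objects
# (PrivateIpAddressSpecification, NetworkInterfaceAttachment, ...) are built recursively.
# The enclosing ENI model is assumed to be the SDK's NetworkInterface class, and the IDs
# and addresses below are hypothetical example values.
def _example_deserialize_network_interface():
    example_params = {
        "NetworkInterfaceId": "eni-f1xjkw1b",  # hypothetical ENI ID
        "Primary": True,
        "PrivateIpAddressSet": [
            {"PrivateIpAddress": "172.16.0.10", "Primary": True},
        ],
        "Attachment": {"InstanceId": "ins-r8hr2upy", "DeviceIndex": 0},
    }
    eni = NetworkInterface()        # assumed model class, defined earlier in this module
    eni._deserialize(example_params)
    # Nested dicts have become model instances:
    return eni.PrivateIpAddressSet[0].PrivateIpAddress, eni.Attachment.InstanceId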
class Quota(AbstractModel):
"""描述了配额信息
"""
def __init__(self):
"""
:param QuotaId: Quota name. Value range:<br><li>`TOTAL_EIP_QUOTA`: EIP quota for the user in the current region;<br><li>`DAILY_EIP_APPLY`: number of EIP purchase requests made today in the current region;<br><li>`DAILY_PUBLIC_IP_ASSIGN`: number of public IP reassignments in the current region.
:type QuotaId: str
:param QuotaCurrent: Current quantity.
:type QuotaCurrent: int
:param QuotaLimit: Quota limit.
:type QuotaLimit: int
"""
self.QuotaId = None
self.QuotaCurrent = None
self.QuotaLimit = None
def _deserialize(self, params):
self.QuotaId = params.get("QuotaId")
self.QuotaCurrent = params.get("QuotaCurrent")
self.QuotaLimit = params.get("QuotaLimit")
class ReleaseAddressesRequest(AbstractModel):
"""ReleaseAddresses请求参数结构体
"""
def __init__(self):
"""
:param AddressIds: List of unique IDs identifying the EIPs. A unique EIP ID looks like `eip-11112222`.
:type AddressIds: list of str
"""
self.AddressIds = None
def _deserialize(self, params):
self.AddressIds = params.get("AddressIds")
class ReleaseAddressesResponse(AbstractModel):
"""ReleaseAddresses返回参数结构体
"""
def __init__(self):
"""
:param RequestId: Unique request ID, returned with every request. RequestId is required when locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class RenewVpnGatewayRequest(AbstractModel):
"""RenewVpnGateway请求参数结构体
"""
def __init__(self):
"""
:param VpnGatewayId: VPN gateway instance ID.
:type VpnGatewayId: str
:param InstanceChargePrepaid: Prepaid billing mode.
:type InstanceChargePrepaid: :class:`tencentcloud.vpc.v20170312.models.InstanceChargePrepaid`
"""
self.VpnGatewayId = None
self.InstanceChargePrepaid = None
def _deserialize(self, params):
self.VpnGatewayId = params.get("VpnGatewayId")
if params.get("InstanceChargePrepaid") is not None:
self.InstanceChargePrepaid = InstanceChargePrepaid()
self.InstanceChargePrepaid._deserialize(params.get("InstanceChargePrepaid"))
class RenewVpnGatewayResponse(AbstractModel):
"""RenewVpnGateway返回参数结构体
"""
def __init__(self):
"""
:param RequestId: Unique request ID, returned with every request. RequestId is required when locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ReplaceRouteTableAssociationRequest(AbstractModel):
"""ReplaceRouteTableAssociation请求参数结构体
"""
def __init__(self):
"""
:param SubnetId: Subnet instance ID, e.g. subnet-3x5lf5q0. Can be queried via the DescribeSubnets API.
:type SubnetId: str
:param RouteTableId: Route table instance ID, e.g. rtb-azd4dt1c.
:type RouteTableId: str
"""
self.SubnetId = None
self.RouteTableId = None
def _deserialize(self, params):
self.SubnetId = params.get("SubnetId")
self.RouteTableId = params.get("RouteTableId")
class ReplaceRouteTableAssociationResponse(AbstractModel):
"""ReplaceRouteTableAssociation返回参数结构体
"""
def __init__(self):
"""
:param RequestId: Unique request ID, returned with every request. RequestId is required when locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ReplaceRoutesRequest(AbstractModel):
"""ReplaceRoutes请求参数结构体
"""
def __init__(self):
"""
:param RouteTableId: Route table instance ID, e.g. rtb-azd4dt1c.
:type RouteTableId: str
:param Routes: Route policy objects. Only the route policy ID (RouteId) needs to be specified.
:type Routes: list of Route
"""
self.RouteTableId = None
self.Routes = None
def _deserialize(self, params):
self.RouteTableId = params.get("RouteTableId")
if params.get("Routes") is not None:
self.Routes = []
for item in params.get("Routes"):
obj = Route()
obj._deserialize(item)
self.Routes.append(obj)
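# Illustrative sketch of the ReplaceRoutes request shape documented above: each entry in
# Routes carries the route policy ID (RouteId) plus the fields to overwrite. The route
# table ID, route ID and gateway ID below are hypothetical example values.
def _example_replace_routes_request():
    req = ReplaceRoutesRequest()
    req._deserialize({
        "RouteTableId": "rtb-azd4dt1c",
        "Routes": [
            {"RouteId": 101, "DestinationCidrBlock": "10.0.8.0/24",
             "GatewayType": "NAT", "GatewayId": "nat-ig8xpno8"},
        ],
    })
    return req.Routes[0].RouteId    # entries in Routes have become Route instances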
class ReplaceRoutesResponse(AbstractModel):
"""ReplaceRoutes返回参数结构体
"""
def __init__(self):
"""
:param RequestId: Unique request ID, returned with every request. RequestId is required when locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ReplaceSecurityGroupPolicyRequest(AbstractModel):
"""ReplaceSecurityGroupPolicy请求参数结构体
"""
def __init__(self):
"""
:param SecurityGroupId: Security group instance ID, e.g. sg-33ocnj9n. Can be obtained via DescribeSecurityGroups.
:type SecurityGroupId: str
:param SecurityGroupPolicySet: Security group policy set object.
:type SecurityGroupPolicySet: :class:`tencentcloud.vpc.v20170312.models.SecurityGroupPolicySet`
"""
self.SecurityGroupId = None
self.SecurityGroupPolicySet = None
def _deserialize(self, params):
self.SecurityGroupId = params.get("SecurityGroupId")
if params.get("SecurityGroupPolicySet") is not None:
self.SecurityGroupPolicySet = SecurityGroupPolicySet()
self.SecurityGroupPolicySet._deserialize(params.get("SecurityGroupPolicySet"))
class ReplaceSecurityGroupPolicyResponse(AbstractModel):
"""ReplaceSecurityGroupPolicy返回参数结构体
"""
def __init__(self):
"""
:param RequestId: Unique request ID, returned with every request. RequestId is required when locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ResetRoutesRequest(AbstractModel):
"""ResetRoutes请求参数结构体
"""
def __init__(self):
"""
:param RouteTableId: Route table instance ID, e.g. rtb-azd4dt1c.
:type RouteTableId: str
:param RouteTableName: Route table name, up to 60 bytes.
:type RouteTableName: str
:param Routes: Route policies.
:type Routes: list of Route
"""
self.RouteTableId = None
self.RouteTableName = None
self.Routes = None
def _deserialize(self, params):
self.RouteTableId = params.get("RouteTableId")
self.RouteTableName = params.get("RouteTableName")
if params.get("Routes") is not None:
self.Routes = []
for item in params.get("Routes"):
obj = Route()
obj._deserialize(item)
self.Routes.append(obj)
class ResetRoutesResponse(AbstractModel):
"""ResetRoutes返回参数结构体
"""
def __init__(self):
"""
:param RequestId: Unique request ID, returned with every request. RequestId is required when locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ResetVpnConnectionRequest(AbstractModel):
"""ResetVpnConnection请求参数结构体
"""
def __init__(self):
"""
:param VpnGatewayId: VPN gateway instance ID.
:type VpnGatewayId: str
:param VpnConnectionId: VPN tunnel instance ID, e.g. vpnx-f49l6u0z.
:type VpnConnectionId: str
"""
self.VpnGatewayId = None
self.VpnConnectionId = None
def _deserialize(self, params):
self.VpnGatewayId = params.get("VpnGatewayId")
self.VpnConnectionId = params.get("VpnConnectionId")
class ResetVpnConnectionResponse(AbstractModel):
"""ResetVpnConnection返回参数结构体
"""
def __init__(self):
"""
:param RequestId: Unique request ID, returned with every request. RequestId is required when locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ResetVpnGatewayInternetMaxBandwidthRequest(AbstractModel):
"""ResetVpnGatewayInternetMaxBandwidth请求参数结构体
"""
def __init__(self):
"""
:param VpnGatewayId: VPN gateway instance ID.
:type VpnGatewayId: str
:param InternetMaxBandwidthOut: Public network bandwidth setting. Available bandwidth specifications: 5, 10, 20, 50, 100; unit: Mbps.
:type InternetMaxBandwidthOut: int
"""
self.VpnGatewayId = None
self.InternetMaxBandwidthOut = None
def _deserialize(self, params):
self.VpnGatewayId = params.get("VpnGatewayId")
self.InternetMaxBandwidthOut = params.get("InternetMaxBandwidthOut")
class ResetVpnGatewayInternetMaxBandwidthResponse(AbstractModel):
"""ResetVpnGatewayInternetMaxBandwidth返回参数结构体
"""
def __init__(self):
"""
:param RequestId: Unique request ID, returned with every request. RequestId is required when locating a problem.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class Route(AbstractModel):
"""路由策略对象
"""
def __init__(self):
"""
:param DestinationCidrBlock: Destination CIDR block; the value cannot fall within the VPC's own CIDR range, e.g. 192.168.127.12/24.
:type DestinationCidrBlock: str
:param GatewayType: Next-hop type. Currently supported types: CVM: public-gateway CVM instance; VPN: VPN gateway; DIRECTCONNECT: direct connect gateway; PEERCONNECTION: peering connection; SSLVPN: SSL VPN gateway; NAT: NAT gateway; NORMAL_CVM: normal CVM instance.
:type GatewayType: str
:param GatewayId: Next-hop address. Only the gateway ID of the corresponding next-hop type needs to be specified; the system automatically resolves the next-hop address.
:type GatewayId: str
:param RouteId: Route policy ID.
:type RouteId: int
:param RouteDescription: Route policy description.
:type RouteDescription: str
:param Enabled: Whether the route is enabled.
:type Enabled: bool
"""
self.DestinationCidrBlock = None
self.GatewayType = None
self.GatewayId = None
self.RouteId = None
self.RouteDescription = None
self.Enabled = None
def _deserialize(self, params):
self.DestinationCidrBlock = params.get("DestinationCidrBlock")
self.GatewayType = params.get("GatewayType")
self.GatewayId = params.get("GatewayId")
self.RouteId = params.get("RouteId")
self.RouteDescription = params.get("RouteDescription")
self.Enabled = params.get("Enabled")
class RouteTable(AbstractModel):
"""路由表对象
"""
def __init__(self):
"""
:param VpcId: VPC instance ID.
:type VpcId: str
:param RouteTableId: Route table instance ID, e.g. rtb-azd4dt1c.
:type RouteTableId: str
:param RouteTableName: Route table name.
:type RouteTableName: str
:param AssociationSet: Route table association relationships.
:type AssociationSet: list of RouteTableAssociation
:param RouteSet: Collection of route policies in the route table.
:type RouteSet: list of Route
:param Main: Whether this is the default route table.
:type Main: bool
:param CreatedTime: Creation time.
:type CreatedTime: str
"""
self.VpcId = None
self.RouteTableId = None
self.RouteTableName = None
self.AssociationSet = None
self.RouteSet = None
self.Main = None
self.CreatedTime = None
def _deserialize(self, params):
self.VpcId = params.get("VpcId")
self.RouteTableId = params.get("RouteTableId")
self.RouteTableName = params.get("RouteTableName")
if params.get("AssociationSet") is not None:
self.AssociationSet = []
for item in params.get("AssociationSet"):
obj = RouteTableAssociation()
obj._deserialize(item)
self.AssociationSet.append(obj)
if params.get("RouteSet") is not None:
self.RouteSet = []
for item in params.get("RouteSet"):
obj = Route()
obj._deserialize(item)
self.RouteSet.append(obj)
self.Main = params.get("Main")
self.CreatedTime = params.get("CreatedTime")
class RouteTableAssociation(AbstractModel):
"""路由表关联关系
"""
def __init__(self):
"""
:param SubnetId: Subnet instance ID.
:type SubnetId: str
:param RouteTableId: Route table instance ID.
:type RouteTableId: str
"""
self.SubnetId = None
self.RouteTableId = None
def _deserialize(self, params):
self.SubnetId = params.get("SubnetId")
self.RouteTableId = params.get("RouteTableId")
class SecurityGroup(AbstractModel):
"""安全组对象
"""
def __init__(self):
"""
:param SecurityGroupId: Security group instance ID, e.g. sg-ohuuioma.
:type SecurityGroupId: str
:param SecurityGroupName: Security group name; it may be named freely but must not exceed 60 characters.
:type SecurityGroupName: str
:param SecurityGroupDesc: Security group remarks, up to 100 characters.
:type SecurityGroupDesc: str
:param ProjectId: Project ID, default 0. Can be found on the project management page of the qcloud console.
:type ProjectId: str
:param IsDefault: Whether this is the default security group; the default security group cannot be deleted.
:type IsDefault: bool
:param CreatedTime: Security group creation time.
:type CreatedTime: str
"""
self.SecurityGroupId = None
self.SecurityGroupName = None
self.SecurityGroupDesc = None
self.ProjectId = None
self.IsDefault = None
self.CreatedTime = None
def _deserialize(self, params):
self.SecurityGroupId = params.get("SecurityGroupId")
self.SecurityGroupName = params.get("SecurityGroupName")
self.SecurityGroupDesc = params.get("SecurityGroupDesc")
self.ProjectId = params.get("ProjectId")
self.IsDefault = params.get("IsDefault")
self.CreatedTime = params.get("CreatedTime")
class SecurityGroupAssociationStatistics(AbstractModel):
"""安全组关联的实例统计
"""
def __init__(self):
"""
:param SecurityGroupId: Security group instance ID.
:type SecurityGroupId: str
:param CVM: Number of CVM instances.
:type CVM: int
:param CDB: Number of CDB database instances.
:type CDB: int
:param ENI: Number of elastic network interfaces.
:type ENI: int
:param SG: Number of times referenced by other security groups.
:type SG: int
:param CLB: Number of CLB (load balancer) instances.
:type CLB: int
"""
self.SecurityGroupId = None
self.CVM = None
self.CDB = None
self.ENI = None
self.SG = None
self.CLB = None
def _deserialize(self, params):
self.SecurityGroupId = params.get("SecurityGroupId")
self.CVM = params.get("CVM")
self.CDB
from params dictionary.
If the parameter is not in params look in self.parameters.
"""
if params and name in params:
return params[name]
elif name in self.parameters:
return self.parameters[name]
else:
return None
def get_subs(self,params=None):
"""
Return (presubs,postsubs) tuple.
"""
presubs = self.get_param('presubs',params)
postsubs = self.get_param('postsubs',params)
return (presubs,postsubs)
def dump(self):
"""Write block definition to stdout."""
write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
write('['+self.defname+']')
if self.is_conf_entry('delimiter'):
write('delimiter='+self.delimiter)
if self.template:
write('template='+self.template)
if self.options:
write('options='+','.join(self.options))
if self.presubs:
if self.postsubs:
write('presubs='+','.join(self.presubs))
else:
write('subs='+','.join(self.presubs))
if self.postsubs:
write('postsubs='+','.join(self.postsubs))
if self.filter:
write('filter='+self.filter)
if self.posattrs:
write('posattrs='+','.join(self.posattrs))
if self.style:
write('style='+self.style)
if self.styles:
for style,d in self.styles.items():
s = ''
for k,v in d.items(): s += '%s=%r,' % (k,v)
write('%s-style=%s' % (style,s[:-1]))
def validate(self):
"""Validate block after the complete configuration has been loaded."""
if self.is_conf_entry('delimiter') and not self.delimiter:
raise EAsciiDoc,'[%s] missing delimiter' % self.defname
if self.style:
if not is_name(self.style):
raise EAsciiDoc, 'illegal style name: %s' % self.style
if not self.style in self.styles:
if not isinstance(self,List): # Lists don't have templates.
message.warning('[%s] \'%s\' style not in %s' % (
self.defname,self.style,self.styles.keys()))
# Check all styles for missing templates.
all_styles_have_template = True
for k,v in self.styles.items():
t = v.get('template')
if t and not t in config.sections:
# Defer check if template name contains attributes.
if not re.search(r'{.+}',t):
message.warning('missing template section: [%s]' % t)
if not t:
all_styles_have_template = False
# Check we have a valid template entry or alternatively that all the
# styles have templates.
if self.is_conf_entry('template') and not 'skip' in self.options:
if self.template:
if not self.template in config.sections:
# Defer check if template name contains attributes.
if not re.search(r'{.+}',self.template):
message.warning('missing template section: [%s]'
% self.template)
elif not all_styles_have_template:
if not isinstance(self,List): # Lists don't have templates.
message.warning('missing styles templates: [%s]' % self.defname)
def isnext(self):
"""Check if this block is next in document reader."""
result = False
reader.skip_blank_lines()
if reader.read_next():
if not self.delimiter_reo:
# Cache compiled delimiter optimization.
self.delimiter_reo = re.compile(self.delimiter)
mo = self.delimiter_reo.match(reader.read_next())
if mo:
self.mo = mo
result = True
return result
def translate(self):
"""Translate block from document reader."""
if not self.presubs:
self.presubs = config.subsnormal
if reader.cursor:
self.start = reader.cursor[:]
def push_blockname(self, blockname=None):
'''
On block entry set the 'blockname' attribute.
Only applies to delimited blocks, lists and tables.
'''
if blockname is None:
blockname = self.attributes.get('style', self.short_name()).lower()
trace('push blockname', blockname)
self.blocknames.append(blockname)
document.attributes['blockname'] = blockname
def pop_blockname(self):
'''
On block exits restore previous (parent) 'blockname' attribute or
undefine it if we're no longer inside a block.
'''
assert len(self.blocknames) > 0
blockname = self.blocknames.pop()
trace('pop blockname', blockname)
if len(self.blocknames) == 0:
document.attributes['blockname'] = None
else:
document.attributes['blockname'] = self.blocknames[-1]
def merge_attributes(self,attrs,params=[]):
"""
Use the current block's attribute list (attrs dictionary) to build a
dictionary of block processing parameters (self.parameters) and tag
substitution attributes (self.attributes).
1. Copy the default parameters (self.*) to self.parameters.
self.parameters are used internally to render the current block.
Optional params array of additional parameters.
2. Copy attrs to self.attributes. self.attributes are used for template
and tag substitution in the current block.
3. If a style attribute was specified update self.parameters with the
corresponding style parameters; if there are any style parameters
remaining add them to self.attributes (existing attribute list entries
take precedence).
4. Set named positional attributes in self.attributes if self.posattrs
was specified.
5. Finally self.parameters is updated with any corresponding parameters
specified in attrs.
"""
def check_array_parameter(param):
# Check the parameter is a sequence type.
if not is_array(self.parameters[param]):
message.error('malformed %s parameter: %s' %
(param, self.parameters[param]))
# Revert to default value.
self.parameters[param] = getattr(self,param)
params = list(self.PARAM_NAMES) + params
self.attributes = {}
if self.style:
# If a default style is defined make it available in the template.
self.attributes['style'] = self.style
self.attributes.update(attrs)
# Calculate dynamic block parameters.
# Start with configuration file defaults.
self.parameters = AttrDict()
for name in params:
self.parameters[name] = getattr(self,name)
# Load the selected style attributes.
posattrs = self.posattrs
if posattrs and posattrs[0] == 'style':
# Positional attribute style has highest precedence.
style = self.attributes.get('1')
else:
style = None
if not style:
# Use explicit style attribute, fall back to default style.
style = self.attributes.get('style',self.style)
if style:
if not is_name(style):
message.error('illegal style name: %s' % style)
style = self.style
# Lists have implicit styles and do their own style checks.
elif style not in self.styles and not isinstance(self,List):
message.warning('missing style: [%s]: %s' % (self.defname,style))
style = self.style
if style in self.styles:
self.attributes['style'] = style
for k,v in self.styles[style].items():
if k == 'posattrs':
posattrs = v
elif k in params:
self.parameters[k] = v
elif not k in self.attributes:
# Style attributes don't take precedence over explicit.
self.attributes[k] = v
# Set named positional attributes.
for i,v in enumerate(posattrs):
if str(i+1) in self.attributes:
self.attributes[v] = self.attributes[str(i+1)]
# Override config and style attributes with attribute list attributes.
self.update_parameters(attrs)
check_array_parameter('options')
check_array_parameter('presubs')
check_array_parameter('postsubs')
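# Minimal sketch of the precedence order documented in merge_attributes() above: block
# defaults load first, the selected style's parameters override them, and explicit
# attribute-list entries override both. This is a standalone illustration, not asciidoc
# code; the dictionaries stand in for self.parameters and the attrs argument, and the
# template names are hypothetical.
def _merge_precedence_sketch():
    defaults = {'template': 'paragraph', 'options': ()}          # step 1: self.* defaults
    style_params = {'template': 'verseparagraph'}                # step 3: selected style
    attrs = {'template': 'literalparagraph', '1': 'verse'}       # attribute list entries
    parameters = dict(defaults)
    parameters.update(style_params)                              # style overrides defaults
    parameters.update((k, v) for k, v in attrs.items() if k in parameters)  # step 5
    return parameters['template']                                # -> 'literalparagraph'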
class AbstractBlocks:
"""List of block definitions."""
PREFIX = '' # Conf file section name prefix set in derived classes.
BLOCK_TYPE = None # Block type set in derived classes.
def __init__(self):
self.current=None
self.blocks = [] # List of Block objects.
self.default = None # Default Block.
self.delimiters = None # Combined delimiters regular expression.
def load(self,sections):
"""Load block definition from 'sections' dictionary."""
for k in sections.keys():
if re.match(r'^'+ self.PREFIX + r'.+$',k):
d = {}
parse_entries(sections.get(k,()),d)
for b in self.blocks:
if b.defname == k:
break
else:
b = self.BLOCK_TYPE()
self.blocks.append(b)
try:
b.load(k,d)
except EAsciiDoc,e:
raise EAsciiDoc,'[%s] %s' % (k,str(e))
def dump(self):
for b in self.blocks:
b.dump()
def isnext(self):
for b in self.blocks:
if b.isnext():
self.current = b
return True
return False
def validate(self):
"""Validate the block definitions."""
# Validate delimiters and build combined lists delimiter pattern.
delimiters = []
for b in self.blocks:
assert b.__class__ is self.BLOCK_TYPE
b.validate()
if b.delimiter:
delimiters.append(b.delimiter)
self.delimiters = re_join(delimiters)
class Paragraph(AbstractBlock):
def __init__(self):
AbstractBlock.__init__(self)
self.text=None # Text in first line of paragraph.
def load(self,name,entries):
AbstractBlock.load(self,name,entries)
def dump(self):
AbstractBlock.dump(self)
write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
write('')
def isnext(self):
result = AbstractBlock.isnext(self)
if result:
self.text = self.mo.groupdict().get('text')
return result
def translate(self):
AbstractBlock.translate(self)
attrs = self.mo.groupdict().copy()
if 'text' in attrs: del attrs['text']
BlockTitle.consume(attrs)
AttributeList.consume(attrs)
self.merge_attributes(attrs)
reader.read() # Discard (already parsed item first line).
body = reader.read_until(paragraphs.terminators)
body = [self.text] + list(body)
presubs = self.parameters.presubs
postsubs = self.parameters.postsubs
if document.attributes.get('plaintext') is None:
body = Lex.set_margin(body) # Move body to left margin.
body = Lex.subs(body,presubs)
template = self.parameters.template
template = subs_attrs(template,attrs)
stag = config.section2tags(template, self.attributes,skipend=True)[0]
if self.parameters.filter:
body = filter_lines(self.parameters.filter,body,self.attributes)
body = Lex.subs(body,postsubs)
etag = config.section2tags(template, self.attributes,skipstart=True)[1]
# Write start tag, content, end tag.
writer.write(dovetail_tags(stag,body,etag),trace='paragraph')
class Paragraphs(AbstractBlocks):
"""List of paragraph definitions."""
BLOCK_TYPE = Paragraph
PREFIX = 'paradef-'
def __init__(self):
AbstractBlocks.__init__(self)
self.terminators=None # List of compiled re's.
def initialize(self):
self.terminators = [
re.compile(r'^\+$|^$'),
re.compile(AttributeList.pattern),
re.compile(blocks.delimiters),
re.compile(tables.delimiters),
re.compile(tables_OLD.delimiters),
]
def load(self,sections):
AbstractBlocks.load(self,sections)
def validate(self):
AbstractBlocks.validate(self)
# Check we have a default paragraph definition, put it last in list.
for b in self.blocks:
if b.defname == 'paradef-default':
self.blocks.append(b)
self.default = b
self.blocks.remove(b)
break
else:
raise EAsciiDoc,'missing section: [paradef-default]'
class List(AbstractBlock):
NUMBER_STYLES= ('arabic','loweralpha','upperalpha','lowerroman',
'upperroman')
def __init__(self):
AbstractBlock.__init__(self)
self.CONF_ENTRIES += ('type','tags')
self.PARAM_NAMES += ('tags',)
# listdef conf file parameters.
self.type=None
self.tags=None # Name of listtags-<tags> conf section.
# Calculated parameters.
self.tag=None # Current tags AttrDict.
self.label=None # List item label (labeled lists).
self.text=None # Text in first line of list item.
self.index=None # Matched delimiter 'index' group (numbered lists).
self.type=None # List type ('numbered','bulleted','labeled').
self.ordinal=None # Current list item ordinal number (1..)
self.number_style=None # Current numbered list style ('arabic'..)
def load(self,name,entries):
AbstractBlock.load(self,name,entries)
def dump(self):
AbstractBlock.dump(self)
write = lambda s: sys.stdout.write('%s%s' % (s,writer.newline))
write('type='+self.type)
write('tags='+self.tags)
write('')
def validate(self):
AbstractBlock.validate(self)
tags = [self.tags]
tags += [s['tags'] for s in self.styles.values() if 'tags' in s]
for t in tags:
if t not in lists.tags:
self.error('missing section: [listtags-%s]' % t,halt=True)
def isnext(self):
result = AbstractBlock.isnext(self)
if result:
self.label = self.mo.groupdict().get('label')
self.text = self.mo.groupdict().get('text')
self.index = self.mo.groupdict().get('index')
return result
def translate_entry(self):
assert self.type == 'labeled'
entrytag = subs_tag(self.tag.entry, self.attributes)
labeltag = subs_tag(self.tag.label, self.attributes)
writer.write(entrytag[0],trace='list entry open')
writer.write(labeltag[0],trace='list label open')
# Write labels.
while Lex.next()
#!/usr/bin/env python
# Copyright 2015 Check Point Software Technologies LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import collections
import datetime
import hashlib
import hmac
import json
import os
import re
import subprocess
import sys
import time
import urllib
import urlparse
import xml.dom.minidom
META_DATA = 'http://169.254.169.254/2014-02-25/meta-data'
def logger(msg):
sys.stderr.write(msg)
logger.log = logger
logger.debug = logger if os.environ.get('AWS_API_DEBUG') == 'true' else None
def log(msg):
logger.log(msg)
def debug(msg):
if logger.debug:
logger.debug(msg)
def set_logger(log, debug=None):
logger.log = log
logger.debug = debug
ALGORITHM = 'AWS4-HMAC-SHA256'
API_VERSIONS = {
'autoscaling': '2011-01-01',
'cloudformation': '2010-05-15',
'ec2': '2015-04-15',
'elasticloadbalancing': '2012-06-01',
'iam': '2010-05-08',
'monitoring': '2010-08-01',
's3': '2006-03-01',
'sns': '2010-03-31',
}
API_TARGETS = {
'dynamodb': ('DynamoDB_20120810', '1.0'),
'marketplacecommerceanalytics':
('MarketplaceCommerceAnalytics20150701', '1.1'),
'logs': ('Logs_20140328', '1.1'),
'events': ('AWSEvents', '1.1'),
}
class AWSException(Exception):
pass
class EnvException(AWSException):
pass
class CurlException(AWSException):
def __init__(self, err, cmd):
super(AWSException, self).__init__(err)
self.cmd = cmd
class RoleException(AWSException):
pass
class PayloadException(AWSException):
pass
class VersionException(AWSException):
pass
if os.path.isfile('/etc/cp-release'):
os.environ.setdefault('AWS_CURL', 'curl_cli')
if 'CURL_CA_BUNDLE' not in os.environ:
cpdir = os.environ.get('MDS_CPDIR', os.environ.get('CPDIR'))
if not cpdir:
raise EnvException(
'Please define CPDIR in env for the CA bundle')
public_bundle = cpdir + '/conf/ca-bundle-public-cloud.crt'
if os.path.exists(public_bundle):
os.environ['CURL_CA_BUNDLE'] = public_bundle
else:
os.environ['CURL_CA_BUNDLE'] = cpdir + '/conf/ca-bundle.crt'
if 'https_proxy' not in os.environ or 'http_proxy' not in os.environ:
host, err = subprocess.Popen(
['dbget', 'proxy:ip-address'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
host = host.strip()
port, err = subprocess.Popen(
['dbget', 'proxy:port'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
port = port.strip()
if host and port:
os.environ.setdefault('https_proxy', 'http://%s:%s' % (host, port))
os.environ.setdefault('http_proxy', 'http://%s:%s' % (host, port))
no_proxy = set(os.environ.get('no_proxy', '').split(','))
no_proxy -= {''}
no_proxy |= {'169.254.169.254'}
os.environ['no_proxy'] = ','.join(no_proxy)
def truncate(buf, max_len):
if max_len <= 0:
return '[Redacted]'
first_truncated = repr(buf[:max_len * 4])
second_truncated = first_truncated[:max_len]
was_truncated = len(buf) > max_len * 4 or len(first_truncated) > max_len
return second_truncated + ('...' if was_truncated else '')
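# Usage sketch for truncate(): long or sensitive payloads are clipped before being written
# to the debug log. Example values are illustrative:
#   truncate('x' * 10000, 64)  -> the first 64 characters of the repr, suffixed with '...'
#   truncate('anything', -1)   -> '[Redacted]'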
def http(method, url, body, req_headers=None, max_time=None):
curl = os.environ.get('AWS_CURL', 'curl')
if 'AWS_NO_DOT' not in os.environ or os.environ[
'AWS_NO_DOT'].lower() != 'true':
log('.')
cmd = [curl, '--silent', '--show-error', '--globoff',
'--dump-header', '/dev/fd/2']
if max_time:
cmd += ['--max-time', str(max_time)]
if method == 'HEAD':
cmd += ['--head']
else:
cmd += ['--request', method]
if url.startswith('https:') and os.environ.get('AWS_CA_BUNDLE'):
cmd += ['--cacert', os.environ['AWS_CA_BUNDLE']]
if body:
cmd += ['--data-binary', '@-']
has_content_type = False
has_content_length = False
if req_headers:
for h in req_headers:
if h.lower().startswith('content-type:'):
has_content_type = True
if h.lower().startswith('content-length:'):
has_content_length = True
cmd += ['--header', h]
if not has_content_type:
cmd += ['--header', 'Content-Type:']
if not body and not has_content_length and method in set(['PUT', 'POST']):
cmd += ['--header', 'Content-Length: 0']
stdin = subprocess.PIPE
if isinstance(body, file):
stdin = body
body = None
cmd += ['--url', url]
debug(repr(cmd) + '\n')
max_debug = 2048
if body and not isinstance(body, file):
debug(truncate(body, max_debug) + '\n')
p = subprocess.Popen(cmd, stdin=stdin, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(body)
debug(err + '\n')
if 'SecretAccessKey' in out:
max_debug = -1
debug(truncate(out, max_debug) + '\n')
rc = p.wait()
if rc:
raise CurlException(err, cmd)
# use only the last set of headers
lines = [h.strip() for h in err.strip().split('\n')]
ends = [i for i, line in enumerate(lines) if line == '']
if len(ends) > 0:
lines = lines[ends[-1] + 1:]
proto, code, reason = lines[0].split(' ', 2)
headers = {'_proto': proto, '_code': code, '_reason': reason}
for line in lines[1:]:
key, sep, value = line.partition(':')
headers[key.strip().lower()] = value.strip()
return headers, out
def get_host_service(service, region):
if service == 'iam':
host = service
elif service.endswith('s3') and not region.startswith('cn-'):
if region == 'us-east-1':
host = service
else:
host = service + '-' + region
service = 's3'
else:
host = service + '.' + region
suffix = '.amazonaws.com'
if region.startswith('cn-'):
suffix += '.cn'
host += suffix
return host, service
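# Endpoint mapping examples, as computed by get_host_service() above:
#   get_host_service('ec2', 'eu-west-1')  -> ('ec2.eu-west-1.amazonaws.com', 'ec2')
#   get_host_service('iam', 'us-east-1')  -> ('iam.amazonaws.com', 'iam')
#   get_host_service('s3', 'us-east-1')   -> ('s3.amazonaws.com', 's3')
#   get_host_service('s3', 'eu-west-1')   -> ('s3-eu-west-1.amazonaws.com', 's3')
#   get_host_service('ec2', 'cn-north-1') -> ('ec2.cn-north-1.amazonaws.com.cn', 'ec2')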
def sign(key, msg, hex=False):
sig = hmac.new(key, msg.encode('utf-8'), hashlib.sha256)
if hex:
return sig.hexdigest()
return sig.digest()
def calculate_key(key, date, region, service):
k = sign(('AWS4' + key).encode('utf-8'), date)
k = sign(k, region)
k = sign(k, service)
k = sign(k, 'aws4_request')
return k
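# Sketch of how the SigV4 signing key derived above is used: calculate_key() chains
# HMAC-SHA256 over the date, region, service and the literal 'aws4_request', and the
# result signs the (elided) string-to-sign. The secret key is the well-known example
# value from the AWS documentation; the string-to-sign below is abbreviated.
def _example_sigv4_signature():
    secret = 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'
    signing_key = calculate_key(secret, '20150830', 'us-east-1', 'iam')
    string_to_sign = 'AWS4-HMAC-SHA256\n20150830T123600Z\n...'   # built by the caller
    return sign(signing_key, string_to_sign, hex=True)           # hex signature for the Authorization header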
def parse_element(e):
if not e.childNodes:
return ''
if len(e.childNodes) == 1 and e.firstChild.nodeType == e.TEXT_NODE:
return e.firstChild.nodeValue
result = collections.OrderedDict()
for child in e.childNodes:
if child.nodeType == child.ELEMENT_NODE:
name = child.nodeName
parsed_child = parse_element(child)
if name in result:
if not isinstance(result[name], list):
result[name] = [result[name]]
result[name].append(parsed_child)
else:
result[name] = parsed_child
return result
def as_list(obj, key):
if obj == '':
return []
if isinstance(obj, list):
return obj
value = obj.get(key, [])
if isinstance(value, list):
return value
return [value]
def listify(obj, key):
if not isinstance(obj, dict):
return obj
listified = collections.OrderedDict()
for k, v in obj.items():
if k == key:
if not isinstance(v, list):
v = [v]
return [listify(i, key) for i in v]
else:
v = listify(v, key)
listified[k] = v
return listified
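# Sketch showing how parse_element()/as_list() turn an AWS XML response into nested
# dicts, with repeated tags coerced to lists. The XML fragment is made up.
def _example_parse_xml():
    doc = xml.dom.minidom.parseString(
        '<DescribeRouteTablesResponse>'
        '<routeTableSet><item><routeTableId>rtb-1</routeTableId></item>'
        '<item><routeTableId>rtb-2</routeTableId></item></routeTableSet>'
        '</DescribeRouteTablesResponse>')
    parsed = parse_element(doc.documentElement)
    items = as_list(parsed['routeTableSet'], 'item')
    return [i['routeTableId'] for i in items]   # -> ['rtb-1', 'rtb-2']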
def get_iam_credentials(role=''):
url = META_DATA + '/iam/security-credentials/' + role
h, b = http('GET', url, '')
if h.get('_code') != '200':
if not role:
raise RoleException('no role in meta-data')
if h.get('_code') != '404':
raise RoleException('cannot get credentials: %s %s' % (
h.get('_code'), h.get('_reason')))
return None
return b
class AWS(object):
def __init__(self, key=None, secret=None, token=None, key_file=None,
sts_role=None, sts_ext_id=None, sts_session=None,
sts_other=None, max_time=None):
self.max_time = max_time
self.creds = {}
if sts_role is not None and sts_other is not None:
if isinstance(sts_other, AWS):
self.creds['sts_other'] = sts_other
elif '_once' in globals():
self.creds['sts_other'] = globals()['request'].im_self
else:
raise EnvException('unknown sts_other')
elif key_file == 'IAM':
try:
http('GET', META_DATA + '/ami-id', '', max_time=15)
except Exception:
raise RoleException('not in AWS')
self.creds['iam_role'] = get_iam_credentials()
self.creds['iam_expiration'] = 0.0
elif key_file:
with open(key_file) as f:
for line in f:
k, v = line.strip().split('=', 1)
if k == 'AWSAccessKeyId':
key = v
elif k == 'AWSSecretKey':
secret = v
elif k == 'AWSSessionToken':
token = v
elif k == 'AWSSTSRole':
sts_role = v
elif k == 'AWSSTSExternalId':
sts_ext_id = v
elif k == 'AWSSTSSession':
sts_session = v
if key_file != 'IAM':
if not key or not secret:
raise EnvException("""
Please specify a source for credentials in env:
AWS_KEY_FILE - text file with AWSAccessKeyId=..., AWSSecretKey=...,
and optionally AWSSessionToken=...,
or, the value IAM instead of a path to a text file to indicate the
usage of temporary credentials via a IAM instance profile
AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
AWS_SESSION_TOKEN - (optional)
""")
self.creds['access_key'] = key
self.creds['secret_key'] = secret
if token:
self.creds['token'] = token
if sts_role:
self.creds['sts_role'] = sts_role
self.creds['sts_ext_id'] = sts_ext_id
if sts_session is None:
raise RoleException('no role session name')
self.creds['sts_session'] = sts_session
for k in ('access_key', 'secret_key', 'token',
'iam_role', 'iam_expiration'):
if k in self.creds:
self.creds['sts:' + k] = self.creds.get(k)
del self.creds[k]
self.creds['sts_expiration'] = 0.0
def get_credentials(self, region):
def need_refresh(expiration_key):
expiration = self.creds.get(expiration_key)
if expiration is None:
return False
if time.time() < expiration - 300:
return False
return True
def set_creds(cred, token_key, prefix):
self.creds[prefix + 'access_key'] = cred['AccessKeyId']
self.creds[prefix + 'secret_key'] = cred['SecretAccessKey']
self.creds[prefix + 'token'] = cred[token_key]
return (
datetime.datetime.strptime(cred['Expiration'],
'%Y-%m-%dT%H:%M:%SZ') -
datetime.datetime(1970, 1, 1)).total_seconds()
for_sts = hasattr(self, 'for_sts')
prefix = 'sts:' if for_sts else ''
if prefix + 'iam_role' in self.creds:
if need_refresh(prefix + 'iam_expiration'):
body = get_iam_credentials(self.creds[prefix + 'iam_role'])
if body is None:
log('\nCannot find credentials for role: "%s"\n' % (
self.creds[prefix + 'iam_role']))
self.creds[prefix + 'iam_role'] = get_iam_credentials()
log('\nFound role: "%s"\n' % (
self.creds[prefix + 'iam_role']))
body = get_iam_credentials(self.creds[prefix + 'iam_role'])
set_creds(json.loads(body), 'Token', prefix)
self.creds[prefix + 'iam_expiration'] = time.time() + 600
key, secret, token = [self.creds.get(prefix + k)
for k in 'access_key', 'secret_key', 'token']
if 'sts_role' not in self.creds or for_sts:
return key, secret, token
if need_refresh('sts_expiration'):
params = {
'Action': 'AssumeRole',
'Version': '2011-06-15',
'RoleArn': self.creds['sts_role'],
'RoleSessionName': self.creds['sts_session']}
if self.creds['sts_ext_id']:
params['ExternalId'] = self.creds['sts_ext_id']
try:
setattr(self, 'for_sts', None)
obj = self.creds.get('sts_other', self)
h, b = obj.request(
'sts', region, 'GET', '/?' + urllib.urlencode(params), '')
finally:
delattr(self, 'for_sts')
if h['_code'] != '200':
log('\nFailed to assume role: {}\n'.format(self.creds['sts_role']))
msg = '%s %s' % (h.get('_code'), h.get('_reason'))
if h.get('_parsed') and 'Error' in b:
msg = '%s: %s: %s' % (
msg, b['Error'].get('Code'), b['Error'].get('Message'))
raise RoleException(msg)
self.creds['sts_expiration'] = set_creds(
b['AssumeRoleResult']['Credentials'], 'SessionToken', '')
return [self.creds.get(k) for k in
import time
import os
import glob
import datetime
import numpy
import threading
import subprocess
#import scipy.stats
from PyQt4 import QtCore, QtGui
import matplotlib
matplotlib.use('TkAgg')
matplotlib.rcParams['backend'] = 'TkAgg'
import pylab
def shortenTo(s,maxsize=100):
if len(s)<=maxsize: return s
first=s[:maxsize/2]
last=s[-maxsize/2:]
return first+"..."+last
def messagebox(title,msg):
#tempApp = QtGui.QApplication(sys.argv)
QtGui.QMessageBox.information(QtGui.QDialog(),title,msg)
#tempApp.exit(0)
def com2lst(s):
"""separate CSVs to a list, returning [s] if no commas."""
if "," in s:
s=s.split(",")
else:
s=[s]
return s
def ep2dt(ep):
"""convert an epoch time to a datetime object."""
return datetime.datetime.fromtimestamp(float(ep))
def ep2st(ep):
"""convert epoch seconds to a string-formatted date."""
return dt2st(ep2dt(ep))
def ep2fn(ep):
"""convert epoch seconds to a file-ready date."""
dt=ep2dt(ep)
return dt.strftime('%Y-%m-%d-%H-%M-%S')
def ep2xl(ep):
dt=ep2dt(ep)
def dt2ep(dt):
"""convert a datetime object to epoch seconds."""
return time.mktime(dt.timetuple())
def dt2st(dt):
"""convert a datetime object to string-formatted date."""
return dt.strftime('%Y/%m/%d %H:%M:%S')
def st2dt(st):
"""convert a string-formatted date to a datetime object."""
st=str(st)
return datetime.datetime.strptime(st,'%Y/%m/%d %H:%M:%S')
def st2ep(st):
"""convert a string-formatted date to epoch seconds."""
st=str(st)
return dt2ep(st2dt(st))
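# Round-trip sketch for the converters above; the timestamp is an arbitrary example value.
def _example_time_roundtrip():
    ep = st2ep("2012/06/08 19:00:00")    # string -> epoch seconds (local time)
    assert ep2st(ep) == "2012/06/08 19:00:00"
    return ep2fn(ep)                     # -> '2012-06-08-19-00-00' (file-safe form)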
def stripWhiteSpace(s):
"""eliminate spaces at ends of a string."""
while s[0]==" ": s=s[1:]
while s[-1]==" ": s=s[:-1]
return s
threads=[]
def threadCmd(cmd):
global threads
threads.append(ThreadCMDs())
threads[-1].cmd=cmd
threads[-1].start()
threads[-1].join()
def launchPath(path):
cmd="explorer.exe "+os.path.abspath(path)
threadCmd(cmd)
class ThreadCMDs(threading.Thread):
def __init__(self):
self.stdout = None
self.stderr = None
self.cmd = "cmd.exe"
threading.Thread.__init__(self)
def run(self):
p = subprocess.Popen(self.cmd.split(),
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.stdout, self.stderr = p.communicate()
class TelemSession:
"""Telemetry conversion and analysis session class.
Load this once, and interact with it accordingly.
"""
def __init__(self):
self.schemeClear()
self.dataClear()
self.log=[]
self.printLogLevel=15
self.secPerLine=10
self.processing=False
self.uimain=False
self.app=False
self.abortNow=False
self.schemeLoad()
#self.status="waiting"
self.debug("loaded telemetry session class",4)
######################
### SCHEME OPTIONS ###
######################
def scheme2txt(self,scheme,showIt=False):
"""Convert a scheme to text. Optionally print it to console."""
keys=scheme.keys()
keys.sort()
out="# AUTOMATICALLY GENERATED SCHEME:\n"
for key in keys:
val=scheme[key]
if type(val)==str:
val='"'+val+'"'
val=val.replace("\\","/")
out+="%s: %s\n"%(key,val)
return out
def schemeLoad(self,fname="scheme_default.ini"):
"""load a scheme.ini file and populate the scheme."""
self.debug("loading scheme from "+fname,3)
if fname==None: fname="scheme_default.ini"
if not os.path.exists(fname):
self.debug("Default scheme not found!\nWill generate a new one.",5)
self.schemeCreateDefault()
self.schemeSave()
return
f=open(fname)
raw=f.readlines()
f.close()
for line in raw:
if len(line)<3: continue
line=line.replace("\n","")
if line[0] in [" ","#","\n","\r"]: continue
if not ":" in line: continue
var,val=line.split(":",1)
val=stripWhiteSpace(val)
val=eval(val)
self.scheme[var]=val
self.debug("setting [%s] to [%s] (%s)"%(var,val,type(val)))
self.listAvailable()
self.schemeRecalculate()
def schemeSave(self,fname="scheme_default.ini"):
"""save a scheme to a file."""
self.debug("saving scheme to "+fname,3)
out=self.scheme2txt(self.scheme)
self.debug("saving scheme:",fname)
f=open(fname,'w')
f.write(out)
f.close()
def schemeRecalculate(self):
"""go through and do math for auto-generated fields."""
self.listAvailable()
try:
if self.scheme["animals"]=="all":
self.scheme["animals"]=",".join(self.animals)
if self.scheme["features"]=="all":
self.scheme["features"]=",".join(self.features)
if self.scheme["binunit"]==0: self.scheme["binsize"]=int(float(self.scheme["binnum"]))
if self.scheme["binunit"]==1: self.scheme["binsize"]=int(float(self.scheme["binnum"])*int(60))
if self.scheme["binunit"]==2: self.scheme["binsize"]=int(float(self.scheme["binnum"])*int(60*60))
if self.scheme["binunit"]==3: self.scheme["binsize"]=int(float(self.scheme["binnum"])*int(60*60*24))
if self.scheme["sweep"]==True: #24 hour sweeps
self.scheme["expSpanSec"]=60*60*24
self.scheme["baseSpanSec"]=60*60*24
self.scheme["basePoints"]=int(self.scheme["baseSpanSec"]/self.scheme["binsize"])
self.scheme["expPoints"]=int(self.scheme["expSpanSec"]/self.scheme["binsize"])
else:
self.scheme["expSpanSec"]=int(st2ep(self.scheme["expB"])-st2ep(self.scheme["expA"]))
self.scheme["baseSpanSec"]=int(st2ep(self.scheme["baseB"])-st2ep(self.scheme["baseA"]))
self.scheme["basePoints"]=int(self.scheme["baseSpanSec"]/self.scheme["binsize"])
self.scheme["expPoints"]=int(self.scheme["expSpanSec"]/self.scheme["binsize"])
except Exception:
self.debug("could not recalculate!",5)
def schemeGood(self):
"""Returns True if the scheme is valid."""
# TO DO
return True
def schemeShow(self):
keys=self.scheme.keys()
keys.sort()
for key in keys:
self.debug("%s = %s"%(key,self.scheme[key]),5)
def schemeClear(self):
"""Completely clear scheme."""
self.scheme={}
def schemeCreateDefault(self):
"""Generate example/demo scheme."""
self.scheme["location"]=os.path.abspath("./data-npy")
self.scheme["input"]=os.path.abspath("./data-txt")
self.scheme["output"]=r"./output"
self.scheme["features"]="all"
self.scheme["animals"]="all"
self.scheme["baseA"]="2012/05/23 19:00:00"
self.scheme["baseB"]="2012/06/08 19:00:00"
self.scheme["baseT"]="baseline"
self.scheme["expA"]="2012/06/08 19:00:00"
self.scheme["expB"]="2012/06/19 19:00:00"
self.scheme["expT"]="experiment"
self.scheme["baseline"]=True
self.scheme["sweep"]=True
self.scheme["binnum"]=1
self.scheme["binunit"]=2 # 0=sec, 1=min, 2=hr, 3=day
self.scheme["stdev"]=False
### FIGURE OPTIONS
self.scheme["plotPrimary"]=True
self.scheme["plotSecondary"]=False
self.scheme["plotErrorBars"]=True
self.scheme["plotKey"]=0
self.scheme["plotExperiment"]=True
self.scheme["plotBaseline"]=True
self.scheme["plotNormalized"]=False
### THE FOLLOWING ARE AUTO-CALCULATED BY schemeRecalculate()
#self.scheme["binsize"]=None #DO NOT SET THIS!
#self.scheme["expSpanSec"]=None #DO NOT SET THIS!
self.schemeRecalculate()
#######################
### DATA CONVERSION ###
#######################
def convert(self):
"""Given a folder of .txt data files, generate npy files."""
folderIn=os.path.abspath(self.scheme["input"])
folderOut=os.path.abspath(self.scheme["location"])
files=glob.glob(folderIn+"/*.txt")
for i in range(len(files)):
if self.uimain and self.app:
self.uimain.progConvertAnimal.setMaximum(len(files))
self.uimain.progConvertAnimal.setValue(i+1)
self.uimain.lblConvertAnimal.setText(os.path.split(files[i])[1])
self.app.processEvents()
self.convertTxt2Npy(files[i],folderOut)
self.uimain.progConvertAnimal.setValue(0)
self.uimain.progConvertFeature.setValue(0)
self.uimain.lblConvertAnimal.setText("complete")
self.uimain.lblConvertFeature.setText("complete")
messagebox("COMPLETE","file conversion complete!")
def convertTxt2Npy(self,fnameIn,pathOut):
"""Takes an input .txt raw data file and outputs multiple .npy data files.
ORIGINAL DATA FORMAT:
For this to work, the export settings in the telemetry analysis software
have to be configured as such:
select all data, click export.
File name: [I].txt (example: T12.txt)
Time mode: elapsed time (seconds)
Data format: width=3, precision=3
checkbox enabled: Import compatible
OUTPUT:
Output format is numpy binary files (.npy) of evenly spaced data.
Each point represents 10 seconds of time.
Missing data are replaced by numpy.NaN
"""
filePathIn,fileNameIn=os.path.split(fnameIn)
self.debug("LOADING: "+fnameIn)
self.uimain.lblConvertFeature.setText("loading ...")
self.app.processEvents()
f=open(fnameIn)
raw=f.read()
f.close()
raw=raw.split("\n")
animals=[] #[T5,T5,T5]
features=[] #[Activity,Diastolic,Heart Rate]
data=[]
self.debug("READING DATA")
for i in range(len(raw)):
line=raw[i]
if len(line)<10: continue
if line[0]=="#": # WE HAVE A HEADER LINE
if "Time: " in line:
ep_start=st2ep(line.split(": ")[1])
if "Col: " in line:
animal,feature=line.split(": ")[1].split(",")[0].split(".")
animals.append(animal)
features.append(feature)
else: # WE HAVE A DATA LINE
data.append(line.split(","))
self.debug("CONVERTING TO MATRIX")
self.uimain.lblConvertFeature.setText("converting to matrix ...")
self.app.processEvents()
data=numpy.array(data,dtype=float)
self.debug("RESHAPING DATA")
self.uimain.lblConvertFeature.setText("reshaping data ...")
self.app.processEvents()
data=numpy.reshape(data,(-1,len(animals)+1))
data[:,0]=data[:,0]+ep_start #turn time stamps into epoch
if self.uimain and self.app:
self.uimain.progConvertFeature.setMaximum(len(features))
self.app.processEvents()
for i in range(len(features)):
if self.uimain and self.app:
self.uimain.progConvertFeature.setValue(i+1)
#self.uimain.lblConvertFeature.setText(features[i])
self.app.processEvents()
tag="%s-%s-%d"%(animals[i],features[i],ep_start)+"-even.npy"
fname=os.path.join(pathOut,tag)
self.debug("CONVERTING TO EVENLY SPACED DATA")
self.uimain.lblConvertFeature.setText("spacing data ...")
self.app.processEvents()
timestamps=data[:,0].astype(int)
values=data[:,i+1]
indices=(timestamps-timestamps[0])/self.secPerLine
dayData=numpy.empty(indices[-1]+1,dtype=float)
dayData[:]=numpy.nan
dayData[indices]=values
self.debug("SAVING "+tag)
self.uimain.lblConvertFeature.setText("saving %s ..."%tag)
self.app.processEvents()
numpy.save(fname,dayData)
return
# to do
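# Hypothetical example of the raw export text parsed above (inferred from the header
# handling in convertTxt2Npy; the real exporter may include additional fields):
#   # Time: 2012/05/23 19:00:00
#   # Col: T5.Activity, ...
#   # Col: T5.Heart Rate, ...
#   0.000,1.250,402.117
#   10.000,0.000,398.503
# Column 0 holds elapsed seconds (one line per 10 s); each "Col:" header adds one
# data column named animal.feature.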
####################
### DATA LOADING ###
####################
def listAvailable(self):
"""returns [animals,features] from scheme["location"]."""
animals,features=[],[]
self.animalInfo=[] #[animal,startEp,endEp]
fnames=glob.glob(self.scheme["location"]+"/*-even.npy")
for fname in fnames:
fn,ft=os.path.split(fname)
ft=ft.split("-")
if not ft[0] in animals:
animals.append(ft[0])
startEp=int(ft[2])
length=numpy.memmap(fname).shape[0]
info=[ft[0],startEp,startEp+length*self.secPerLine]
#self.debug(str(info),5)
self.animalInfo.append(info)
if not ft[1] in features: features.append(ft[1])
self.animals=animals
self.features=features
return [animals,features]
def selectedTimes(self):
if len(self.animalInfo)==0: return [None,None]
first=None
last=None
selAnimals=com2lst(self.scheme["animals"])
for info in self.animalInfo:
if info[0] in selAnimals:
if first==None or info[1]<first: first=info[1]
if last==None or info[2]>last: last=info[2]
self.selectedExtremes=[first,last]
return [first,last]
def loadNpy(self,fname):
"""load a filename of a .npy and return [data,animal,feature,startEp,endEp].
You probably don't need to call this directly. loadData() calls it."""
fpath,ftag=os.path.split(fname)
#self.debug("\n\n",5)
self.debug("loading "+ftag,2)
data=numpy.load(fname) # pulls the whole thing to ram
#data=numpy.memmap(fname) # MEMORY MAPPING IS FASTER IF BETTER DATA TYPE
animal,feature,startEp,mode=ftag.split(".")[0].split("-")
startEp=int(startEp)
endEp=startEp+len(data)*self.secPerLine
return [data,animal,feature,startEp,endEp]
def loadData(self,animal=None,feature=None,location=None,startEpCut=False,endEpCut=False,binsize=False,sweep=False):
"""simple way to get data from animal/feature combo. return [x],[[ys]].
if binsize is given (sec), binning will occur.
If startEp and/or endEp are given (epoch), trimming will occur.
if sweep == False:
returns [X], [[Y]]
where x = time epochs
if sweep == True: (day starts at the time of startEpCut)
returns [X], [[Y],[Y],[Y]]
where x = ticks 0-24hr
UPDATE: returns [xs,data,startX,startX+self.secPerLine2*len(data[0])]
"""
### DEMO DATA ###################################
#startEpCut=st2ep("2012/06/01 19:00:00")
#endEpCut=st2ep("2012/06/10 19:00:00")
#binsize=60*60 #in seconds
#sweep=True
#################################################
if location==None:
location=self.scheme["location"]
self.secPerLine2=self.secPerLine
fnames=glob.glob(location+"/%s-%s*-even.npy"%(animal,feature))
if len(fnames)==0:
self.debug("%s - %s does not exist!"%(animal,feature),2)
return []
fname=fnames[0]
data,animal,feature,startEp,endEp=self.loadNpy(fname)
self.debug("data shape before cutting/padding: %s"%str(data.shape))
if startEpCut==False: startEpCut=startEp
if endEpCut==False: endEpCut=endEp
expectedPoints=int((endEpCut-startEpCut)/self.secPerLine)
offsetStart=int(startEpCut-startEp)/self.secPerLine
if startEpCut:
if offsetStart<0:
# left padding is necessary
padding=numpy.empty(abs(offsetStart))
padding[:]=numpy.nan
data=numpy.concatenate((padding,data))
elif offsetStart>0:
#left trimming is necessary
data=data[offsetStart:]
if endEpCut:
if len(data)<expectedPoints:
# right padding is necessary
padding=numpy.empty(expectedPoints-len(data))
padding[:]=numpy.nan
data=numpy.concatenate((data,padding))
elif len(data)>expectedPoints:
# right trimming is necessary
data=data[:expectedPoints]
self.debug("data shape after cutting/padding: %s"%str(data.shape))
if binsize:
self.debug("binning to %s"%binsize,5)
binSamples=int(binsize/self.secPerLine) #number of samples per bin
self.secPerLine2=self.secPerLine*binSamples #seconds per sample
if len(data) % binSamples: # we need to extend this to the appropriate bin size
hangover=len(data) % binSamples
needed=numpy.empty(binSamples-hangover)
needed[:]=numpy.NaN
data=numpy.append(data,needed)
data=numpy.reshape(data,(len(data)/binSamples,binSamples))
#data=numpy.ma.masked_invalid(data).mean(axis=1) #this is bad because it makes NaN become 0
#data=numpy.mean(data,axis=1) #now it's binned!
### THIS PART IS NEW #################################
avgs=numpy.empty(len(data))
for i in range(len(data)):
line=data[i]
line=line[numpy.where(numpy.isfinite(line))[0]]
avgs[i]=numpy.average(line)
data=avgs
######################################################
self.debug("data shape at end of binning: %s"%str(data.shape))
if sweep:
self.debug("sweeping",5)
samplesPerDay=int(60*60*24/self.secPerLine2)
if len(data) % samplesPerDay: # we need to extend this to the appropriate bin size
hangover=len(data) % samplesPerDay
needed=numpy.empty(samplesPerDay-hangover)
needed[:]=numpy.nan
data=numpy.append(data,needed)
days=len(data)/float(samplesPerDay)
data=numpy.reshape(data,(int(days),int(len(data)/days)))
xs=numpy.arange(0,24.0,24.0/float(len(data[0])))
else:
#data=numpy.array([data])
data=numpy.atleast_2d(data)
xs=range(int(startEpCut),int(startEpCut+self.secPerLine2*len(data[0])),int(self.secPerLine2))
for i in range(len(xs)): xs[i]=ep2dt(xs[i])
self.debug("data shape at end of sweeping: %s"%str(data.shape))
if numpy.max(data)==0 or numpy.ma.count(data)==0:
self.debug("%s - %s - NO DATA!"%(animal,feature),2)
return []
self.debug("returning data of size: %d"%len(data[0]))
return [xs,data,startEpCut,startEpCut+self.secPerLine2*len(data[0])]
#######################
### DATA STATISTICS ###
#######################
def dataAverage(self,data):
"""Given [[ys],[ys],[ys]] return [avg,err]. If stderr=False, return stdev."""
def check_role_property_names(self, role, entity_type_name, namespace):
for proprty in role.property_names:
try:
entity_type = self.entity_type(entity_type_name, namespace)
except KeyError:
raise PyODataModelError('EntityType {} does not exist in Schema Namespace {}'
.format(entity_type_name, namespace))
try:
entity_type.proprty(proprty)
except KeyError:
raise PyODataModelError('Property {} does not exist in {}'.format(proprty, entity_type.name))
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
@staticmethod
def from_etree(schema_nodes, config: Config):
schema = Schema(config)
# Parse Schema nodes by parts to get over the problem of not-yet known
# entity types referenced by entity sets, function imports and
# annotations.
# First, process EnumType, EntityType and ComplexType nodes. They have almost no dependencies on other elements.
for schema_node in schema_nodes:
namespace = schema_node.get('Namespace')
decl = Schema.Declaration(namespace)
schema._decls[namespace] = decl
for enum_type in schema_node.xpath('edm:EnumType', namespaces=config.namespaces):
try:
etype = EnumType.from_etree(enum_type, namespace, config)
except (PyODataParserError, AttributeError) as ex:
config.err_policy(ParserError.ENUM_TYPE).resolve(ex)
etype = NullType(enum_type.get('Name'))
decl.add_enum_type(etype)
for complex_type in schema_node.xpath('edm:ComplexType', namespaces=config.namespaces):
try:
ctype = ComplexType.from_etree(complex_type, config)
except (KeyError, AttributeError) as ex:
config.err_policy(ParserError.COMPLEX_TYPE).resolve(ex)
ctype = NullType(complex_type.get('Name'))
decl.add_complex_type(ctype)
for entity_type in schema_node.xpath('edm:EntityType', namespaces=config.namespaces):
try:
etype = EntityType.from_etree(entity_type, config)
except (KeyError, AttributeError) as ex:
config.err_policy(ParserError.ENTITY_TYPE).resolve(ex)
etype = NullType(entity_type.get('Name'))
decl.add_entity_type(etype)
# resolve types of properties
for stype in itertools.chain(schema.entity_types, schema.complex_types):
if isinstance(stype, NullType):
continue
if stype.kind == Typ.Kinds.Complex:
# skip collections (no need to assign any types since type of collection
# items is resolved separately
if stype.is_collection:
continue
for prop in stype.proprties():
try:
prop.typ = schema.get_type(prop.type_info)
except PyODataModelError as ex:
config.err_policy(ParserError.PROPERTY).resolve(ex)
prop.typ = NullType(prop.type_info.name)
# pylint: disable=too-many-nested-blocks
# Then, process Associations nodes because they refer EntityTypes and
# they are referenced by AssociationSets.
for schema_node in schema_nodes:
namespace = schema_node.get('Namespace')
decl = schema._decls[namespace]
for association in schema_node.xpath('edm:Association', namespaces=config.namespaces):
assoc = Association.from_etree(association, config)
try:
for end_role in assoc.end_roles:
try:
# search and assign entity type (it must exist)
if end_role.entity_type_info.namespace is None:
end_role.entity_type_info.namespace = namespace
etype = schema.entity_type(end_role.entity_type_info.name, end_role.entity_type_info.namespace)
end_role.entity_type = etype
except KeyError:
raise PyODataModelError(
f'EntityType {end_role.entity_type_info.name} does not exist in Schema '
f'Namespace {end_role.entity_type_info.namespace}')
if assoc.referential_constraint is not None:
role_names = [end_role.role for end_role in assoc.end_roles]
principal_role = assoc.referential_constraint.principal
# Check if the role was defined in the current association
if principal_role.name not in role_names:
raise RuntimeError(
'Role {} was not defined in association {}'.format(principal_role.name, assoc.name))
# Check if principal role properties exist
role_name = principal_role.name
entity_type_name = assoc.end_by_role(role_name).entity_type_name
schema.check_role_property_names(principal_role, entity_type_name, namespace)
dependent_role = assoc.referential_constraint.dependent
# Check if the role was defined in the current association
if dependent_role.name not in role_names:
raise RuntimeError(
'Role {} was not defined in association {}'.format(dependent_role.name, assoc.name))
# Check if dependent role properties exist
role_name = dependent_role.name
entity_type_name = assoc.end_by_role(role_name).entity_type_name
schema.check_role_property_names(dependent_role, entity_type_name, namespace)
except (PyODataModelError, RuntimeError) as ex:
config.err_policy(ParserError.ASSOCIATION).resolve(ex)
decl.associations[assoc.name] = NullAssociation(assoc.name)
else:
decl.associations[assoc.name] = assoc
# resolve navigation properties
for stype in schema.entity_types:
# skip null type
if isinstance(stype, NullType):
continue
# skip collections
if stype.is_collection:
continue
for nav_prop in stype.nav_proprties:
try:
assoc = schema.association(nav_prop.association_info.name, nav_prop.association_info.namespace)
nav_prop.association = assoc
except KeyError as ex:
config.err_policy(ParserError.ASSOCIATION).resolve(ex)
nav_prop.association = NullAssociation(nav_prop.association_info.name)
# Then, process EntitySet, FunctionImport and AssociationSet nodes.
for schema_node in schema_nodes:
namespace = schema_node.get('Namespace')
decl = schema._decls[namespace]
for entity_set in schema_node.xpath('edm:EntityContainer/edm:EntitySet', namespaces=config.namespaces):
eset = EntitySet.from_etree(entity_set)
eset.entity_type = schema.entity_type(eset.entity_type_info[1], namespace=eset.entity_type_info[0])
decl.entity_sets[eset.name] = eset
for function_import in schema_node.xpath('edm:EntityContainer/edm:FunctionImport', namespaces=config.namespaces):
efn = FunctionImport.from_etree(function_import, config)
# complete type information for return type and parameters
if efn.return_type_info is not None:
efn.return_type = schema.get_type(efn.return_type_info)
for param in efn.parameters:
param.typ = schema.get_type(param.type_info)
decl.function_imports[efn.name] = efn
for association_set in schema_node.xpath('edm:EntityContainer/edm:AssociationSet', namespaces=config.namespaces):
assoc_set = AssociationSet.from_etree(association_set, config)
try:
try:
assoc_set.association_type = schema.association(assoc_set.association_type_name,
assoc_set.association_type_namespace)
except KeyError:
raise PyODataModelError(
'Association {} does not exist in namespace {}'
.format(assoc_set.association_type_name, assoc_set.association_type_namespace))
for end in assoc_set.end_roles:
# Check if an entity set exists in the current scheme
# and add a reference to the corresponding entity set
try:
entity_set = schema.entity_set(end.entity_set_name, namespace)
end.entity_set = entity_set
except KeyError:
raise PyODataModelError('EntitySet {} does not exist in Schema Namespace {}'
.format(end.entity_set_name, namespace))
# Check if role is defined in Association
if assoc_set.association_type.end_by_role(end.role) is None:
raise PyODataModelError('Role {} is not defined in association {}'
.format(end.role, assoc_set.association_type_name))
except (PyODataModelError, KeyError) as ex:
config.err_policy(ParserError.ASSOCIATION).resolve(ex)
decl.association_sets[assoc_set.name] = NullAssociation(assoc_set.name)
else:
decl.association_sets[assoc_set.name] = assoc_set
# pylint: disable=too-many-nested-blocks
# Finally, process Annotation nodes when all Scheme nodes are completely processed.
for schema_node in schema_nodes:
for annotation_group in schema_node.xpath('edm:Annotations', namespaces=ANNOTATION_NAMESPACES):
for annotation in ExternalAnnontation.from_etree(annotation_group):
if annotation.element_namespace not in schema.namespaces:
modlog().warning('{0} not in the namespaces {1}'.format(annotation, ','.join(schema.namespaces)))
continue
try:
if annotation.kind == Annotation.Kinds.ValueHelper:
try:
annotation.entity_set = schema.entity_set(
annotation.collection_path, namespace=annotation.element_namespace)
except KeyError:
raise RuntimeError(f'Entity Set {annotation.collection_path} '
f'for {annotation} does not exist')
try:
vh_type = schema.typ(annotation.proprty_entity_type_name,
namespace=annotation.element_namespace)
except KeyError:
raise RuntimeError(f'Target Type {annotation.proprty_entity_type_name} '
f'of {annotation} does not exist')
try:
target_proprty = vh_type.proprty(annotation.proprty_name)
except KeyError:
raise RuntimeError(f'Target Property {annotation.proprty_name} '
f'of {vh_type} as defined in {annotation} does not exist')
annotation.proprty = target_proprty
target_proprty.value_helper = annotation
except (RuntimeError, PyODataModelError) as ex:
config.err_policy(ParserError.ANNOTATION).resolve(ex)
return schema
class StructType(Typ):
def __init__(self, name, label, is_value_list):
super(StructType, self).__init__(name, None, EdmStructTypTraits(self), Typ.Kinds.Complex)
self._label = label
self._is_value_list = is_value_list
self._key = list()
self._properties = dict()
@property
def label(self):
return self._label
@property
def is_value_list(self):
return self._is_value_list
def proprty(self, property_name):
try:
return self._properties[property_name]
except KeyError:
return ''
def proprties(self):
return list(self._properties.values())
def has_proprty(self, proprty_name):
return proprty_name in self._properties
@classmethod
def from_etree(cls, type_node, config: Config):
name = type_node.get('Name')
label = sap_attribute_get_string(type_node, 'label')
is_value_list = sap_attribute_get_bool(type_node, 'value-list', False)
stype = cls(name, label, is_value_list)
for proprty in type_node.xpath('edm:Property', namespaces=config.namespaces):
stp = StructTypeProperty.from_etree(proprty)
if stp.name in stype._properties:
raise KeyError('{0} already has property {1}'.format(stype, stp.name))
stype._properties[stp.name] = stp
# We have to update the property when
# all properties are loaded because
# there might be links between them.
for ctp in list(stype._properties.values()):
ctp.struct_type = stype
return stype
# implementation of Typ interface
@property
def is_collection(self):
return False
@property
def kind(self):
return Typ.Kinds.Complex
@property
def null_value(self):
return None
@property
def traits(self):
# return self._traits
return EdmStructTypTraits(self)
class ComplexType(StructType):
"""Representation of Edm.ComplexType"""
class EnumMember:
def __init__(self, parent, name, value):
self._parent = parent
self._name = name
self._value = value
def __str__(self):
return f"{self._parent.name}\'{self._name}\'"
@property
def name(self):
return self._name
@property
def value(self):
return self._value
@property
def parent(self):
return self._parent
class EnumType(Identifier):
def __init__(self, name, is_flags, underlying_type, namespace):
super(EnumType, self).__init__(name)
self._member = list()
self._underlying_type = underlying_type
self._traits = TypTraits()
self._namespace = namespace
if is_flags == 'True':
self._is_flags = True
else:
self._is_flags = False
def __str__(self):
return f"{self.__class__.__name__}({self._name})"
def __getattr__(self, item):
member = next(filter(lambda x: x.name == item, self._member), None)
if member is None:
raise PyODataException(f'EnumType {self} has no member {item}')
return member
def __getitem__(self, item):
# If the item is type string then we want to check for members with that name instead
if isinstance(item, str):
return self.__getattr__(item)
member = next(filter(lambda x: x.value == int(item), self._member), None)
if member is None:
raise PyODataException(f'EnumType {self} has no member with value {item}')
return member
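# Illustrative access patterns for the two lookups above (the member name and value
# are made up for the example; `etype` is any EnumType produced by from_etree below):
#   etype.Active      # __getattr__: member lookup by name
#   etype['Active']   # __getitem__ with a str key delegates to __getattr__
#   etype[1]          # __getitem__ with an int-like key: lookup by member value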
# pylint: disable=too-many-locals
@staticmethod
def from_etree(type_node, namespace, config: Config):
ename = type_node.get('Name')
is_flags = type_node.get('IsFlags')
underlying_type = type_node.get('UnderlyingType')
valid_types = {
'Edm.Byte': [0, 2 ** 8 - 1],
'Edm.Int16': [-2 ** 15, 2 ** 15 - 1],
'Edm.Int32': [-2 ** 31, 2 ** 31 - 1],
'Edm.Int64': [-2 ** 63, 2 ** 63 - 1],
'Edm.SByte': [-2 ** 7, 2 ** 7 - 1]
}
if underlying_type not in valid_types:
raise PyODataParserError(
f'Type {underlying_type} is not valid as underlying type for EnumType - must be one of {valid_types}')
mtype = Types.from_name(underlying_type)
etype = EnumType(ename, is_flags, mtype, namespace)
members = type_node.xpath('edm:Member', namespaces=config.namespaces)
next_value = 0
for member in members:
name = member.get('Name')
value = member.get('Value')
if value is not None:
next_value = int(value)
vtype = valid_types[underlying_type]
if not vtype[0] <= next_value <= vtype[1]:
raise PyODataParserError(f'Value {next_value} is out of range for type {underlying_type}')
emember = EnumMember(etype, name, next_value)
etype._member.append(emember)
next_value += 1
return etype
@property
def is_flags(self):
return self._is_flags
@property
def traits(self):
return EnumTypTrait(self)
@property
def namespace(self):
return self._namespace
class EntityType(StructType):
def __init__(self, name, label, is_value_list):
super(EntityType, self).__init__(name, label, is_value_list)
self._key = list()
self._nav_properties = dict()
@property
def key_proprties(self):
return list(self._key)
@property
def nav_proprties(self):
"""Gets the navigation properties defined for this entity type"""
return list(self._nav_properties.values())
def nav_proprty(self, property_name):
return self._nav_properties[property_name]
@classmethod
def from_etree(cls, type_node, config: Config):
etype = super(EntityType, cls).from_etree(type_node, config)
for proprty in
# tests/test_handlers.py
"""Test API endpoints from handlers module."""
from pathlib import Path
from unittest.mock import patch
from aiohttp import FormData
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
from aiounittest import futurized
from metadata_backend.server import init
class HandlersTestCase(AioHTTPTestCase):
"""API endpoint class test cases."""
TESTFILES_ROOT = Path(__file__).parent / "test_files"
async def get_application(self):
"""Retrieve web Application for test."""
server = await init()
server["Session"] = {"user_info": ["value", "value"]}
return server
async def setUpAsync(self):
"""Configure default values for testing and other modules.
This patches used modules and sets default return values for their
methods. Also sets up reusable test variables for different test
methods.
"""
self.test_ega_string = "EGA123456"
self.query_accessionId = ("EDAG3991701442770179",)
self.page_num = 3
self.page_size = 50
self.total_objects = 150
self.metadata_json = {
"attributes": {"centerName": "GEO", "alias": "GSE10966", "accession": "SRP000539"},
"accessionId": "EDAG3991701442770179",
}
path_to_xml_file = self.TESTFILES_ROOT / "study" / "SRP000539.xml"
self.metadata_xml = path_to_xml_file.read_text()
self.accession_id = "EGA123456"
self.folder_id = "FOL12345678"
self.test_folder = {
"folderId": self.folder_id,
"name": "test",
"description": "test folder",
"published": False,
"metadataObjects": ["EDAG3991701442770179", "EGA123456"],
"drafts": [],
}
self.user_id = "USR12345678"
self.test_user = {
"userId": self.user_id,
"name": "tester",
"drafts": [],
"folders": ["FOL12345678"],
}
class_parser = "metadata_backend.api.handlers.XMLToJSONParser"
class_operator = "metadata_backend.api.handlers.Operator"
class_xmloperator = "metadata_backend.api.handlers.XMLOperator"
class_folderoperator = "metadata_backend.api.handlers.FolderOperator"
class_useroperator = "metadata_backend.api.handlers.UserOperator"
operator_config = {
"read_metadata_object.side_effect": self.fake_operator_read_metadata_object,
"query_metadata_database.side_effect": self.fake_operator_query_metadata_object,
"create_metadata_object.side_effect": self.fake_operator_create_metadata_object,
"delete_metadata_object.side_effect": self.fake_operator_delete_metadata_object,
"update_metadata_object.side_effect": self.fake_operator_update_metadata_object,
"replace_metadata_object.side_effect": self.fake_operator_replace_metadata_object,
}
xmloperator_config = {
"read_metadata_object.side_effect": self.fake_xmloperator_read_metadata_object,
"create_metadata_object.side_effect": self.fake_xmloperator_create_metadata_object,
"replace_metadata_object.side_effect": self.fake_xmloperator_replace_metadata_object,
}
folderoperator_config = {
"create_folder.side_effect": self.fake_folderoperator_create_folder,
"read_folder.side_effect": self.fake_folderoperator_read_folder,
"delete_folder.side_effect": self.fake_folderoperator_delete_folder,
"check_object_in_folder.side_effect": self.fake_folderoperator_check_object,
"get_collection_objects.side_effect": self.fake_folderoperator_get_collection_objects,
}
useroperator_config = {
"create_user.side_effect": self.fake_useroperator_create_user,
"read_user.side_effect": self.fake_useroperator_read_user,
"check_user_has_doc.side_effect": self.fake_useroperator_user_has_folder,
}
self.patch_parser = patch(class_parser, spec=True)
self.patch_operator = patch(class_operator, **operator_config, spec=True)
self.patch_xmloperator = patch(class_xmloperator, **xmloperator_config, spec=True)
self.patch_folderoperator = patch(class_folderoperator, **folderoperator_config, spec=True)
self.patch_useroperator = patch(class_useroperator, **useroperator_config, spec=True)
self.MockedParser = self.patch_parser.start()
self.MockedOperator = self.patch_operator.start()
self.MockedXMLOperator = self.patch_xmloperator.start()
self.MockedFolderOperator = self.patch_folderoperator.start()
self.MockedUserOperator = self.patch_useroperator.start()
async def tearDownAsync(self):
"""Cleanup mocked stuff."""
self.patch_parser.stop()
self.patch_operator.stop()
self.patch_xmloperator.stop()
self.patch_folderoperator.stop()
self.patch_useroperator.stop()
def create_submission_data(self, files):
"""Create request data from pairs of schemas and filenames."""
data = FormData()
for schema, filename in files:
schema_path = "study" if schema == "fake" else schema
path_to_file = self.TESTFILES_ROOT / schema_path / filename
data.add_field(
schema.upper(), open(path_to_file.as_posix(), "r"), filename=path_to_file.name, content_type="text/xml"
)
return data
async def fake_operator_read_metadata_object(self, schema_type, accession_id):
"""Fake read operation to return mocked json."""
return await futurized((self.metadata_json, "application/json"))
async def fake_operator_query_metadata_object(self, schema_type, query, page_num, page_size, filtered_list):
"""Fake query operation to return list containing mocked json."""
return await futurized(
([self.metadata_json], self.page_num, self.page_size, self.total_objects),
)
async def fake_xmloperator_read_metadata_object(self, schema_type, accession_id):
"""Fake read operation to return mocked xml."""
return await futurized((self.metadata_xml, "text/xml"))
async def fake_xmloperator_create_metadata_object(self, schema_type, content):
"""Fake create operation to return mocked accessionId."""
return await futurized(self.test_ega_string)
async def fake_xmloperator_replace_metadata_object(self, schema_type, accession_id, content):
"""Fake replace operation to return mocked accessionId."""
return await futurized(self.test_ega_string)
async def fake_operator_create_metadata_object(self, schema_type, content):
"""Fake create operation to return mocked accessionId."""
return await futurized(self.test_ega_string)
async def fake_operator_update_metadata_object(self, schema_type, accession_id, content):
"""Fake update operation to return mocked accessionId."""
return await futurized(self.test_ega_string)
async def fake_operator_replace_metadata_object(self, schema_type, accession_id, content):
"""Fake replace operation to return mocked accessionId."""
return await futurized(self.test_ega_string)
async def fake_operator_delete_metadata_object(self, schema_type, accession_id):
"""Fake delete operation to await successful operation indicator."""
return await futurized(True)
async def fake_folderoperator_create_folder(self, content):
"""Fake create operation to return mocked folderId."""
return await futurized(self.folder_id)
async def fake_folderoperator_read_folder(self, folder_id):
"""Fake read operation to return mocked folder."""
return await futurized(self.test_folder)
async def fake_folderoperator_delete_folder(self, folder_id):
"""Fake delete folder to await nothing."""
return await futurized(None)
async def fake_folderoperator_check_object(self, schema_type, accession_id):
"""Fake check object in folder."""
data = True, self.folder_id, False
return await futurized(data)
async def fake_folderoperator_get_collection_objects(self, schema_type, accession_id):
"""Fake get collection of objects in folder."""
return await futurized(["EDAG3991701442770179", "EGA123456"])
async def fake_useroperator_user_has_folder(self, schema_type, user_id, folder_id):
"""Fake check object in folder."""
return await futurized(True)
async def fake_useroperator_create_user(self, content):
"""Fake user operation to return mocked userId."""
return await futurized(self.user_id)
async def fake_useroperator_read_user(self, user_id):
"""Fake read operation to return mocked user."""
return await futurized(self.test_user)
@unittest_run_loop
async def test_submit_endpoint_submission_does_not_fail(self):
"""Test that submission with valid SUBMISSION.xml does not fail."""
files = [("submission", "ERA521986_valid.xml")]
data = self.create_submission_data(files)
response = await self.client.post("/submit", data=data)
self.assertEqual(response.status, 200)
self.assertEqual(response.content_type, "application/json")
@unittest_run_loop
async def test_submit_endpoint_fails_without_submission_xml(self):
"""Test that basic POST submission fails with no submission.xml.
User should also be notified about the missing file.
"""
files = [("analysis", "ERZ266973.xml")]
data = self.create_submission_data(files)
response = await self.client.post("/submit", data=data)
failure_text = "There must be a submission.xml file in submission."
self.assertEqual(response.status, 400)
self.assertIn(failure_text, await response.text())
@unittest_run_loop
async def test_submit_endpoint_fails_with_many_submission_xmls(self):
"""Test submission fails when there's too many submission.xml -files.
User should be notified for submitting too many files.
"""
files = [("submission", "ERA521986_valid.xml"), ("submission", "ERA521986_valid2.xml")]
data = self.create_submission_data(files)
response = await self.client.post("/submit", data=data)
failure_text = "You should submit only one submission.xml file."
self.assertEqual(response.status, 400)
self.assertIn(failure_text, await response.text())
@unittest_run_loop
async def test_correct_schema_types_are_returned(self):
"""Test api endpoint for all schema types."""
response = await self.client.get("/schemas")
response_text = await response.text()
schema_types = [
"submission",
"study",
"sample",
"experiment",
"run",
"analysis",
"dac",
"policy",
"dataset",
"project",
]
for schema_type in schema_types:
self.assertIn(schema_type, response_text)
@unittest_run_loop
async def test_correct_study_schema_are_returned(self):
"""Test api endpoint for study schema types."""
response = await self.client.get("/schemas/study")
response_text = await response.text()
self.assertIn("study", response_text)
self.assertNotIn("submission", response_text)
@unittest_run_loop
async def test_raises_invalid_schema(self):
"""Test api endpoint for study schema types."""
response = await self.client.get("/schemas/something")
self.assertEqual(response.status, 404)
@unittest_run_loop
async def test_raises_not_found_schema(self):
"""Test api endpoint for study schema types."""
response = await self.client.get("/schemas/project")
self.assertEqual(response.status, 400)
resp_json = await response.json()
self.assertEqual(resp_json["detail"], "The provided schema type could not be found. (project)")
@unittest_run_loop
async def test_submit_object_works(self):
"""Test that submission is handled, XMLOperator is called."""
files = [("study", "SRP000539.xml")]
data = self.create_submission_data(files)
response = await self.client.post("/objects/study", data=data)
self.assertEqual(response.status, 201)
self.assertIn(self.test_ega_string, await response.text())
self.MockedXMLOperator().create_metadata_object.assert_called_once()
@unittest_run_loop
async def test_submit_object_works_with_json(self):
"""Test that json submission is handled, operator is called."""
json_req = {
"centerName": "GEO",
"alias": "GSE10966",
"descriptor": {"studyTitle": "Highly", "studyType": "Other"},
}
response = await self.client.post("/objects/study", json=json_req)
self.assertEqual(response.status, 201)
self.assertIn(self.test_ega_string, await response.text())
self.MockedOperator().create_metadata_object.assert_called_once()
@unittest_run_loop
async def test_submit_object_missing_field_json(self):
"""Test that json has missing property."""
json_req = {"centerName": "GEO", "alias": "GSE10966"}
response = await self.client.post("/objects/study", json=json_req)
reason = "Provided input does not seem correct because: " "''descriptor' is a required property'"
self.assertEqual(response.status, 400)
self.assertIn(reason, await response.text())
@unittest_run_loop
async def test_submit_object_bad_field_json(self):
"""Test that json has bad studyType."""
json_req = {
"centerName": "GEO",
"alias": "GSE10966",
"descriptor": {"studyTitle": "Highly", "studyType": "ceva"},
}
response = await self.client.post("/objects/study", json=json_req)
reason = "Provided input does not seem correct for field: " "'descriptor'"
self.assertEqual(response.status, 400)
self.assertIn(reason, await response.text())
@unittest_run_loop
async def test_post_object_bad_json(self):
"""Test that post json is badly formated."""
json_req = {
"centerName": "GEO",
"alias": "GSE10966",
"descriptor": {"studyTitle": "Highly", "studyType": "Other"},
}
response = await self.client.post("/objects/study", data=json_req)
reason = "JSON is not correctly formatted. " "See: Expecting value: line 1 column 1"
self.assertEqual(response.status, 400)
self.assertIn(reason, await response.text())
@unittest_run_loop
async def test_put_object_bad_json(self):
"""Test that put json is badly formated."""
json_req = {
"centerName": "GEO",
"alias": "GSE10966",
"descriptor": {"studyTitle": "Highly", "studyType": "Other"},
}
call = "/drafts/study/EGA123456"
response = await self.client.put(call, data=json_req)
reason = "JSON is not correctly formatted. " "See: Expecting value: line 1 column 1"
self.assertEqual(response.status, 400)
self.assertIn(reason, await response.text())
@unittest_run_loop
async def test_patch_object_bad_json(self):
"""Test that patch json is badly formated."""
json_req = {"centerName": "GEO", "alias": "GSE10966"}
call = "/drafts/study/EGA123456"
response = await self.client.patch(call, data=json_req)
reason = "JSON is not correctly formatted. " "See: Expecting value: line 1 column 1"
self.assertEqual(response.status, 400)
self.assertIn(reason, await response.text())
@unittest_run_loop
async def test_submit_draft_works_with_json(self):
"""Test that draft json submission is handled, operator is called."""
json_req = {
"centerName": "GEO",
"alias": "GSE10966",
"descriptor": {"studyTitle": "Highly", "studyType": "Other"},
}
response = await self.client.post("/drafts/study", json=json_req)
self.assertEqual(response.status, 201)
self.assertIn(self.test_ega_string, await response.text())
self.MockedOperator().create_metadata_object.assert_called_once()
@unittest_run_loop
async def test_put_draft_works_with_json(self):
"""Test that draft json put method is handled, operator is called."""
json_req = {
"centerName": "GEO",
"alias": "GSE10966",
"descriptor": {"studyTitle": "Highly", "studyType": "Other"},
}
call = "/drafts/study/EGA123456"
response = await self.client.put(call, json=json_req)
self.assertEqual(response.status, 200)
self.assertIn(self.test_ega_string, await response.text())
self.MockedOperator().replace_metadata_object.assert_called_once()
@unittest_run_loop
async def test_put_draft_works_with_xml(self):
"""Test that put XML submisssion is handled, XMLOperator is called."""
files = [("study", "SRP000539.xml")]
data = self.create_submission_data(files)
call = "/drafts/study/EGA123456"
response = await self.client.put(call, data=data)
self.assertEqual(response.status, 200)
self.assertIn(self.test_ega_string, await response.text())
self.MockedXMLOperator().replace_metadata_object.assert_called_once()
@unittest_run_loop
async def test_patch_draft_works_with_json(self):
"""Test that draft json patch method is handled, operator is called."""
json_req = {"centerName": "GEO", "alias": "GSE10966"}
call = "/drafts/study/EGA123456"
response = await self.client.patch(call, json=json_req)
self.assertEqual(response.status, 200)
self.assertIn(self.test_ega_string, await response.text())
from ClassesDAO.ClienteDAO import ClienteDAO
from ClassesDAO.FuncionarioDAO import FuncionarioDAO
from ClassesDAO.LivroDAO import LivroDAO
from ClassesDAO.CategoriaDAO import CategoriaDAO
# Class that implements the application screens and is also the class that starts the application
class AluguelPython(object):
# Main menu of the application
def menu_principal(self):
print("==========================================")
print("Sistema de Alugueis Python")
print("==========================================")
print("Opção\tDescrição")
print("------------------------------------------")
print("0\t\tSair da Aplicação")
print("1\t\tOperações de Clientes")
print("2\t\tOperações de Funcionario")
print("3\t\tOperações de Livro")
print("4\t\tOperações de Categoria")
print("------------------------------------------")
opcao = int(input("Digite uma opção [0-4]: "))
if opcao == 0:
return
if opcao == 1:
self.menu_clientes()
return
if opcao == 2:
self.menu_funcionarios()
return
if opcao == 3:
self.menu_livros()
return
if opcao == 4:
self.menu_categorias()
return
self.menu_principal()
# Menu that shows the options for the client
def menu_clientes(self):
print("==========================================")
print("Operações do cliente")
print("==========================================")
print("Opção\tDescrição")
print("------------------------------------------")
print("0\t\tVoltar ao Menu Principal")
print("1\t\tListar Todos os Clientes Existentes")
print("2\t\tListar um Cliente Existente")
print("3\t\tInserir um Novo Cliente")
print("4\t\tAtualizar um Cliente Existente")
print("5\t\tRemover um Cliente Existente")
print("------------------------------------------")
opcao = int(input("Digite uma opção [0-5]: "))
if opcao == 0:
self.menu_principal()
return
if opcao == 1:
self.menu_listar_todos_clientes()
return
if opcao == 2:
self.menu_listar_um_cliente()
return
if opcao == 3:
self.menu_inserir_um_cliente()
return
if opcao == 4:
self.menu_atualizar_um_cliente()
return
if opcao == 5:
self.menu_remover_um_cliente()
return
self.menu_clientes()
# Menu that shows the action to list all registered clients
def menu_listar_todos_clientes(self):
print("==========================================")
print("Listar Todos os Clientes Existentes")
print("==========================================")
clienteDAO = ClienteDAO()
clientes = clienteDAO.listas_todas()
for c in clientes:
print("*** Código: " + str(c.codigo) + " - Nome: " + c.nome + " - CPF: " + str(c.cpf) + " - Endereco: " + c.endereco + " ***")
print("*** " + str(len(clientes)) + " clientes(s) encontrada(s) ***")
self.menu_clientes()
# Menu that shows the action to list an existing client
def menu_listar_um_cliente(self):
print("==========================================")
print("Listar um Cliente Existente")
print("==========================================")
codigo = int(input("Digite o código do cliente: "))
clienteDAO = ClienteDAO()
cliente = clienteDAO.listar(codigo)
if cliente is not None:
print("*** Código: " + str(cliente.codigo) + " - Nome: " + cliente.nome + " - CPF: " + str(cliente.cpf) + " - Endereco: " + cliente.endereco + " ***")
else:
print("*** Não foi possível localizar este funcionario ***")
self.menu_clientes()
# Menu that shows the action to insert a new client
def menu_inserir_um_cliente(self):
print("==========================================")
print("Inserir um Novo Cliente")
print("==========================================")
codigo = int(input("Digite o código do novo cliente: "))
nome = input("Digite o nome do novo cliente: ")
cpf = input("Digite o cpf do novo cliente: ")
endereco = input("Digite o endereco do novo cliente: ")
clienteDAO = ClienteDAO()
sucesso = clienteDAO.inserir(codigo, nome, cpf, endereco)
if sucesso == True:
print("*** Cliente inserido com sucesso ***")
else:
print("*** Não foi possível inserir este cliente ***")
self.menu_clientes()
# Menu that shows the action to update an existing client's data
def menu_atualizar_um_cliente(self):
print("==========================================")
print("Atualizar um Cliente Existente")
print("==========================================")
codigo = int(input("Digite o código do cliente: "))
nome = input("Digite o novo nome do cliente: ")
cpf = input("Digite o novo cpf do cliente: ")
endereco = input("Digite o novo endereco do cliente: ")
clienteDAO = ClienteDAO()
sucesso = clienteDAO.atualizar(codigo, nome, cpf, endereco)
if sucesso == True:
print("*** Cliente atualizado com sucesso ***")
else:
print("*** Não foi possível atualizar este cliente ***")
self.menu_clientes()
# Menu that shows the action to remove a client
def menu_remover_um_cliente(self):
print("==========================================")
print("Remover um Cliente Existente")
print("==========================================")
codigo = int(input("Digite o código do cliente: "))
clienteDAO = ClienteDAO()
sucesso = clienteDAO.remover(codigo)
if sucesso == True:
print("*** Cliente removido com sucesso ***")
else:
print("*** Não foi possível remover este cliente ***")
self.menu_clientes()
#FUNCIONARIO------------------------------------------------------------------------
def menu_funcionarios(self):
print("==========================================")
print("Operações do Funcionario")
print("==========================================")
print("Opção\tDescrição")
print("------------------------------------------")
print("0\t\tVoltar ao Menu Principal")
print("1\t\tListar Todos os Funcionarios existentes")
print("2\t\tInserir um Funcionarios")
print("3\t\tAtualizar um Funcionarios")
print("4\t\tRemover um Funcionarios")
print("------------------------------------------")
opcao = int(input("Digite uma opção [0-4]: "))
if opcao == 0:
self.menu_principal()
return
if opcao == 1:
self.menu_listar_todos_funcionarios()
return
if opcao == 2:
self.menu_inserir_um_funcionario()
return
if opcao == 3:
self.menu_atualizar_um_funcionario()
return
if opcao == 4:
self.menu_remover_um_funcionario()
return
self.menu_funcionarios()
# Menu that shows the action to list all employees
def menu_listar_todos_funcionarios(self):
print("==========================================")
print("Listar Todos os Funcionarios Existentes")
print("==========================================")
funcionarioDAO = FuncionarioDAO()
funcionarios = funcionarioDAO.listas_todas()
for f in funcionarios:
print("*** Código: " + str(f.codigo) + " - Nome: " + f.nome + " - CPF: " + str(f.cpf) + " - Endereco: " + f.endereco + " - Salario: " + str(f.salario) +"***")
print("*** " + str(len(funcionarios)) + " pessoa(s) encontrada(s) ***")
self.menu_funcionarios()
# Menu that shows the action to insert a new employee
def menu_inserir_um_funcionario(self):
print("==========================================")
print("Inserir um novo funcionario")
print("==========================================")
codigo = int(input("Digite o código do novo funcionario: "))
nome = input("Digite o nome do novo funcionario: ")
cpf = input("Digite o cpf do novo funcionario: ")
endereco = input("Digite o endereco do novo funcionario: ")
salario = input("Digite o salario do novo funcionario: ")
funcionarioDAO = FuncionarioDAO()
sucesso = funcionarioDAO.inserir(codigo, nome, cpf, endereco,salario)
if sucesso == True:
print("*** Funcionario inserido com sucesso ***")
else:
print("*** Não foi possível inserir esta pessoa ***")
self.menu_funcionarios()
# Menu that shows the action to update an existing employee's data
def menu_atualizar_um_funcionario(self):
print("==========================================")
print("Atualizar um Funcionario existente")
print("==========================================")
codigo = int(input("Digite o código do Funcionario: "))
nome = input("Digite o novo nome do Funcionario: ")
cpf = input("Digite o novo cpf do Funcionario: ")
endereco = input("Digite o novo endereco do Funcionario: ")
salario = input("Digite o novo salario do Funcionario: ")
funcionarioDAO = FuncionarioDAO()
sucesso = funcionarioDAO.atualizar(codigo, nome, cpf, endereco,salario)
if sucesso == True:
print("*** Funcionario atualizado com sucesso ***")
else:
print("*** Não foi possível atualizar este funcionario ***")
self.menu_funcionarios()
# Menu that shows the action to remove an existing employee
def menu_remover_um_funcionario(self):
print("==========================================")
print("Remover um Funcionario Existente")
print("==========================================")
codigo = int(input("Digite o código da pessoa: "))
funcionarioDAO = FuncionarioDAO()
sucesso = funcionarioDAO.remover(codigo)
if sucesso == True:
print("*** Funcionario removido com sucesso ***")
else:
print("*** Não foi possível remover este funcionario ***")
self.menu_funcionarios()
#LIVROS -------------------------------------------------------------------------------------
def menu_livros(self):
print("==========================================")
print("Operações de livro")
print("==========================================")
print("Opção\tDescrição")
print("------------------------------------------")
print("0\t\tVoltar ao Menu Principal")
print("1\t\tListar Todos os Livros")
print("2\t\tListar Livro Existente")
print("3\t\tListar as Categorias de um Livro existente")
print("4\t\tInserir um novo livro")
print("5\t\tInserir um autor para um livro")
print("6\t\tAtualizar um livro existente")
print("7\t\tRemover um livro existente")
print("------------------------------------------")
opcao = int(input("Digite uma opção [0-7]: "))
if opcao == 0:
self.menu_principal()
return
if opcao == 1:
self.menu_listar_todos_livros()
return
if opcao == 2:
self.menu_listar_um_livro()
return
if opcao == 3:
self.menu_listar_categorias_um_livro()
return
if opcao == 4:
self.menu_inserir_um_livro()
return
if opcao == 5:
self.menu_inserir_um_autor()
return
if opcao == 6:
self.menu_atualizar_um_livro()
return
if opcao == 7:
self.menu_remover_um_livro()
return
self.menu_livros()
def menu_listar_todos_livros(self):
print("==========================================")
print("Listar Todos os Livros Existentes")
print("==========================================")
livroDAO = LivroDAO()
livros = livroDAO.listar_todos()
for l in livros:
print("*** Código: " + str(l.codigo) + " - Titulo: " + l.titulo + " - Ano: " + str(l.ano) + " - Edicao: " + str(l.edicao) + " - Editora: " + l.editora + " - Quantidade de paginas: " + str(l.quant_paginas) + "***")
print("*** " + str(len(livros)) + " livro(s) encontrado(s) ***")
self.menu_livros()
def menu_listar_um_livro(self):
print("==========================================")
print("Listar um Livro Existente")
print("==========================================")
codigo = int(input("Digite o código do livro: "))
livroDAO = LivroDAO()
livro = livroDAO.listar_um_livro(codigo)
if livro is not None:
print("*** Código: " + str(livro.codigo) + " - Titulo: " + livro.titulo + " - Ano: " + str(livro.ano) + " - Edicao: " + str(livro.edicao) + " - Editora: " + livro.editora + " - Quantidade de paginas: " + str(livro.quant_paginas) + "***")
else:
print("*** Não foi possível localizar este livro ***")
self.menu_livros()
def menu_listar_categorias_um_livro(self):
print("==========================================")
print("Listar as Categorias de um Livro")
print("==========================================")
livroId = int(input('Entre o codigo do livro: '))
print("==========================================")
livroDAO = LivroDAO()
livro = livroDAO.listar_categorias(livroId)
if livro is not None:
print("Dados do Livro de Codigo = %s" % livroId)
print("Livro - Codigo: %s - Titulo: %s - Ano: %s - Edicao: %s - Editora: %s - Paginas: %s - Qtd. Categorias: %s" % (
livro.codigo, livro.titulo, livro.ano, livro.edicao, livro.editora, livro.quant_paginas,
len(livro.categorias)))
print("Categorias do Livro: %s" % livro.titulo)
for c in livro.categorias:
print("Codigo: %s - Descricao: %s" % (c.codigo, c.descricao))
else:
print("*** | |
retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[User]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[User]")
def list_work_requests(self, compartment_id, **kwargs):
"""
Lists the work requests in a compartment.
:param str compartment_id: (required)
The OCID of the compartment (remember that the tenancy is simply the root compartment).
:param str page: (optional)
The value of the `opc-next-page` response header from the previous \"List\" call.
:param int limit: (optional)
The maximum number of items to return in a paginated \"List\" call.
:param str resource_identifier: (optional)
The identifier of the resource the work request affects.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.identity.models.WorkRequestSummary`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/workRequests"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"page",
"limit",
"resource_identifier"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_requests got unknown kwargs: {!r}".format(extra_kwargs))
query_params = {
"compartmentId": compartment_id,
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"resourceIdentifier": kwargs.get("resource_identifier", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json"
}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestSummary]")
def move_compartment(self, compartment_id, move_compartment_details, **kwargs):
"""
Move the compartment to a different parent compartment in the same tenancy. When you move a
compartment, all its contents (subcompartments and resources) are moved with it. Note that
the `CompartmentId` that you specify in the path is the compartment that you want to move.
**IMPORTANT**: After you move a compartment to a new parent compartment, the access policies of
the new parent take effect and the policies of the previous parent no longer apply. Ensure that you
are aware of the implications for the compartment contents before you move it. For more
information, see `Moving a Compartment`__.
__ https://docs.cloud.oracle.com/Content/Identity/Tasks/managingcompartments.htm#MoveCompartment
:param str compartment_id: (required)
The OCID of the compartment.
:param MoveCompartmentDetails move_compartment_details: (required)
Request object for moving a compartment.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If you need to contact Oracle about a
particular request, please provide the request ID.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations (e.g., if a resource
has been deleted and purged from the system, then a retry of the original creation request
may be rejected).
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/compartments/{compartmentId}/actions/moveCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"move_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"compartmentId": compartment_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=move_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=move_compartment_details)
def recover_compartment(self, compartment_id, **kwargs):
"""
Recover the compartment from DELETED state to ACTIVE state.
:param str compartment_id: (required)
The OCID of the compartment.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param str opc_request_id: (optional)
Unique Oracle-assigned identifier for the request. If you need to contact Oracle about a
particular request, please provide the request ID.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.Compartment`
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/compartments/{compartmentId}/actions/recoverCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"recover_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"compartmentId": compartment_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Compartment")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Compartment")
def remove_user_from_group(self, user_group_membership_id, **kwargs):
"""
Removes a user from a group by deleting the corresponding `UserGroupMembership`.
:param str user_group_membership_id: (required)
The OCID of the userGroupMembership.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
parameter to the value of the etag from a previous GET or POST response for that resource. The resource
will be updated or deleted only if the etag you provide matches the resource's current etag value.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
"""
resource_path = "/userGroupMemberships/{userGroupMembershipId}"
method = "DELETE"
# Don't accept unknown kwargs
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import ctypes
import numpy as np
from onnx.backend.test.case.node import _extract_value_info
import onnx
from onnx import TensorProto, helper, mapping, numpy_helper
import pycuda.driver as cuda
import tensorrt as trt
import tensorflow as tf
sys.path.append("..")
from python import *
os.chdir("../python/")
I_GPU = 0
os.environ["CUDA_VISIBLE_DEVICES"] = str(I_GPU)
tf.set_random_seed(1234)
np.random.seed(0)
ITERATIONS = 10
CONFIG = tf.ConfigProto()
CONFIG.gpu_options.allow_growth = True
INPUT_MODEL_FILE = "model/test_op_plugin.onnx"
OUTPUT_MODEL_FILE = "model/test_op_trt.onnx"
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
BATCH_SIZE = 1
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
# size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
size = trt.volume(engine.get_binding_shape(binding))
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async(
batch_size=batch_size, bindings=bindings, stream_handle=stream.handle
)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
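# Typical flow tying the helpers above together (sketch; assumes `engine` is an
# already-built ICudaEngine and `input_array` is a numpy array matching binding 0,
# mirroring how the verification functions below use these helpers):
#   inputs, outputs, bindings, stream = allocate_buffers(engine)
#   with engine.create_execution_context() as context:
#       np.copyto(inputs[0].host, input_array.ravel())
#       results = do_inference(context, bindings=bindings, inputs=inputs,
#                              outputs=outputs, stream=stream, batch_size=1)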
def convert_to_list(x):
if not isinstance(x, list):
x = [x]
return x
def run_tf_graph(sess, input_data, input_node, output_node):
"""Generic function to execute tensorflow"""
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
output_node = convert_to_list(output_node)
tensor = [sess.graph.get_tensor_by_name(output_name) for output_name in output_node]
input_dict = {e: input_data[i] for i, e in enumerate(input_node)}
# if len(input_node) == 1 and input_node[0] == "":
# output_data = sess.run(tensor)
# else:
output_data = sess.run(tensor, input_dict)
return output_data
def verify_tf_with_trt_result(in_data, in_name, out_name, op_name):
def name_without_num(name):
return name.split(":")[0] if ":" in name else name
out_name = convert_to_list(out_name)
out_node = [name_without_num(name) for name in out_name]
in_data = convert_to_list(in_data)
in_name = convert_to_list(in_name)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf_result = run_tf_graph(sess, in_data, in_name, out_name)
frozen_graph = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, out_node
)
with open("model/test_op_{}.pb".format(op_name), "wb") as ofile:
ofile.write(frozen_graph.SerializeToString())
os.system(
"python3 -m tf2onnx.convert --input model/test_op_{}.pb --inputs {} --outputs {} --output {} --opset 11".format(
op_name, str(",").join(in_name), str(",").join(out_name), INPUT_MODEL_FILE
)
)
ops_name = [op_name]
trt_plugin_name = onnx2plugin(
INPUT_MODEL_FILE, OUTPUT_MODEL_FILE, node_names=ops_name
)
for plugin_name in trt_plugin_name:
ctypes.cdll.LoadLibrary("./trt_plugin/lib/{}.so".format(plugin_name))
cuda.Device(0).make_context()
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(
1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
builder.max_batch_size = BATCH_SIZE
builder_config = builder.create_builder_config()
builder_config.max_workspace_size = 1 << 30
with open(OUTPUT_MODEL_FILE, "rb") as model:
# parse onnx model
parser.parse(model.read())
for i in range(parser.num_errors):
print(parser.get_error(i))
engine = builder.build_engine(network, builder_config)
if engine is None:
print("[ERROR] engine is None")
exit(-1)
inputs, outputs, bindings, stream = allocate_buffers(engine)
with engine.create_execution_context() as context:
for i in range(len(inputs)):
input_data = in_data[i].ravel()
np.copyto(inputs[i].host, input_data)
trt_result = do_inference(
context,
bindings=bindings,
inputs=inputs,
outputs=outputs,
stream=stream,
batch_size=BATCH_SIZE,
)
cuda.Context.pop()
ret = True
if len(trt_result) == 1:
ret = compare_tf_trt_result(tf_result, trt_result)
else:
for i in range(len(trt_result)):
ret &= compare_tf_trt_result(tf_result[i], trt_result[i])
assert ret, "result check False"
return ret
def compare_tf_trt_result(tf_result, trt_result):
print(tf_result)
print("================")
print(trt_result)
tf_reshape = np.array(tf_result).reshape(-1)
trt_reshape = np.array(trt_result).reshape(-1)
if (
isinstance(tf_result, list)
and isinstance(trt_result, list)
and len(tf_result) > 0
and len(trt_result) > 0
and np.isnan(tf_result[0]).any()
and np.isnan(trt_result[0]).any()
):
return True
elif (
isinstance(tf_result, list)
and isinstance(trt_result, list)
and len(tf_result) > 0
and len(trt_result) > 0
and np.isinf(tf_result[0]).any()
and np.isinf(trt_result[0]).any()
):
return True
print(
"trt cross_check output ",
str(np.allclose(tf_reshape.flatten(), trt_reshape.flatten(), atol=1e-5)),
flush=True,
)
return bool(np.allclose(tf_reshape.flatten(), trt_reshape.flatten(), atol=1e-5))
def get_onnxruntime_output(model, inputs):
import onnxruntime.backend
rep = onnxruntime.backend.prepare(model, "CPU")
if isinstance(inputs, list) and len(inputs) == 1:
inp = inputs[0]
else:
inp = inputs
output = rep.run(inp)
# Unpack output if there's only a single value.
if len(output) == 1:
output = output[0]
return output
def verify_with_ort_with_trt(
model,
inputs,
op_name,
opset=None,
dtype="float32",
opt_level=1,
np_result=None,
use_vm=False,
):
if opset is not None:
model.opset_import[0].version = opset
onnx.save(model, INPUT_MODEL_FILE)
if np_result is None:
ort_result = get_onnxruntime_output(model, inputs)
else:
ort_result = np_result
in_data = convert_to_list(inputs)
ops_name = [op_name]
trt_plugin_name = onnx2plugin(
INPUT_MODEL_FILE, OUTPUT_MODEL_FILE, node_names=ops_name
)
for plugin_name in trt_plugin_name:
ctypes.cdll.LoadLibrary("./trt_plugin/lib/{}.so".format(plugin_name))
cuda.Device(0).make_context()
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(
1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
builder.max_batch_size = BATCH_SIZE
builder_config = builder.create_builder_config()
builder_config.max_workspace_size = 1 << 30
with open(OUTPUT_MODEL_FILE, "rb") as model:
# parse onnx model
parser.parse(model.read())
for i in range(parser.num_errors):
print(parser.get_error(i))
engine = builder.build_engine(network, builder_config)
if engine is None:
print("[ERROR] engine is None")
exit(-1)
inputs, outputs, bindings, stream = allocate_buffers(engine)
with engine.create_execution_context() as context:
for i in range(len(inputs)):
input_data = in_data[i].ravel()
np.copyto(inputs[i].host, input_data)
trt_result = do_inference(
context,
bindings=bindings,
inputs=inputs,
outputs=outputs,
stream=stream,
batch_size=BATCH_SIZE,
)
cuda.Context.pop()
ret = True
if len(trt_result) == 1:
ret = compare_tf_trt_result(ort_result, trt_result)
else:
for i in range(len(trt_result)):
ret &= compare_tf_trt_result(ort_result[i], trt_result[i])
assert ret, "result check False"
return ret
def make_constant_node(name, data_type, dims, vals):
return helper.make_node(
"Constant",
inputs=[],
outputs=[name],
value=helper.make_tensor(name=name, data_type=data_type, dims=dims, vals=vals),
)
def make_onnx_model(node, inputs, outputs, name, **kwargs):
present_inputs = [x for x in node.input if (x != "")]
present_outputs = [x for x in node.output if (x != "")]
input_type_protos = [None] * len(inputs)
if "input_type_protos" in kwargs:
input_type_protos = kwargs[str("input_type_protos")]
del kwargs[str("input_type_protos")]
output_type_protos = [None] * len(outputs)
if "output_type_protos" in kwargs:
output_type_protos = kwargs[str("output_type_protos")]
del kwargs[str("output_type_protos")]
inputs_vi = [
_extract_value_info(arr, arr_name, input_type)
for arr, arr_name, input_type in zip(inputs, present_inputs, input_type_protos)
]
outputs_vi = [
_extract_value_info(arr, arr_name, output_type)
for arr, arr_name, output_type in zip(
outputs, present_outputs, output_type_protos
)
]
graph = helper.make_graph(
nodes=[node], name=name, inputs=inputs_vi, outputs=outputs_vi
)
kwargs[str("producer_name")] = "TRTPluginAutoGen-test"
model = onnx.helper.make_model(graph, **kwargs)
return model
def op_expect(node, inputs, outputs, op_type, op_name, np_result=None):
model = make_onnx_model(
node, inputs=inputs, outputs=outputs, name="test_{}".format(op_type)
)
verify_with_ort_with_trt(model, inputs, op_name, np_result=np_result)
# ====================================================================================
# ---UnitTest
# ====================================================================================
def test_abs():
op_name = "abs_0"
op_type = "Abs"
x = np.random.randn(3, 4, 5).astype(np.float32)
y = abs(x)
node = helper.make_node(op_type, inputs=["x"], outputs=["y"], name=op_name)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_acos():
op_name = "acos_0"
op_type = "Acos"
node = onnx.helper.make_node("Acos", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-0.5, 0, 0.5]).astype(np.float32)
y = np.arccos(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "acos_1"
op_type = "Acos"
node = onnx.helper.make_node("Acos", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.rand(3, 4, 5).astype(np.float32)
y = np.arccos(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_and():
op_name = "and_0"
op_type = "And"
node = onnx.helper.make_node(
"And", inputs=["x", "y"], outputs=["and"], name=op_name
)
# 2d
x = (np.random.randn(3, 4) > 0).astype(bool)
y = (np.random.randn(3, 4) > 0).astype(bool)
z = np.logical_and(x, y)
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "and_1"
op_type = "And"
node = onnx.helper.make_node(
"And", inputs=["x", "y"], outputs=["and"], name=op_name
)
x = (np.random.randn(3, 4, 5) > 0).astype(bool)
y = (np.random.randn(3, 4, 5) > 0).astype(bool)
z = np.logical_and(x, y)
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "and_2"
op_type = "And"
node = onnx.helper.make_node(
"And", inputs=["x", "y"], outputs=["and"], name=op_name
)
x = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
y = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
z = np.logical_and(x, y)
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
def test_add():
op_name = "add_0"
op_type = "Add"
node = onnx.helper.make_node(
"Add", inputs=["x", "y"], outputs=["sum"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
op_expect(node, inputs=[x, y], outputs=[x + y], op_type=op_type, op_name=op_name)
op_name = "add_1"
op_type = "Add"
node = onnx.helper.make_node(
"Add", inputs=["x", "y"], outputs=["sum"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(5).astype(np.float32)
op_expect(node, inputs=[x, y], outputs=[x + y], op_type=op_type, op_name=op_name)
def test_argmax():
op_type = "ArgMax"
op_name = "argmax_0"
data = np.array([[2,
# Source: kaldan007/openpecha-toolkit, openpecha/formatters/hfml.py
from copy import deepcopy
from pathlib import Path
import re
from openpecha.formatters.formatter import BaseFormatter
from openpecha.formatters.format import *
class HFMLFormatter(BaseFormatter):
'''
OpenPecha Formatter for digitized woodblock-printed pechas, based on the annotation scheme from Esukhia.
'''
def __init__(self, output_path='./output'):
super().__init__(output_path=output_path)
self.base_text = ''
self.vol_walker = 0
self.topic_id = [] # made a class variable as it needs to update across potis
self.current_topic_id = [] # made a class variable as it needs to update across potis
self.sub_topic = [] # made a class variable as it needs to update across potis
self.page = []
self.error_id = []
self.abs_er_id = []
self.notes_id = []
self.sub_topic_Id = [] # made a class variable as it needs to update across potis
self.topic_info = []
self.sub_topic_info = []
self.cur_sub = []
def text_preprocess(self, text):
if text[0] == '\ufeff':
return text[1:]
return text
def get_input(self, input_path):
fns = list(input_path.iterdir())
fns_len = len(fns)
for fn in sorted(fns):
yield self.text_preprocess(fn.read_text()), fn, fns_len
def format_layer(self, layers):
cross_vol_anns = [layers['topic'], layers['sub_topic']]
non_cross_vol_anns = [layers['page'], layers['correction'], layers['peydurma'], layers['error_candidate']]
anns = {'cross_vol': cross_vol_anns, 'non_cross_vol': non_cross_vol_anns}
for ann in anns:
if ann == 'non_cross_vol':
for i, (pecha_pg, pecha_correction, pecha_peydurma, pecha_error) in enumerate(zip(*anns[ann])):
base_id = f'v{i+1:03}'
# Page annotation
Pagination = deepcopy(Layer)
Pagination['id'] = self.get_unique_id()
Pagination['annotation_type'] = 'pagination'
Pagination['revision'] = f'{1:05}'
for start, end, pg_info, index in pecha_pg:
page = deepcopy(Page)
page['id'] = self.get_unique_id()
page['span']['start'] = start
page['span']['end'] = end
page['page_index'] = index
page['page_info'] = pg_info
Pagination['annotations'].append(page)
# Correction annotation
Correction_layer = deepcopy(Layer)
Correction_layer['id'] = self.get_unique_id()
Correction_layer['annotation_type'] = 'correction'
Correction_layer['revision'] = f'{1:05}'
for start, end, sug in pecha_correction:
correction = deepcopy(Correction)
correction['id'] = self.get_unique_id()
correction['span']['start'] = start
correction['span']['end'] = end
correction['correction'] = sug
Correction_layer['annotations'].append(correction)
# Error_candidate annotation
Error_layer = deepcopy(Layer)
Error_layer['id'] = self.get_unique_id()
Error_layer['annotation_type'] = 'error_candidate'
Error_layer['revision'] = f'{1:05}'
for start, end in pecha_error:
error = deepcopy(ErrorCandidate)
error['id'] = self.get_unique_id()
error['span']['start'] = start
error['span']['end'] = end
Error_layer['annotations'].append(error)
# Peydurma (note marker) annotation
Peydurma_layer = deepcopy(Layer)
Peydurma_layer['id'] = self.get_unique_id()
Peydurma_layer['annotation_type'] = 'note_marker'
Peydurma_layer['revision'] = f'{1:05}'
for pey in pecha_peydurma:
peydurma = deepcopy(Peydurma)
peydurma['id'] = self.get_unique_id()
peydurma['span']['start'] = pey
peydurma['span']['end'] = pey
Peydurma_layer['annotations'].append(peydurma)
result = {
'pagination': Pagination,
'correction': Correction_layer,
'peydurma': Peydurma_layer,
'error_candidate': Error_layer
}
yield result, base_id
else:
Index_layer = deepcopy(Layer)
Index_layer['id'] = self.get_unique_id()
Index_layer['annotation_type'] = 'index'
Index_layer['revision'] = f'{1:05}'
# loop over each topic
for topic, sub_topic in zip(*anns[ann]):
Topic = deepcopy(Text)
Topic['id'] = self.get_unique_id()
# loop over each sub_topic
for cross_sub_topic in sub_topic:
sub_text = deepcopy(SubText)
sub_text['id'] = self.get_unique_id()
for start, end, vol_id, work in cross_sub_topic:
sub_text['work'] = work
cross_vol_span = deepcopy(CrossVolSpan)
cross_vol_span['vol'] = f'base/v{vol_id:03}'
cross_vol_span['span']['start'] = start
cross_vol_span['span']['end'] = end
sub_text['span'].append(cross_vol_span)
if cross_sub_topic:
Topic['parts'].append(sub_text)
for start, end, vol_id, work in topic:
Topic['work'] = work
cross_vol_span = deepcopy(CrossVolSpan)
cross_vol_span['vol'] = f'base/v{vol_id:03}'
cross_vol_span['span']['start'] = start
cross_vol_span['span']['end'] = end
Topic['span'].append(cross_vol_span)
Index_layer['annotations'].append(Topic)
result = {
'index': Index_layer
}
yield result, None
def total_pattern(self, plist, line):
'''
returns the total length of annotation detected in a line
'''
total_length = 0 # total length of annotation detected in a line
for pp in ['line_pattern','topic_pattern','sub_topic_pattern']:
if re.search(plist[pp], line):
match_list = re.finditer(plist[pp], line) # list of match object of given pattern in line
for i in match_list:
total_length = total_length + len(i[0])
if re.search(plist['error_pattern'], line):
errors = re.finditer(plist['error_pattern'], line) # list of match object of error pattern in line
for error in errors:
error_part = error[0].split(',')[0][1:]
total_length = total_length + (len(error[0])-len(error_part))
if re.search(plist['abs_er_pattern'], line):
abs_ers = re.finditer(plist['abs_er_pattern'], line) # list of match object of abs_er pattern in line
for abs_er in abs_ers:
total_length = total_length + 2
if re.search(plist['note_pattern'], line):
abs_errors = re.finditer(plist['note_pattern'], line) # list of match object of note pattern in line
for abs_error in abs_errors:
total_length = total_length + 1
return total_length
def search_before(self, p, plist, line):
'''
returns the length of annotation detected in a line before the p annotation
'''
length_before = 0
for pp in ['line_pattern','topic_pattern','sub_topic_pattern']:
if re.search(plist[pp], line):
match_list = re.finditer(plist[pp], line) # list of match object of given pattern in line
for i in match_list:
if p.start() > i.start():
length_before = length_before + len(i[0])
if re.search(plist['error_pattern'], line):
errors = re.finditer(plist['error_pattern'], line) # list of match object of error pattern in line
for error in errors:
if p.start() > error.start():
error_part = error[0].split(',')[0][1:]
length_before = length_before + (len(error[0])-len(error_part))
if re.search(plist['abs_er_pattern'], line):
abs_ers = re.finditer(plist['abs_er_pattern'], line) # list of match object of abs_er pattern in line
for abs_er in abs_ers:
if p.start() > abs_er.start():
length_before = length_before + 2
if re.search(plist['note_pattern'], line):
abs_errors = re.finditer(plist['note_pattern'], line) # list of match object of note pattern in line
for abs_error in abs_errors:
if p.start() > abs_error.start():
length_before = length_before+1
return length_before
def base_extract(self, plist, line):
'''
Extract the base text from the given line
'''
base_line = line # stores the base_line which is line without annotation
for p in ['line_pattern','topic_pattern','sub_topic_pattern']:
base_line = re.sub(plist[p], '', base_line)
if re.search(plist['error_pattern'], line):
errors = re.finditer(plist['error_pattern'], line) # list of match object of error pattern in line
for error in errors:
error_part = error[0].split(',')[0][1:]
base_line = re.sub(plist['error_pattern'], error_part, base_line, 1)
if re.search(plist['abs_er_pattern'], line):
abs_ers = re.finditer(plist['abs_er_pattern'], line)# list of match object of abs_er pattern in line
for abs_er in abs_ers:
base_line = re.sub(plist['abs_er_pattern'], abs_er[0][1:-1], base_line, 1)
if re.search(plist['note_pattern'], line):
base_line = re.sub(plist['note_pattern'], '', base_line)
return base_line
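# Illustrative behaviour of base_extract, using the patterns defined in
# build_layers below (the sample line is invented, not taken from a pecha):
#
#     line: "[1a.1]{T1}hello (wrld,world)#"
#     -> the line and topic markers are removed, the first element of the
#        (error,suggestion) pair is kept, and the '#' note marker is dropped,
#        giving the base text "hello wrld"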
def build_layers(self, m_text, num_vol):
i = 0 # tracker variable throughout the text
cur_vol_pages = [] # list variable to store page annotation according to base string index eg : [(startPage,endPage)]
cur_vol_error_id = [] # list variable to store error annotation according to base string index eg : [(es,ee,'suggestion')]
cur_vol_abs_er_id = [] # list variable to store abs_er annotation
note_id = [] # list variable to store note annotation '#'
pg_info = []
pg_ann = []
pat_list = {
'page_pattern': r'\[[0-9]+[a-z]{1}\]',
'line_pattern': r'\[\w+\.\d+\]',
'topic_pattern': r'\{\w+\}',
'sub_topic_pattern': r'\{\w+\-\w+\}',
'error_pattern': r'\(\S+\,\S+\)',
'abs_er_pattern': r'\[[^0-9].*?\]',
'note_pattern': r'#'
}
start_page = 0 # starting index of page
end_page = 0 # ending index of page
start_line = 0 #starting index of line
end_line = 0 # ending index of line
start_topic = 0 #starting index of topic_Id
end_topic = 0 # ending index of topic_Id
start_sub_topic = 0 #starting index of sub_topic_Id
end_sub_topic = 0 #ending index of sub_topic_Id
start_error = 0 #starting index of error
end_error = 0 #ending index of error
start_abs_er = 0 #starting index of abs_er
end_abs_er = 0 #ending index of abs_er
note = 0 #index of notes
text_lines = m_text.splitlines() # list of all the lines in the text
n_line = len(text_lines) # number of lines in the text
for idx, line in enumerate(text_lines):
line = line.strip()
pat_len_before_ann = 0 # length of pattern recognised before annotation
if re.search(pat_list['page_pattern'], line): # checking current line contains page annotation or not
start_page = end_page
end_page = end_line
page_info = line[re.search(pat_list['page_pattern'], line).end():]
pg_ann.append(re.search(pat_list['page_pattern'], line)[0][1:-1])
pg_info.append(page_info)
if len(pg_info)>=2:
cur_vol_pages.append((start_page, end_page, pg_info[-2], pg_ann[-2]))
if start_page < end_page: # to ignore the empty pages
i = i+1 # To accumulate the \n character
end_page = end_page+3
self.base_text = self.base_text + '\n'
elif re.search(pat_list['line_pattern'], line): #checking current line contains line annotation or not
start_line = i
length = len(line)
if re.search(pat_list['sub_topic_pattern'], line): #checking current line contain sub_topicID annotation or not
sub_topic_match = re.search(pat_list['sub_topic_pattern'], line)
self.sub_topic_info.append(sub_topic_match[0][1:-1])
pat_len_before_ann = self.search_before(sub_topic_match, pat_list, line)
if start_sub_topic == 0:
start_sub_topic = end_sub_topic
end_sub_topic = sub_topic_match.start()+i-pat_len_before_ann
if start_sub_topic < end_sub_topic:
if len(self.sub_topic_info) >= 2:
self.sub_topic_Id.append((start_sub_topic, end_sub_topic, self.vol_walker+1, self.sub_topic_info[-2]))
end_sub_topic = end_sub_topic+1
else:
self.sub_topic_Id.append((start_sub_topic, end_sub_topic, self.vol_walker+1, self.sub_topic_info[-1]))
end_sub_topic = end_sub_topic+1
else:
start_sub_topic = end_sub_topic
end_sub_topic = sub_topic_match.start()+i-pat_len_before_ann-2
if start_sub_topic != end_sub_topic:
self.sub_topic_Id.append((start_sub_topic, end_sub_topic, self.vol_walker+1, self.sub_topic_info[-2]))
end_sub_topic = end_sub_topic+1
if re.search(pat_list['topic_pattern'], line): #checking current line contain topicID annotation or not
topic = re.search(pat_list['topic_pattern'], line)
pat_len_before_ann = self.search_before(topic, pat_list, line)
self.topic_info.append(topic[0][1:-1])
start_topic = end_topic
end_topic = topic.start()+i-pat_len_before_ann
if (start_topic != end_topic or len(self.topic_info)>=2):
if len(self.topic_info) >= 2: # as we are ignoring the self.topic[0]
if start_topic < end_topic:
self.current_topic_id.append((start_topic, end_topic, self.vol_walker+1, self.topic_info[-2])) # -2 as we need the second-last topic
if isinstance(event, DeviceStateChangedEvent):
# change device state
if event.device_url not in self.__devices:
raise Exception(
"Received device change "
+ "state for unknown device '"
+ event.device_url
+ "'"
)
self.__devices[event.device_url].set_active_states(event.states)
return events
def get_current_executions(self):
"""Get all current running executions.
:return: Returns a dict of running Executions or empty dict.
:rtype: dict
raises ValueError in case of protocol issues
:Seealso:
- apply_actions
- launch_action_group
- get_history
"""
header = BASE_HEADERS.copy()
header["Cookie"] = self.__cookie
result = self.send_request(
requests.get, BASE_URL + "exec/current", headers=header
)
executions = {}
for execution_data in result:
exe = Execution(execution_data)
executions[exe.execution_id] = exe
return executions
def get_history(self):
"""Get history."""
header = BASE_HEADERS.copy()
header["Cookie"] = self.__cookie
result = self.send_request(requests.get, BASE_URL + "history", headers=header)
return result
def cancel_all_executions(self):
"""Cancel all running executions.
raises ValueError in case of any protocol issues.
"""
header = BASE_HEADERS.copy()
header["Cookie"] = self.__cookie
self.send_request(
requests.delete, BASE_URL + "exec/current/setup", headers=header
)
def get_action_groups(self):
"""Get all Action Groups.
:return: List of Action Groups
"""
header = BASE_HEADERS.copy()
header["Cookie"] = self.__cookie
request = requests.get(BASE_URL + "actionGroups", headers=header, timeout=10)
if request.status_code != 200:
self.__logged_in = False
self.login()
return self.get_action_groups()
try:
result = request.json()
except ValueError:
raise Exception("get_action_groups: Not a valid result for ")
groups = []
for group_data in result:
group = ActionGroup(group_data)
groups.append(group)
return groups
def launch_action_group(self, action_id):
"""Start action group."""
header = BASE_HEADERS.copy()
header["Cookie"] = self.__cookie
result = self.send_request(
requests.post, BASE_URL + "exec/" + action_id, headers=header
)
if "execId" not in result.keys():
raise Exception("Could not launch action" + "group, missing execId.")
return result["execId"]
def get_states(self, devices):
"""Get States of Devices."""
header = BASE_HEADERS.copy()
header["Cookie"] = self.__cookie
for device in devices:
path = "setup/devices/" + urllib.parse.quote_plus(device.url) + "/states"
result = self.send_request(requests.get, BASE_URL + path, headers=header)
try:
self.__devices[device.url].set_active_states(result)
except KeyError:
pass
def refresh_all_states(self):
"""Update all states."""
header = BASE_HEADERS.copy()
header["Cookie"] = self.__cookie
self.send_request(
requests.post, BASE_URL + "setup/devices/states/refresh", headers=header
)
class Device:
"""Represents an TaHoma Device."""
def __init__(self, protocol, dataInput):
"""Initialize the TaHoma Device."""
self.__protocol = protocol
self.__raw_data = dataInput
self.__active_states = {}
self.__label = dataInput.get("label", "")
self.__type = dataInput.get("controllableName", "")
self.__url = dataInput.get("deviceURL", "")
self.__uiclass = dataInput.get("uiClass", "")
self.__widget = dataInput.get("widget", "")
# Parse definitions
self.__definitions = {"commands": [], "states": []}
definition = dataInput.get("definition")
if definition:
if "commands" in definition.keys():
for command in definition["commands"]:
if command["commandName"] in self.__definitions["commands"]:
continue
self.__definitions["commands"].append(command["commandName"])
if "states" in definition.keys():
for state in definition["states"]:
if state["qualifiedName"] in self.__definitions["states"]:
continue
self.__definitions["states"].append(state["qualifiedName"])
self.__command_def = definition.get("commands")
self.__states_def = definition.get("states")
# Parse active states
if len(self.state_definitions) > 0:
if "states" in dataInput.keys():
for state in dataInput["states"]:
if state["name"] not in self.state_definitions:
self.state_definitions.append(state["name"])
self.__active_states[state["name"]] = state["value"]
@property
def label(self):
"""Label of device."""
return self.__label
@property
def command_definitions(self):
"""List of command devinitions."""
return self.__definitions["commands"]
@property
def state_definitions(self):
"""State of command devinition."""
return self.__definitions["states"]
@property
def active_states(self):
"""Get active states."""
return self.__active_states
def set_active_state(self, name, value):
"""Set active state."""
if name not in self.__active_states.keys():
self.__active_states[name] = value
if isinstance(self.__active_states[name], int) and isinstance(value, str):
# we get an update as str but current value is
# an int, try to convert
self.__active_states[name] = int(value)
elif isinstance(self.__active_states[name], float) and isinstance(value, str):
# we get an update as str but current value is
# a float, try to convert
self.__active_states[name] = float(value)
else:
self.__active_states[name] = value
def set_active_states(self, states):
"""Set active states to device."""
for state in states:
self.set_active_state(state["name"], state["value"])
@property
def type(self):
"""Get device type."""
return self.__type
@property
def url(self):
"""Get device url."""
return self.__url
@property
def uiclass(self):
"""Get device ui class."""
return self.__uiclass
@property
def widget(self):
"""Get device widget type."""
return self.__widget
@property
def command_def(self):
"""Get device widget type."""
return self.__command_def
@property
def states_def(self):
"""Get device widget type."""
return self.__states_def
# def execute_action(self, action):
# """Exceute action."""
# self.__protocol
class Action:
"""Represents an TaHoma Action."""
def __init__(self, data):
"""Initialize the TaHoma Action."""
self.__commands = []
if isinstance(data, dict):
self.__device_url = data["deviceURL"]
for cmd in data["commands"]:
if "parameters" in cmd.keys():
self.__commands.append(Command(cmd["name"], cmd["parameters"]))
else:
self.__commands.append(Command(cmd["name"]))
elif isinstance(data, str):
self.__device_url = data
else:
self.__device_url = ""
@property
def device_url(self):
"""Get device url of action."""
return self.__device_url
@device_url.setter
def device_url(self, url):
"""Set device url of action."""
self.__device_url = url
def add_command(self, cmd_name, *args):
"""Add command to action."""
self.__commands.append(Command(cmd_name, args))
@property
def commands(self):
"""Get commands."""
return self.__commands
def serialize(self):
"""Serialize action."""
commands = []
for cmd in self.commands:
commands.append(cmd.serialize())
out = {"commands": commands, "deviceURL": self.__device_url}
return out
def __str__(self):
"""Format to json."""
return json.dumps(
self.serialize(), indent=4, sort_keys=True, separators=(",", ": ")
)
def __repr__(self):
"""Format to json."""
return json.dumps(
self.serialize(), indent=None, sort_keys=True, separators=(",", ": ")
)
class Command:
"""Represents an TaHoma Command."""
def __init__(self, cmd_name, *args):
"""Initialize the TaHoma Command."""
self.__name = cmd_name
if len(args):
for arg in args[0]:
if (
isinstance(arg, str) is False
and isinstance(arg, int) is False
and isinstance(arg, float) is False
):
raise ValueError("Type '" + type(arg) + "' is not Int, bool or .")
self.__args = args[0]
else:
self.__args = []
@property
def name(self):
"""Get name of command."""
return self.__name
@property
def parameter(self):
"""Get parameter of command."""
return self.__args
def serialize(self):
"""Serialize command."""
return {"name": self.__name, "parameters": self.__args}
def __str__(self):
"""Format to json."""
return json.dumps(
self.serialize(), indent=4, sort_keys=True, separators=(",", ": ")
)
def __repr__(self):
"""Format to json."""
return json.dumps(
self.serialize(), indent=None, sort_keys=True, separators=(",", ": ")
)
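# Illustrative sketch of how Action and Command compose; the device URL and
# command name below are made-up examples, not real TaHoma identifiers:
#
#     action = Action("io://1234-5678-9012/12345678")
#     action.add_command("setClosure", 25)
#     print(action)
#     # JSON along the lines of:
#     # {"commands": [{"name": "setClosure", "parameters": [25]}],
#     #  "deviceURL": "io://1234-5678-9012/12345678"}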
class ActionGroup:
"""Represents an TaHoma Action Group."""
def __init__(self, data):
"""Initialize the TaHoma Action Group."""
if hasattr(data, "lastUpdateTime"):
self.__last_update = data["lastUpdateTime"]
else:
self.__last_update = -1
self.__name = data["label"]
self.__oid = data["oid"]
self.__actions = []
for cmd in data["actions"]:
self.__actions.append(Action(cmd))
@property
def last_update(self):
"""Get last update."""
return self.__last_update
@property
def name(self):
"""Get name of action group."""
return self.__name
@property
def oid(self):
"""Get oid of the action group."""
return self.__oid
@property
def actions(self):
"""Get list of actions."""
return self.__actions
class Event:
"""Represents an TaHoma Event."""
@staticmethod
def factory(data):
"""TaHoma Event factory."""
if data["name"] == "DeviceStateChangedEvent":
return DeviceStateChangedEvent(data)
elif data["name"] == "ExecutionStateChangedEvent":
return ExecutionStateChangedEvent(data)
elif data["name"] == "CommandExecutionStateChangedEvent":
return CommandExecutionStateChangedEvent(data)
else:
print("Unknown event '" + data["name"] + "' occurred.")
# raise ValueError("Unknown event '" + data['name'] + "' occurred.")
return None
class DeviceStateChangedEvent(Event):
"""Represents an TaHoma DeviceStateChangedEvent."""
def __init__(self, data):
"""Initialize the TaHoma DeviceStateChangedEvent."""
self.__device_url = data["deviceURL"]
self.__states = data["deviceStates"]
@property
def device_url(self):
"""Get device url."""
return self.__device_url
@property
def states(self):
"""Get list of states."""
return self.__states
class CommandExecutionStateChangedEvent(Event):
"""Represents an TaHoma CommandExecutionStateChangedEvent."""
def __init__(self, data):
"""Initialize the TaHoma CommandExecutionStateChangedEvent."""
self.__exec_id = data["execId"]
self.__device_url = data["deviceURL"]
try:
self.__state = EventState(int(data["newState"]))
except ValueError:
self.__state = EventState.Unknown
if self.__state == EventState.Failed:
self.__failure_type = data["failureType"]
else:
self.__failure_type = None
@property
def exec_id(self):
"""Get exec id."""
return self.__exec_id
@property
def device_url(self):
"""Get device url."""
return self.__device_url
@property
def state(self):
"""Get state."""
return self.__state
@property
def failure_type(self):
"""Get failure type."""
return self.__failure_type
class ExecutionStateChangedEvent(Event):
"""Represents an TaHoma ExecutionStateChangedEvent."""
def __init__(self, data):
"""Initialize the TaHoma ExecutionStateChangedEvent."""
self.__exec_id = data["execId"]
try:
self.__state = EventState(int(data["newState"]))
except ValueError:
self.__state = EventState.Unknown
if self.__state == EventState.Failed:
self.__failure_type = data["failureType"]
fail = data["failedCommands"]["command"]["deviceURL"]
self.__failed_device_url = fail
else:
self.__failure_type = None
self.__failed_device_url = None
@property
def exec_id(self):
"""Get exec id."""
return self.__exec_id
@property
def state(self):
"""Get state."""
return self.__state
@property
def failure_type(self):
"""Get failure url."""
return self.__failure_type
@property
def failure_device_url(self):
"""Get failure device url."""
return self.__failed_device_url
class EventState:
"""Represents an TaHoma EventState."""
def __init__(self, state):
"""Initialize the TaHoma EventState."""
if isinstance(state, int):
if state is EventState.Unknown0:
self.__state = EventState.Unknown0
elif state is EventState.NotTransmitted:
self.__state = EventState.NotTransmitted
elif state is EventState.Unknown2:
self.__state = EventState.Unknown2
elif state is EventState.Unknown3:
self.__state = EventState.Unknown3
elif state is EventState.Completed:
self.__state = EventState.Completed
elif state is EventState.Failed:
self.__state = EventState.Failed
elif state is EventState.Unknown:
self.__state = EventState.Unknown
else:
raise ValueError("Unknown state init " + str(state))
elif isinstance(state, str):
# more states are missing
if state == "NOT_TRANSMITTED":
self.__state = EventState.NotTransmitted
elif state == "TRANSMITTED":
self.__state = EventState.Transmitted
elif state == "IN_PROGRESS":
self.__state = EventState.InProgress
elif state == "COMPLETED":
self.__state = EventState.Completed
elif state == "FAILED":
self.__state = EventState.Failed
else:
raise ValueError("Unknown state init '" + state + "'")
else:
raise ValueError("EventState init can only be called with int or str.")
@property
self.train_images = train_images
self.test_images = test_images
self.train_labels = one_hot(train_labels, self.n_classes)
self.test_labels = one_hot(test_labels, self.n_classes)
def cifar_100(self):
self.n_classes = 100 # labels
# training data & label
train_batch = self.unpickle("{0}/train".format(self.ds_path))
train_data = np.concatenate([train_batch[b'data']], axis=0)
train_labels = np.concatenate([train_batch[b'fine_labels']], axis=0)
train_images = np.swapaxes(train_data.reshape([-1,
self.height,
self.width,
self.channel], order='F'), 1, 2)
# test data & label
test_batch = self.unpickle("{0}/test".format(self.ds_path))
test_data = np.concatenate([test_batch[b'data']], axis=0)
test_labels = np.concatenate([test_batch[b'fine_labels']], axis=0)
test_images = np.swapaxes(test_data.reshape([-1,
self.height,
self.width,
self.channel], order='F'), 1, 2)
# split training data set into train / val
if self.use_split:
train_images, valid_images, train_labels, valid_labels = \
train_test_split(train_images, train_labels,
test_size=self.split_rate,
random_state=self.random_state)
self.valid_images = valid_images
self.valid_labels = one_hot(valid_labels, self.n_classes)
self.train_images = train_images
self.test_images = test_images
self.train_labels = one_hot(train_labels, self.n_classes)
self.test_labels = one_hot(test_labels, self.n_classes)
class CelebADataSet:
"""
This class handles the CelebA & CelebA-HQ DataSets.
- Images are saved as an .h5 file for faster loading.
- The CelebA-HQ DataSet is distributed in an encrypted form, so it must be decrypted before use.
There are a few scripts available that download & decrypt the CelebA-HQ DataSet.
"""
def __init__(self,
height=64, width=64, channel=3, attr_labels=(),
n_threads=30, use_split=False, split_rate=0.2, random_state=42,
ds_image_path=None, ds_label_path=None, ds_type="CelebA", use_img_scale=True, img_scale="-1,1",
use_save=False, save_type='to_h5', save_file_name=None,
use_concat_data=False):
"""
# General Settings
:param height: image height
:param width: image width
:param channel: image channel
- in case of CelebA, image size is 64 x 64 x 3 (HWC)
- in case of CelebA-HQ, image size is 1024 x 1024 x 3 (HWC)
:param attr_labels: attributes of CelebA DataSet
- in case of CelebA, the number of attributes is 40
# Pre-Processing Option
:param n_threads: the number of threads
:param use_split: splitting train DataSet into train/val
:param split_rate: image split rate (into train & val)
:param random_state: random seed for shuffling, default 42
# DataSet Settings
:param ds_image_path: DataSet's Image Path
:param ds_label_path: DataSet's Label Path
:param ds_type: which DataSet is
:param use_img_scale: using img scaling?
:param img_scale: img normalize
:param use_save: saving into another file format
:param save_type: file format to save
:param save_file_name: file name to save
:param use_concat_data: concatenate images & labels
"""
self.height = height
self.width = width
self.channel = channel
'''
# Available attributes
[
5_o_Clock_Shadow, Arched_Eyebrows, Attractive, Bags_Under_Eyes, Bald, Bangs, Big_Lips, Big_Nose, Black_Hair,
Blond_Hair, Blurry, Brown_Hair, Bushy_Eyebrows, Chubby, Double_Chin, Eyeglasses, Goatee, Gray_Hair,
Heavy_Makeup, High_Cheekbones, Male, Mouth_Slightly_Open, Mustache, Narrow_Eyes, No_Beard, Oval_Face,
Pale_Skin, Pointy_Nose, Receding_Hairline, Rosy_Cheeks, Sideburns, Smiling, Straight_Hair, Wavy_Hair,
Wearing_Earrings, Wearing_Hat, Wearing_Lipstick, Wearing_Necklace, Wearing_Necktie, Young
]
'''
self.attr_labels = attr_labels
self.image_shape = (self.height, self.width, self.channel) # (H, W, C)
self.n_threads = n_threads
self.use_split = use_split
self.split_rate = split_rate
self.random_state = random_state
self.attr = [] # loaded labels
self.images = []
self.labels = {}
"""
Expected DataSet's Path Example
CelebA : CelebA/ (sub-folder : Anno/..., Img/... )
CelebA-HQ : CelebA-HQ/ (sub-folder : ...npy, ...png )
Labels : CelebA/Anno/...txt
Expected DataSet's Type
'CelebA' or 'CelebA-HQ'
"""
self.ds_image_path = ds_image_path
self.ds_label_path = ds_label_path
self.ds_type = ds_type
self.use_img_scale = use_img_scale
self.img_scale = img_scale
try:
assert self.ds_image_path and self.ds_label_path
except AssertionError:
raise AssertionError("[-] CelebA/CelebA-HQ DataSets' Path is required! (%s)")
if self.ds_type == "CelebA":
self.num_images = 202599 # the number of CelebA images
elif self.ds_type == "CelebA-HQ":
self.num_images = 30000 # the number of CelebA-HQ images
tmp_path = self.ds_image_path + "/imgHQ00000."
if os.path.exists(tmp_path + "dat"):
raise FileNotFoundError("[-] You need to decrypt .dat file first!\n" +
"[-] plz, use original PGGAN repo or"
" this repo https://github.com/nperraud/download-celebA-HQ")
else:
raise NotImplementedError("[-] 'ds_type' muse be 'CelebA' or 'CelebA-HQ'")
self.use_save = use_save
self.save_type = save_type
self.save_file_name = save_file_name
self.use_concat_data = use_concat_data
try:
if self.use_save:
assert self.save_file_name
except AssertionError:
raise AssertionError("[-] save-file/folder-name is required!")
self.images = DataSetLoader(path=self.ds_image_path,
size=self.image_shape,
use_save=self.use_save,
name=self.save_type,
save_file_name=self.save_file_name,
use_image_scaling=use_img_scale,
image_scale=self.img_scale).raw_data # numpy arrays
self.labels = self.load_attr(path=self.ds_label_path)
if self.use_concat_data:
self.images = self.concat_data(self.images, self.labels)
# split training data set into train / val
if self.use_split:
self.train_images, self.valid_images, self.train_labels, self.valid_labels = \
train_test_split(self.images, self.labels,
test_size=self.split_rate,
random_state=self.random_state)
def load_attr(self, path):
with open(path, 'r') as f:
img_attr = []
self.num_images = int(f.readline().strip())
self.attr = (f.readline().strip()).split(' ')
print("[*] the number of images : %d" % self.num_images)
print("[*] the number of attributes : %d/%d" % (len(self.attr_labels), len(self.attr)))
for fn in f.readlines():
row = fn.strip().split()
# img_name = row[0]
attr = [int(x) for x in row[1:]]
tmp = [attr[self.attr.index(x)] for x in self.attr_labels]
tmp = [1. if x == 1 else 0. for x in tmp] # one-hot labeling
img_attr.append(tmp)
return np.asarray(img_attr)
def concat_data(self, img, label):
label = np.tile(np.reshape(label, [-1, 1, 1, len(self.attr_labels)]), [1, self.height, self.width, 1])
return np.concatenate([img, label], axis=3)
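# A minimal construction sketch; the paths below are hypothetical and the
# attribute names come from the list documented in __init__:
#
#     celeba = CelebADataSet(height=64, width=64, channel=3,
#                            attr_labels=("Male", "Smiling"),
#                            ds_image_path="CelebA/Img/img_align_celeba",
#                            ds_label_path="CelebA/Anno/list_attr_celeba.txt",
#                            ds_type="CelebA")
#     images, labels = celeba.images, celeba.labels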
class Pix2PixDataSet:
def __init__(self, height=64, width=64, channel=3,
use_split=False, split_rate=0.15, random_state=42, n_threads=8,
ds_path=None, ds_name=None, use_save=False, save_type='to_h5', save_file_name=None):
"""
# General Settings
:param height: image height, default 64
:param width: image width, default 64
:param channel: image channel, default 3 (RGB)
# Pre-Processing Option
:param use_split: using DataSet split, default False
:param split_rate: image split rate (into train & test), default 0.2
:param random_state: random seed for shuffling, default 42
:param n_threads: the number of threads for multi-threading, default 8
# DataSet Option
:param ds_path: DataSet's Path, default None
:param ds_name: DataSet's Name, default None
:param use_save: saving into another file format
:param save_type: file format to save
:param save_file_name: file name to save
"""
self.height = height
self.width = width
self.channel = channel
self.image_shape = (self.height, self.width, self.channel)
self.use_split = use_split
self.split_rate = split_rate
self.random_state = random_state
self.n_threads = n_threads # change this value to one suited to your system
"""
Expected ds_path : pix2pix/...
Expected ds_name : apple2orange
"""
self.ds_path = ds_path
self.ds_name = ds_name
# single grid : testA, testB, (trainA, trainB)
# double grid : train, val, (test, sample)
self.ds_single_grid = ['apple2orange', 'horse2zebra', 'monet2photo', 'summer2winter_yosemite', 'vangogh2photo',
'ae_photos', 'cezanne2photo', 'ukiyoe2photo', 'iphone2dslr_flower']
self.ds_double_grid = ['cityscapes', 'edges2handbags', 'edges2shoes', 'facades', 'maps']
# Single Grid DataSet - the number of images
self.n_sg_images_a = 400
self.n_sg_images_b = 6287
# Double Grid DataSet - the number of images
self.n_dg_images_a = 0
self.n_dg_images_b = 0
self.use_save = use_save
self.save_type = save_type
self.save_file_name = save_file_name
try:
if self.use_save:
assert self.save_file_name
except AssertionError:
raise AssertionError("[-] save-file/folder-name is required!")
if self.ds_name in self.ds_single_grid:
self.images_a = DataSetLoader(path=self.ds_path + "/" + self.ds_name + "/trainA/",
size=self.image_shape,
use_save=self.use_save,
name=self.save_type,
save_file_name=self.save_file_name,
use_image_scaling=True,
image_scale='0,1').raw_data # numpy arrays
self.images_b = DataSetLoader(path=self.ds_path + "/" + self.ds_name + "/trainB/",
size=self.image_shape,
use_save=self.use_save,
name=self.save_type,
save_file_name=self.save_file_name,
use_image_scaling=True,
image_scale='0,1').raw_data # numpy arrays
self.n_images_a = self.n_sg_images_a
self.n_images_b = self.n_sg_images_b
elif self.ds_name in self.ds_double_grid:
# To-Do
# Implement this!
self.n_images_a = self.n_dg_images_a
self.n_images_b = self.n_dg_images_b
else:
raise NotImplementedError("[-] Not Implemented yet")
class ImageNetDataSet:
def __init__(self):
pass
class Div2KDataSet:
def __init__(self, hr_height=384, hr_width=384, lr_height=96, lr_width=96, channel=3,
use_split=False, split_rate=0.1, random_state=42, n_threads=8,
ds_path=None, ds_name=None, use_img_scale=True,
ds_hr_path=None, ds_lr_path=None,
use_save=False, save_type='to_h5', save_file_name=None):
"""
# General Settings
:param hr_height: input HR image height, default 384
:param hr_width: input HR image width, default 384
:param lr_height: input LR image height, default 96
:param lr_width: input LR image width, default 96
:param channel: input image channel, default 3 (RGB)
- in case of Div2K - ds x4, image size is 384 x 384 x 3 (HWC).
# Pre-Processing Option
:param split_rate: image split rate (into train & test), default 0.1
:param random_state: random seed for shuffling, default 42
:param n_threads: the number of threads for multi-threading, default 8
# DataSet Option
:param ds_path: DataSet's Path, default None
:param ds_name: DataSet's Name, default None
:param use_img_scale: using img scaling?
:param ds_hr_path: DataSet High Resolution path
:param ds_lr_path: DataSet Low Resolution path
:param use_save: saving into another file format
:param save_type: file format to save
:param save_file_name: file name to save
"""
self.hr_height = hr_height
self.hr_width = hr_width
self.lr_height = lr_height
self.lr_width = lr_width
self.channel = channel
self.hr_shape = (self.hr_height, self.hr_width, self.channel)
self.lr_shape = (self.lr_height, self.lr_width, self.channel)
self.use_split = use_split
self.split_rate = split_rate
self.random_state = random_state
self.num_threads = n_threads # change this value to one suited to your system
"""
Expected ds_path : div2k/...
Expected ds_name : X4
"""
self.ds_path = ds_path
self.ds_name = ds_name
self.ds_hr_path = ds_hr_path
self.ds_lr_path = ds_lr_path
try:
assert self.ds_path
except AssertionError:
try:
assert self.ds_hr_path and self.ds_lr_path
except AssertionError:
raise AssertionError("[-] DataSet's | |
# Source: blackdaemon/enso-launcher-continued
# vim:set tabstop=4 shiftwidth=4 expandtab:
#
# This code has been borrowed from Kupfer
#
# Homepage: http://kaizer.se/wiki/kupfer/
# Credits: Copyright 2007-2011 <NAME> <<EMAIL>>
# Licence: GNU General Public License v3 (or any later version)
__updated__ = "2017-03-01"
import logging
import os
import gio
import glib
import gtk
import pygtk
import xdg.BaseDirectory
import xdg.DesktopEntry
import xdg.Exceptions
from enso.contrib.open.platform.linux import (
desktop_parse,
kupferstring,
terminal,
)
pygtk.require('2.0')
__all__ = ['launch_app_info', 'spawn_app', 'spawn_app_id']
STARTUP_ENV = "DESKTOP_STARTUP_ID"
# TODO: Broadcast Gio's launched message on dbus
# NOTE: GDK's startup notification things that we use
# are really only sending xmessages. (roughly).
class SpawnError(Exception):
"Error starting application"
class ResourceLookupError(Exception):
"Unable to find resource"
class ResourceReadError(Exception):
"Unable to open resource"
def gtk_to_unicode(gtkstring):
"""Return unicode for a GTK/GLib string (bytestring or unicode)"""
if isinstance(gtkstring, unicode):
return gtkstring
return gtkstring.decode("UTF-8", "ignore")
def find_desktop_file(desk_id):
"""Find file for @desk_id or raise ResourceLookupError
Desktop files are found by appending /applications/ to
$XDG_DATA_DIRS, but if they are located in subdirs of that,
then additional 'subdirectory-' prefixes are used.
"""
if not desk_id:
raise ResourceLookupError("Empty id")
try:
return next(xdg.BaseDirectory.load_data_paths("applications", desk_id))
except StopIteration:
# it was not found as an immediate child of the data paths,
# so we split by the hyphens and search deeper
file_id = desk_id
directories = ['applications']
def lookup(path):
"""Return location for @path if exists, else none"""
return next(xdg.BaseDirectory.load_data_paths(*path), None)
def get_dir_id_depth(desk_id, depth):
"split 'hyph-example-id' at the nth hyphen"
parts = desk_id.split('-', depth)
return '-'.join(parts[:depth]), '-'.join(parts[depth:])
while 1:
# try the first parts of the id to see if it matches a directory
for x in xrange(1, 4):
dirname, rest_id = get_dir_id_depth(file_id, x)
if rest_id and lookup(directories + [dirname]):
file_id = rest_id
directories.append(dirname)
break
else:
# we did not reach break
break
desktop_file_path = lookup(directories + [file_id])
if desktop_file_path:
return desktop_file_path
raise ResourceLookupError("Cannot locate '%s'" % (desk_id,))
def read_desktop_info(desktop_file):
"""
Get the keys StartupNotify, Terminal, Exec, Path, Icon
Return dict with bool and unicode values
"""
# Return values in unicode
try:
de = xdg.DesktopEntry.DesktopEntry(desktop_file)
except xdg.Exceptions.Error:
raise ResourceReadError
if not de.getExec():
raise ResourceReadError("Invalid data: empty Exec key")
return {
"Terminal": de.getTerminal(),
"StartupNotify": de.getStartupNotify(),
"Exec": gtk_to_unicode(de.getExec()),
"Path": gtk_to_unicode(de.getPath()),
"Icon": gtk_to_unicode(de.getIcon()),
"Name": gtk_to_unicode(de.getName()),
}
def create_desktop_info(commandline, name, icon, work_dir, in_terminal, startup_notify):
return {
"Terminal": in_terminal,
"StartupNotify": startup_notify,
"Exec": commandline,
"Path": work_dir,
"Icon": icon,
"Name": name,
}
def replace_format_specs(argv, location, desktop_info, gfilelist):
"""
http://standards.freedesktop.org/desktop-entry-spec/latest/ar01s06.html
Replace format specifiers
%% literal %
%f file
%F list of files
%u URL
%U list of URLs
%i --icon <Icon key>
%c Translated name
%k location of .desktop file
deprecated are removed:
%d %D %n %N %v %m
apart from those, all others stay and are ignored
Like other implementations, we do actually insert
a local path for %u and %U if it exists.
Return (supports_single, added_at_end, argv)
supports_single: Launcher only supports a single file
caller has to re-call for each file
added_at_end: No format found for the file, it was added
at the end
"""
supports_single_file = False
files_added_at_end = False
class Flags(object):
did_see_small_f = False
did_see_large_f = False
fileiter = iter(gfilelist)
def get_file_path(gfile):
if not gfile:
return ""
return gfile.get_path() or gfile.get_uri()
def get_next_file_path():
try:
f = next(fileiter)
except StopIteration:
return ""
return get_file_path(f)
def replace_single_code(key):
"Handle all embedded format codes, including those to be removed"
deprecated = set(['%d', '%D', '%n', '%N', '%v', '%m'])
if key in deprecated:
return ""
if key == "%%":
return "%"
if key == "%f" or key == "%u":
if Flags.did_see_large_f or Flags.did_see_small_f:
logging.warn("Warning, multiple file format specs!")
return ""
Flags.did_see_small_f = True
return get_next_file_path()
if key == "%c":
return gtk_to_unicode(desktop_info["Name"] or location)
if key == "%k":
return location
else:
return None
def replace_array_format(elem):
"""
Handle array format codes -- only recognized as single arguments
Return flag, arglist
where flag is true if something was replaced
"""
if elem == "%U" or elem == "%F":
if Flags.did_see_large_f or Flags.did_see_small_f:
logging.warn("Warning, multiple file format specs!")
return True, []
Flags.did_see_large_f = True
return True, filter(bool, [get_file_path(f) for f in gfilelist])
if elem == "%i":
if desktop_info["Icon"]:
return True, ["--icon", desktop_info["Icon"]]
return True, []
else:
return False, elem
def two_part_unescaper(s, repfunc):
"""
Handle embedded format codes
Scan @s two characters at a time and replace using @repfunc
"""
if not s:
return s
def _inner():
it = iter(zip(s, s[1:]))
for cur, nex in it:
key = cur + nex
rep = repfunc(key)
if rep is not None:
yield rep
# skip a step in the iter
try:
it.next()
except StopIteration:
return
else:
yield cur
yield s[-1]
return ''.join(_inner())
new_argv = []
for x in argv:
if not x:
# the arg is an empty string, we don't need extra processing
new_argv.append(x)
continue
succ, newargs = replace_array_format(x)
if succ:
new_argv.extend(newargs)
else:
arg = two_part_unescaper(x, replace_single_code)
if arg:
new_argv.append(arg)
if len(gfilelist) > 1 and not Flags.did_see_large_f:
supports_single_file = True
if not Flags.did_see_small_f and not Flags.did_see_large_f and len(gfilelist):
files_added_at_end = True
new_argv.append(get_next_file_path())
return supports_single_file, files_added_at_end, new_argv
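# Worked example of the substitution above (paths and desktop info are
# invented; gfiles would be GFile objects for the two paths):
#
#     argv   = ["gedit", "%f"]
#     gfiles = [<GFile /tmp/a.txt>, <GFile /tmp/b.txt>]
#     replace_format_specs(argv, location, desktop_info, gfiles)
#     -> (True, False, ["gedit", "/tmp/a.txt"])
#
# "%f" consumes a single file, so supports_single is True and the caller
# re-invokes once per remaining file; "%F" would instead expand to all paths
# in place.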
def _file_for_app_info(app_info):
try:
desktop_file = find_desktop_file(app_info.get_id())
except ResourceLookupError as e:
logging.error(e)
desktop_file = None
return desktop_file
def _info_for_desktop_file(desktop_file):
if not desktop_file:
return None
try:
desktop_info = read_desktop_info(desktop_file)
except ResourceReadError:
desktop_info = None
return desktop_info
def launch_app_info(app_info, gfiles=[], in_terminal=None, timestamp=None,
desktop_file=None, launch_cb=None, screen=None):
"""
Launch @app_info, opening @gfiles
@in_terminal: override Terminal flag
@timestamp: override timestamp
@desktop_file: specify location of desktop file
@launch_cb: Called once per launched process,
like ``spawn_app``
Will pass on exceptions from spawn_app
"""
desktop_file = desktop_file or _file_for_app_info(app_info)
desktop_info = _info_for_desktop_file(desktop_file)
if not desktop_file or not desktop_info:
# Allow in-memory app_info creations (without id or desktop file)
desktop_file = ""
desktop_info = create_desktop_info(app_info.get_commandline() or "",
app_info.get_name(),
"",
"",
False,
False)
# in this case, the command line is already primarily escaped
argv = desktop_parse.parse_argv(desktop_info["Exec"])
else:
# In the normal case, we must first escape one round
argv = desktop_parse.parse_unesc_argv(desktop_info["Exec"])
assert argv and argv[0]
# Now Resolve the %f etc format codes
multiple_needed, missing_format, launch_argv = \
replace_format_specs(argv, desktop_file, desktop_info, gfiles)
if not multiple_needed:
# Launch 1 process
launch_records = [(launch_argv, gfiles)]
else:
# Launch one process per file
launch_records = [(launch_argv, [gfiles[0]])]
for f in gfiles[1:]:
_ignore1, _ignore2, launch_argv = \
replace_format_specs(argv, desktop_file, desktop_info, [f])
launch_records.append((launch_argv, [f]))
notify = desktop_info["StartupNotify"]
workdir = desktop_info["Path"] or None
if in_terminal is None:
in_terminal = desktop_info["Terminal"]
if in_terminal:
term = terminal.get_configured_terminal()
notify = notify or term["startup_notify"]
for argv, gfiles in launch_records:
if in_terminal:
term = terminal.get_configured_terminal()
targv = list(term["argv"])
if term["exearg"]:
targv.append(term["exearg"])
argv = targv + argv
ret = spawn_app(app_info, argv, gfiles, workdir, notify,
timestamp=timestamp, launch_cb=launch_cb,
screen=screen)
if not ret:
return False
return True
def spawn_app_id(app_id, argv, workdir=None, startup_notify=True, screen=None):
"""
Spawn @argv trying to notify it as if it is app_id
"""
try:
app_info = get_info_for_id(app_id)
except RuntimeError:
app_info = None
startup_notify = False
return spawn_app(app_info, argv, [], workdir, startup_notify, screen=screen)
def spawn_app(app_info, argv, filelist, workdir=None, startup_notify=True,
timestamp=None, launch_cb=None, screen=None):
"""
Spawn app.
@argv: argument list including files
@workdir: where to set workdir if not cwd
@app_info: Used for startup notification, if @startup_notify is True
@filelist: Used for startup notification
@startup_notify: Use startup notification
@timestamp: Event timestamp
@launch_cb: Called if successful with
(argv, pid, notify_id, filelist, timestamp)
@screen: GdkScreen on which to put the application
return pid if successful
raise SpawnError on error
"""
notify_id = None
if startup_notify:
ctx = gtk.gdk.AppLaunchContext()
ctx.set_timestamp(timestamp or gtk.get_current_event_time())
if screen:
ctx.set_screen(screen)
# This not only returns the string ID but
# it actually starts the startup notification!
notify_id = ctx.get_startup_notify_id(app_info, filelist)
child_env_add = {STARTUP_ENV: notify_id}
else:
child_env_add = {}
if screen:
child_env_add["DISPLAY"] = screen.make_display_name()
if not workdir or not os.path.exists(workdir):
workdir = "."
argv = list(locale_encode_argv(argv))
try:
(pid, _ig1, _ig2, _ig3) = glib.spawn_async( # IGNORE:E1101 @UndefinedVariable Keep PyLint and PyDev happy
argv,
working_directory=workdir,
# IGNORE:E1101 @UndefinedVariable Keep PyLint and PyDev happy
flags=glib.SPAWN_SEARCH_PATH | glib.SPAWN_STDOUT_TO_DEV_NULL,
child_setup=child_setup,
user_data=child_env_add
)
logging.debug("Launched '%s'; notify_id: %s; pid: %d", argv, notify_id, pid)
except glib.GError as exc: # IGNORE:E1101 @UndefinedVariable Keep PyLint and PyDev happy
logging.error("Error Launching '%s'; %s", argv, unicode(exc))
if notify_id:
gtk.gdk.notify_startup_complete_with_id(notify_id)
raise SpawnError(unicode(exc))
finally:
if launch_cb:
launch_cb(argv, pid, notify_id, filelist, timestamp)
return pid
def child_setup(add_environ):
"""Called to setup the child process before exec()
@add_environ is a dict of extra environment variables to set in the child."""
# GitHub stars: 10-100
import math, copy
from functools import reduce
from tensorflow.keras import layers
from pyradox.modules import *
from tensorflow.keras.activations import swish
from tensorflow.nn import relu6
def relu(x):
return layers.ReLU()(x)
def hard_sigmoid(x):
return layers.ReLU(6.0)(x + 3.0) * (1.0 / 6.0)
def hard_swish(x):
return layers.Multiply()([hard_sigmoid(x), x])
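# The two helpers above are the MobileNetV3-style "hard" activations:
# hard_sigmoid(x) = ReLU6(x + 3) / 6 and hard_swish(x) = x * hard_sigmoid(x),
# piecewise-linear approximations of sigmoid(x) and of swish(x) = x * sigmoid(x).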
class ResNet(layers.Layer):
"""Customized Implementation of ResNet
Args:
resnet_config (list of (int, int) tuples): for each stacked group, the number of
filters of the bottleneck layer in a block and the number of blocks in the group
epsilon (float): small float added to variance to avoid dividing by zero in
batch normalisation, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(
self,
resnet_config,
epsilon=1.001e-5,
activation="relu",
use_bias=False,
**kwargs,
):
super().__init__()
self.resnet_config = resnet_config
self.epsilon = epsilon
self.activation = activation
self.use_bias = use_bias
self.kwargs = kwargs
def __call__(self, inputs):
x = inputs
x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(x)
x = layers.Conv2D(64, 7, strides=2, use_bias=self.use_bias, **self.kwargs)(x)
x = layers.BatchNormalization(epsilon=self.epsilon)(x)
x = layers.Activation(self.activation)(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.MaxPooling2D(3, strides=2)(x)
for i, (filters, blocks) in enumerate(self.resnet_config):
if i == 0:
x = ResNetBlock(
filters=filters,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
stride=2,
**self.kwargs,
)(x)
for _ in range(2, blocks + 1):
x = ResNetBlock(
filters=filters,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
conv_shortcut=False,
**self.kwargs,
)(x)
else:
x = ResNetBlock(
filters=filters,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
**self.kwargs,
)(x)
for _ in range(2, blocks + 1):
x = ResNetBlock(
filters=filters,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
conv_shortcut=False,
**self.kwargs,
)(x)
return x
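# For example, resnet_config=[(64, 3), (128, 4), (256, 6), (512, 3)] stacks 3, 4, 6 and 3
# bottleneck blocks and corresponds to the 50-layer variant defined below.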
class ResNet50(ResNet):
"""Customized Implementation of ResNet50
Args:
epsilon (float): Small float added to variance to avoid dividing by zero in
batch normalization, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
super().__init__(
resnet_config=[(64, 3), (128, 4), (256, 6), (512, 3)],
epsilon=epsilon,
activation=activation,
use_bias=use_bias,
**kwargs,
)
class ResNet101(ResNet):
"""Customized Implementation of ResNet101
Args:
epsilon (float): Small float added to variance to avoid dividing by zero in
batch normalization, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
super().__init__(
resnet_config=[(64, 3), (128, 4), (256, 23), (512, 3)],
epsilon=epsilon,
activation=activation,
use_bias=use_bias,
**kwargs,
)
class ResNet152(ResNet):
"""Customized Implementation of ResNet152
Args:
epsilon (float): Small float added to variance to avoid dividing by zero in
batch normalization, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
super().__init__(
resnet_config=[(64, 3), (128, 8), (256, 36), (512, 3)],
epsilon=epsilon,
activation=activation,
use_bias=use_bias,
**kwargs,
)
class ResNetV2(layers.Layer):
"""Customized Implementation of ResNetV2
Args:
resnet_config (list of tuples of 2 int): filters of the bottleneck layer in a block, blocks in the stacked blocks
epsilon (float): Small float added to variance to avoid dividing by zero in
batch normalization, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(
self,
resnet_config,
epsilon=1.001e-5,
activation="relu",
use_bias=False,
**kwargs,
):
super().__init__()
self.resnet_config = resnet_config
self.epsilon = epsilon
self.activation = activation
self.use_bias = use_bias
self.kwargs = kwargs
def __call__(self, inputs):
x = inputs
x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(x)
x = layers.Conv2D(64, 7, strides=2, use_bias=self.use_bias, **self.kwargs)(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.MaxPooling2D(3, strides=2)(x)
for i, (filters, blocks) in enumerate(self.resnet_config):
if i == len(self.resnet_config) - 1:
x = ResNetV2Block(
filters=filters,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
**self.kwargs,
)(x)
for _ in range(2, blocks + 1):
x = ResNetV2Block(
filters=filters,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
conv_shortcut=False,
**self.kwargs,
)(x)
x = ResNetV2Block(
filters=filters,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
stride=1,
**self.kwargs,
)(x)
else:
x = ResNetV2Block(
filters=filters,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
**self.kwargs,
)(x)
for _ in range(2, blocks + 1):
x = ResNetV2Block(
filters=filters,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
conv_shortcut=False,
**self.kwargs,
)(x)
x = ResNetV2Block(
filters=filters,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
stride=2,
**self.kwargs,
)(x)
x = layers.BatchNormalization(epsilon=self.epsilon)(x)
x = layers.Activation(self.activation)(x)
return x
class ResNet50V2(ResNetV2):
"""Customized Implementation of ResNet50V2
Args:
epsilon (float): Small float added to variance to avoid dividing by zero in
batch normalization, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
super().__init__(
resnet_config=[(64, 3), (128, 4), (256, 6), (512, 3)],
epsilon=epsilon,
activation=activation,
use_bias=use_bias,
**kwargs,
)
class ResNet101V2(ResNetV2):
"""Customized Implementation of ResNet101V2
Args:
epsilon (float): Small float added to variance to avoid dividing by zero in
batch normalization, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
super().__init__(
resnet_config=[(64, 3), (128, 4), (256, 23), (512, 3)],
epsilon=epsilon,
activation=activation,
use_bias=use_bias,
**kwargs,
)
class ResNet152V2(ResNetV2):
"""Customized Implementation of ResNet152V2
Args:
epsilon (float): Small float added to variance to avoid dividing by zero in
batch normalization, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
super().__init__(
resnet_config=[(64, 3), (128, 8), (256, 36), (512, 3)],
epsilon=epsilon,
activation=activation,
use_bias=use_bias,
**kwargs,
)
class ResNeXt(layers.Layer):
"""Customized Implementation of ResNeXt
Args:
resnet_config (list of tuples of 2 int): filters of the bottleneck layer in a block, blocks in the stacked blocks
epsilon (float): Small float added to variance to avoid dividing by zero in
batch normalization, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(
self,
resnet_config,
epsilon=1.001e-5,
activation="relu",
use_bias=False,
**kwargs,
):
super().__init__()
self.resnet_config = resnet_config
self.epsilon = epsilon
self.activation = activation
self.use_bias = use_bias
self.kwargs = kwargs
def __call__(self, inputs):
x = inputs
x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(x)
x = layers.Conv2D(64, 7, strides=2, use_bias=self.use_bias, **self.kwargs)(x)
x = layers.BatchNormalization(epsilon=self.epsilon)(x)
x = layers.Activation(self.activation)(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
x = layers.MaxPooling2D(3, strides=2)(x)
for i, (filters, blocks) in enumerate(self.resnet_config):
x = ResNeXtBlock(
filters=filters,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
stride=2,
**self.kwargs,
)(x)
for _ in range(2, blocks + 1):
x = ResNeXtBlock(
filters=filters,
epsilon=self.epsilon,
activation=self.activation,
use_bias=self.use_bias,
conv_shortcut=False,
**self.kwargs,
)(x)
return x
class ResNeXt50(ResNeXt):
"""Customized Implementation of ResNeXt50
Args:
epsilon (float): Small float added to variance to avoid dividing by zero in
batch normalization, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
super().__init__(
resnet_config=[(64, 3), (128, 4), (256, 6), (512, 3)],
epsilon=epsilon,
activation=activation,
use_bias=use_bias,
**kwargs,
)
class ResNeXt101(ResNeXt):
"""Customized Implementation of ResNeXt101
Args:
epsilon (float): Small float added to variance to avoid dividing by zero in
batch normalization, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
super().__init__(
resnet_config=[(64, 3), (128, 4), (256, 23), (512, 3)],
epsilon=epsilon,
activation=activation,
use_bias=use_bias,
**kwargs,
)
class ResNeXt152(ResNeXt):
"""Customized Implementation of ResNeXt152
Args:
epsilon (float): Small float added to variance to avoid dividing by zero in
batch normalization, default: 1.001e-5
activation (keras Activation): activation applied after batch normalization, default: relu
use_bias (bool): whether the convolution layers use a bias vector, default: False
kwargs (keyword arguments): the arguments for Convolution Layer
"""
def __init__(self, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs):
super().__init__(
resnet_config=[(64, 3), (128, 8), (256, 36), (512, 3)],
epsilon=epsilon,
activation=activation,
use_bias=use_bias,
**kwargs,
)
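# Usage sketch (illustrative, not part of the original module): the backbones above
# return a feature tensor, so a classifier head is added by the caller. The input
# shape and class count below are assumptions for the example only.
if __name__ == "__main__":
    from tensorflow.keras import Input, Model

    inp = Input(shape=(224, 224, 3))
    features = ResNet50()(inp)
    out = layers.GlobalAveragePooling2D()(features)
    out = layers.Dense(10, activation="softmax")(out)
    Model(inp, out).summary()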
#!/usr/bin/python
"""
HBC by FinlayDaG33k under the MIT License
"""
import wx
import wx.grid
import wx.lib.agw.aui as aui
import Pyro4
import os
import threading
from encoder_cfg import pyro_host, pyro_port, ftp_host, ftp_port, ftp_user, ftp_pass
from ftplib import FTP
import textwrap
statusMapping = {
'Encoding':'a',
'Pending':'b',
'Finished':'c',
'Cancelled':'d',
'Error':'e',
}
class TaskTable(wx.grid.PyGridTableBase):
def __init__(self, data, rowLabels=None, colLabels=None):
wx.grid.PyGridTableBase.__init__(self)
self.data = data
#Mapping to keep track of which task occupies which row
self.rowMapping = {}
self.rowLabels = rowLabels
self.colLabels = colLabels
def sortData(self,data):
# Sorting goes Encoding Tasks > Pending Tasks > Finished > Cancelled > Error
# Tasks in the same state are then compared by the datetime they were added
# to the server.
data = sorted(data, key=lambda x: statusMapping[x[1]]+str(x[6]))
return data
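# Example of the composite key (illustrative values): an 'Encoding' task added at
# '2021-01-05 10:00' sorts under 'a2021-01-05 10:00' and therefore comes before a
# 'Pending' task added earlier, which sorts under 'b2021-01-04 09:00'.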
def updateData(self,data,grid):
# Old number of rows
start = len(self.data)
# New number of rows
end = len(data)
# Names we actually hit this time -- used to delete old references later
updatedNames = []
# Currently selected tasks in the table
selectedNames = []
for row in grid.GetSelectedRows():
selectedNames.append(self.data[row][0])
data = self.sortData(data)
# Batch mode ensures all updates happen at once
grid.BeginBatch()
# Map each row from the new data object onto the current data object
# and set the corresponding values on the wx.Grid
for row, info in enumerate(data):
updatedNames.append(info[0])
if row < len(self.data):
# Nothing has changed, don't waste cycles remapping the same data
if data[row] == self.data[row]:
continue
self.rowMapping[info[0]] = row
# We're short some rows, append the data instead of mapping
if row >= len(self.data):
self.data.append(info)
else:
self.data[row] = info
# Map this row onto the grid
for i in xrange(0,6):
self.SetValue(row,i,info[i])
# Grid updates done, end the batch
grid.EndBatch()
# Delete unused references in the rowMapping dictionary
for name in self.rowMapping.keys():
if name not in updatedNames:
del self.rowMapping[name]
# Clear selection and reselect any previously selected
# tasks which still exist somewhere in the grid
grid.ClearSelection()
for name in selectedNames:
if name in self.rowMapping:
grid.SelectRow(self.rowMapping[name],True)
# We've got too many rows, inform the grid that it needs to delete some
if end < start:
for row in xrange(end,start):
del self.data[row]
msg = wx.grid.GridTableMessage(self,wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED,end,start-end)
grid.ProcessTableMessage(msg)
# We're short one or more rows, inform the grid it needs to add some
elif end > start:
msg = wx.grid.GridTableMessage(self,wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED,end-start)
grid.ProcessTableMessage(msg)
def GetNumberRows(self):
return len(self.data)
def GetNumberCols(self):
return len(self.colLabels)
def GetColLabelValue(self,col):
if self.colLabels:
return self.colLabels[col]
def GetRowLabelValue(self,row):
if self.rowLabels:
return self.rowLabels[row]
return ''
def IsEmptyCell(self,row,col):
return False
def getRows(self,rows):
ret = []
for row in rows:
ret.append(self.data[row])
return ret
def GetValue(self,row,col):
if not self.data:
return ''
if col == 0:
self.rowMapping[self.data[row][col]]=row
return self.data[row][col]
def SetValue(self,row,col,value):
pass
class TaskGrid(wx.grid.Grid):
def __init__(self,parent,data,rowLabels=None,colLabels=None):
wx.grid.Grid.__init__(self,parent,-1)
self.rowLabels = rowLabels
self.colLabels = colLabels
# We don't give the table the data initially as all the logic
# for correctly sorting + mapping is in the setdata function
self.SetTable(TaskTable([],self.rowLabels,self.colLabels))
self.SetData(data)
# TODO -- Revisit these sizes, kind of ugly on Linux
self.SetColSize(0,300)
self.SetColSize(1,50)
self.SetColSize(2,150)
self.SetColSize(3,70)
self.SetColSize(4,150)
self.SetColSize(5,150)
# We have no row labels and they take up a good amount of space by default
self.SetRowLabelSize(0)
# You cannot edit, you lose, good day, sir.
self.EnableEditing(False)
# This will select whole rows at a time, which makes sense for our needs
# since each row is a task and tasks are the atomic unit
self.SetSelectionMode(1)
def getRows(self,rows):
return self.GetTable().getRows(rows)
def SetData(self,data):
self.GetTable().updateData(data,self)
# Grid will not reflect changes made until forcerefresh is called
self.ForceRefresh()
class taskViewDialog(wx.Dialog):
""" A quick dialog to that gives a detailed view of selected task(s)
This view provides extra data compared to that displayed directly
in the table:
- encoder
- large file support
- quality setting
- format
- Task added datetime
"""
def __init__(self,parent,rows,table):
self._parent = parent
self._table = table
self.rows = rows
# Nothing to show, just quit
if not rows or not table:
self.Close()
# Grab the selected rows from the table so we know what to display
rows = table.getRows(rows)
tasks = []
# Grab the global list of tasks from the central server
# TODO -- This is a little hackish, maybe the table could
# hold a mapping that contains the actual task objects
# that way we could avoid the extra call to the server
# which is already happening every few seconds anyway
tasksIn = self._parent.central.getTasks()
# Grab the task objects matching the selected rows
for row in rows:
if row[0] in tasksIn:
tasks.append(row[0])
# Got no tasks, they may have been removed, either way
# we've got nothing, close 'er up
if not tasks:
self.Close()
label = 'Detailed Task View'
wx.Dialog.__init__(self,parent,-1,label,wx.DefaultPosition,wx.Size(310,400))
mainBox = wx.BoxSizer(wx.VERTICAL)
# TODO -- Sizing is ugly in Linux
self.taskChooser = wx.ListBox(self,-1,style=wx.LB_HSCROLL|wx.LB_SINGLE,size=wx.Size(250,100))
# General Task Info
taskPanel = wx.Panel(self,-1)
taskSizer = wx.FlexGridSizer(rows=12,cols=2,hgap=10,vgap=5)
taskNameLabel = wx.StaticText(taskPanel,label='Name:')
self.taskName = wx.StaticText(taskPanel,label='')
taskOutNameLabel = wx.StaticText(taskPanel,label='Output Name:')
self.taskOutName = wx.StaticText(taskPanel,label='')
taskAddedLabel = wx.StaticText(taskPanel,label='Added:')
self.taskAdded = wx.StaticText(taskPanel,label='')
taskStatusLabel = wx.StaticText(taskPanel,label='Status:')
self.taskStatus = wx.StaticText(taskPanel,label='')
taskEncoderLabel = wx.StaticText(taskPanel,label='Assigned Encoder:')
self.taskEncoder = wx.StaticText(taskPanel,label='')
taskCompletedLabel = wx.StaticText(taskPanel,label='Completed:')
self.taskCompleted = wx.StaticText(taskPanel,label='')
taskStartedLabel = wx.StaticText(taskPanel,label='Started:')
self.taskStarted = wx.StaticText(taskPanel,label='')
taskFinishedLabel = wx.StaticText(taskPanel,label='Finished:')
self.taskFinished = wx.StaticText(taskPanel,label='')
### HB Settings
taskEncLabel = wx.StaticText(taskPanel,label='Encoder:')
self.taskEnc = wx.StaticText(taskPanel,label='')
taskFormatLabel = wx.StaticText(taskPanel,label='Format:')
self.taskFormat = wx.StaticText(taskPanel,label='')
taskLargeLabel = wx.StaticText(taskPanel,label='Large:')
self.taskLarge = wx.StaticText(taskPanel,label='')
taskQualityLabel = wx.StaticText(taskPanel,label='Quality:')
self.taskQuality = wx.StaticText(taskPanel,label='')
taskSizer.AddMany([taskNameLabel,self.taskName,taskOutNameLabel,self.taskOutName,
taskAddedLabel,self.taskAdded,
taskStatusLabel,self.taskStatus,taskEncoderLabel,self.taskEncoder,
taskCompletedLabel,self.taskCompleted,
taskStartedLabel,self.taskStarted,taskFinishedLabel,self.taskFinished,
taskEncLabel,self.taskEnc,taskFormatLabel,self.taskFormat,
taskLargeLabel,self.taskLarge,taskQualityLabel,self.taskQuality,
])
taskPanel.SetSizer(taskSizer)
self.tasksIn = tasksIn
close = wx.Button(self,-1,'Close')
mainBox.Add(self.taskChooser)
mainBox.Add(taskPanel)
mainBox.Add(close)
self.SetSizer(mainBox)
for task in tasks:
self.taskChooser.Insert(task,0)
self.tasks = tasks
self.changed()
self.taskChooser.SetSelection(0)
self.Bind(wx.EVT_BUTTON,self.close,close)
self.Bind(wx.EVT_LISTBOX,self.changed,self.taskChooser)
def changed(self,evt=None):
# Which task is selected?
id = self.tasks[self.taskChooser.GetSelection()]
task,encoder,status = self.tasksIn[id]
# Display task's info
self.taskName.SetLabel('\n'.join(textwrap.wrap(id,35)))
if task.getOutputName():
self.taskOutName.SetLabel('\n'.join(textwrap.wrap(task.getOutputName(),35)))
self.taskAdded.SetLabel(str(task.getAdded()))
self.taskStatus.SetLabel(status)
if encoder:
self.taskEncoder.SetLabel(encoder)
self.taskCompleted.SetLabel(str(task.getCompleted()))
if task.getStarted():
self.taskStarted.SetLabel(str(task.getStarted()))
if task.getFinished():
self.taskFinished.SetLabel(str(task.getFinished()))
self.taskEnc.SetLabel(task.getEncoder())
self.taskFormat.SetLabel(task.getFormat())
self.taskLarge.SetLabel(str(task.getLarge()))
self.taskQuality.SetLabel(task.getQuality())
def close(self,evt=None):
self.Close()
class addEncodeDialog(wx.Dialog):
""" A quick dialog for adding new encode tasks to the server
Allows you to add multiple videos at once and specify
a few HB encoding settings -- just a few for now
"""
def __init__(self,parent):
self._parent = parent
label = 'Add Encode'
self.vids = []
self.dir = None
wx.Dialog.__init__(self,parent,-1,label,wx.DefaultPosition,wx.Size(325,430))
buttonPanel = wx.Panel(self,-1,size=(-1,32))
filePanel = wx.Panel(self,-1)
fileBox = wx.BoxSizer(wx.VERTICAL)
self.vidBox = wx.ListBox(filePanel,-1,style=wx.LB_EXTENDED|wx.LB_HSCROLL,size=wx.Size(300,200))
fileButtonPanel = wx.Panel(filePanel,-1)
fileButtonBox = wx.BoxSizer(wx.HORIZONTAL)
addVids = wx.Button(fileButtonPanel,-1,'Browse')
clearVids = wx.Button(fileButtonPanel,-1,'Clear')
fileButtonBox.Add(addVids)
fileButtonBox.Add(clearVids)
fileButtonPanel.SetSizer(fileButtonBox)
fileBox.Add(self.vidBox)
fileBox.Add(fileButtonPanel)
filePanel.SetSizer(fileBox)
add = wx.Button(buttonPanel,-1,'Add')
cancel = wx.Button(buttonPanel,-1,'Cancel')
mainBox = wx.BoxSizer(wx.VERTICAL)
encodePanel = wx.Panel(self,-1)
formSizer = wx.FlexGridSizer(rows=4,cols=2,hgap=10,vgap=5)
encoderLabel = wx.StaticText(encodePanel,label='Encoder')
self.encoder = wx.ComboBox(encodePanel,-1,value='x264',choices=['x264','ffmpeg','theora'],style=wx.CB_READONLY|wx.CB_SORT|wx.CB_DROPDOWN)
formatLabel = wx.StaticText(encodePanel,label='Format')
self.format = wx.ComboBox(encodePanel,-1,value='mp4',choices=['mp4','mkv'])
largeLabel = wx.StaticText(encodePanel,label='Large file')
self.large = wx.CheckBox(encodePanel,-1,'Large Files')
qualityLabel = wx.StaticText(encodePanel,label='Quality')
choices = [str(x) for x in xrange(0,52)]
self.quality = wx.ComboBox(encodePanel,-1,value='20',choices=choices,style=wx.CB_READONLY|wx.CB_SORT|wx.CB_DROPDOWN)
formSizer.AddMany([encoderLabel,self.encoder,formatLabel,
self.format,largeLabel,self.large,qualityLabel,self.quality])
encodePanel.SetSizer(formSizer)
buttonBox = wx.BoxSizer(wx.HORIZONTAL)
buttonBox.Add(add,0,wx.BOTTOM|wx.RIGHT|wx.TOP,5)
buttonBox.Add(cancel,0,wx.BOTTOM|wx.RIGHT|wx.TOP,5)
buttonPanel.SetSizer(buttonBox)
fileGroup = wx.StaticBox(self,-1,'Files')
fileGroupSizer = wx.StaticBoxSizer(fileGroup,wx.VERTICAL)
fileGroupSizer.Add(filePanel)
mainBox.Add(fileGroupSizer,flag=wx.EXPAND)
encodeGroup = wx.StaticBox(self,-1,'Encode Options')
encodeGroupSizer = wx.StaticBoxSizer(encodeGroup,wx.VERTICAL)
encodeGroupSizer.Add(encodePanel)
mainBox.Add(encodeGroupSizer,flag=wx.EXPAND)
mainBox.Add(buttonPanel)
self.SetSizer(mainBox)
self.Bind(wx.EVT_BUTTON,self.close,cancel)
self.Bind(wx.EVT_BUTTON,self.add,add)
self.Bind(wx.EVT_BUTTON,self.addVid,addVids)
self.Bind(wx.EVT_BUTTON,self.clearVid,clearVids)
def add(self,event):
encoder = self.encoder.GetValue()
format = self.format.GetValue()
large = self.large.IsChecked()
quality = self.quality.GetValue()
files = self.vids
dir = self.dir
self.Close()
if self.vids and self.dir:
self._parent.addVideos(encoder,format,large,quality,files,dir)
def addVid(self,event):
# TODO -- This currently clears out any videos already selected, it would probably
# make more sense for this to just append -- the clear button should handle clearing
# TODO -- Get a more extensive list of supported videos based on HB support and add it to one
# filter that's just called Video Files
diag = wx.FileDialog(self,'Select video(s) to add',style=wx.FD_OPEN|wx.FD_MULTIPLE,
wildcard="Video files (*.flv)|*.flv|AVI files (*.avi)|*.avi|MKV files (*.mkv)|*.mkv")
if diag.ShowModal() == wx.ID_OK:
self.vids = diag.GetFilenames()
self.dir = diag.GetDirectory()
self.changeList()
def close(self,event=None):
self.Close()
def clearVid(self,event):
self.vids=[]
self.dir = None
self.changeList()
def changeList(self,event=None):
""" populates the listbox with the current list of added video files """
self.vidBox.Clear()
for item in self.vids:
self.vidBox.Insert(item,0,None)
class EncoderFrame(wx.Frame):
""" The Main UI element
"""
def __init__(self,parent,ID,title,position,size):
wx.Frame.__init__(self,parent,ID,title,position,size)
self.mgr = aui.AuiManager(self)
taskPanel = wx.Panel(self,-1,size=(350,300))
taskBox = wx.BoxSizer(wx.VERTICAL)
# Check in with the central dispatch (should be defined in encoder_cfg.py)
# if you aren't running a server, terrible things will happen here
self.central = Pyro4.Proxy('PYRONAME:central.encoding@{0}:{1}'.format(pyro_host,pyro_port))
self.ids = {
'taskList': wx.NewId(),
'addFolder': wx.NewId(),
'addVideo': wx.NewId(),
'taskTimer': wx.NewId(),
from __future__ import print_function
from collections import defaultdict, OrderedDict, namedtuple
import networkx as nx
import igraph as ig
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix,csc_matrix
from scipy.stats import hypergeom
import time
import warnings
from .network import Network, NxNetwork, IgNetwork
from .tables import Genes, Snps
from .propagation import random_walk_rst, get_common_indices, heat_diffusion
from .utils import get_neighbors, binarize, neg_log_val, calculate_alpha
class Nbgwas(object):
"""Interface to Network Boosted GWAS
Parameters
----------
snp_level_summary : pd.DataFrame
A DataFrame object that holds the snp level summary or a file that
points to a text file
gene_level_summary : pd.DataFrame
A DataFrame object that holds the gene level summary or a file that
points to a text file
network : networkx object
The network to propagate the p-value over.
protein_coding_table : str or pd.DataFrame
A DataFrame object that defines the start and end position and
chromosome number for each coding gene. This mapping will be used for
the snp to gene assignment
Note
----
Please be aware the interface is very unstable and will be changed.
TODO
----
- Standardize SNP and gene level input and protein coding region (file
format)
- Document what columns are needed for each of the dataframes
- Factor out the numpy to pandas code after all diffusion functions
- Missing utility functions (Manhattan plots)
- Include logging
"""
def __init__(
self,
snp_level_summary=None,
gene_level_summary=None,
network = None,
protein_coding_table=None,
snp_chrom_col='hg18chr',
bp_col='bp',
snp_pval_col='pval',
gene_pval_col='TopSNP P-Value',
gene_col='Gene',
pc_chrom_col='Chrom',
start_col='Start',
end_col='End',
node_name="name",
validate = True,
verbose=True
):
self.verbose = verbose
self.validate = validate
self.genes = Genes(
gene_level_summary,
pval_col=gene_pval_col,
name_col=gene_col,
)
self.snps = Snps(
snp_level_summary,
protein_coding_table,
snp_chrom_col=snp_chrom_col,
snp_bp_col=bp_col,
pval_col=snp_pval_col,
pc_chrom_col=pc_chrom_col,
start_col=start_col,
end_col=end_col
)
self._node_name = node_name # The attribute contains the gene name
# on the network
self.network = network
def __repr__(self):
contains = []
if self.genes.table is not None:
contains.append('"genes table"')
if self.snps.snp_table is not None:
contains.append('"SNP table"')
if self.network.network is not None:
contains.append('"network"')
if not contains:
contains = 'Nothing'
else:
contains = ', '.join(contains)
return f'{self.__class__.__name__} object containing {contains}'
@property
def network(self):
"""networkx Graph object : Network object used for graph diffusion
node_names attribute is automatically created if the network is a
networkx object. If a node has a `self.node_name` attribute, that name
is used for node_names. Otherwise, the node id itself is used as the
name.
"""
if self._network is None:
return None
return self._network
@network.setter
def network(self, network):
if network is None:
self._network = NxNetwork(None, node_name=self._node_name)
elif isinstance(network, nx.Graph):
self._network = NxNetwork(network, node_name=self._node_name)
elif isinstance(network, ig.Graph):
self._network = IgNetwork(network, node_name=self._node_name)
elif isinstance(network, Network):
self._network = network
else:
raise ValueError("Graph type is not understood. Must be a networkx object or an igraph object")
#TODO: Need to change where self.graphs points to (to Network maybe?)
if not hasattr(self, "graphs"):
self.graphs = {'full_network': network}
def map_snps_to_genes(
self,
window_size=0,
agg_method='min',
):
"""Maps SNP p-values to genes
This is a convenience function for the functionality within the `Snps`
object. The output is forced to be a `Genes` object and is automatically
assigned to Nbgwas.genes.
See `Snps.assign_snps_to_genes` for documentation.
"""
self.genes = self.snps.assign_snps_to_genes(
window_size=window_size,
agg_method=agg_method,
to_Gene=True
)
return self
def map_to_node_table(self, columns=None, update_node_attributes=False, fillna=0):
"""Maps information from gene table to network
Parameters
----------
columns : str or list of str or None
If None, all columns will be added
"""
if columns is None:
columns = list(self.genes.table.columns)
if isinstance(columns, str):
columns = [columns]
# Remove the extra column that the merge may introduce
remove=False
if self.genes.name_col not in self.network.node_table.columns:
remove=True
self.network.node_table = self.network.node_table.merge(
self.genes.table[[self.genes.name_col] + columns],
left_on = self.network.node_name,
right_on = self.genes.name_col,
how='left'
)
if remove and self.genes.name_col in self.network.node_table.columns:
self.network.node_table.drop(columns=self.genes.name_col, inplace=True)
self.network.node_table.fillna(fillna, inplace=True)
if update_node_attributes:
# Push the merged columns back onto the graph as node attributes, assuming
# set_node_attributes accepts a {column: {node name: value}} mapping.
attr_map = self.network.node_table.set_index(self.network.node_name)[columns].to_dict()
self.network.set_node_attributes(attr_map, namespace='nodenames')
self.network.refresh_node_table()
return self
def map_to_gene_table(self, columns=None, fill_value=0):
"""Maps columns from node_table to gene table"""
def tmp_func(x):
if pd.notnull(x[0]):
return x[0]
elif pd.notnull(x[1]):
return x[1]
#else:
# raise ValueError("Cannot both be Null")
if isinstance(columns, str):
columns = [columns]
elif columns is None:
columns = self.network.node_table.columns
# Remove the extra column that the merge may introduce
remove=False
if self.network.node_name not in self.genes.table.columns:
remove=True
self.genes.table = self.genes.table.merge(
self.network.node_table[[self.network.node_name] + columns],
left_on=self.genes.name_col,
right_on=self.network.node_name,
how='outer'
)
self.genes.table[self.genes.name_col] = self.genes.table[
[self.genes.name_col, self.network.node_name]
].agg(tmp_func, axis=1)
if remove and self.network.node_name in self.genes.table.columns:
self.genes.table.drop(columns=self.network.node_name, inplace=True)
return self
def diffuse(
self,
method="random_walk",
node_attribute="Heat",
result_name="Diffused Heat",
update_node_attributes=False,
**kwargs
):
"""Wrapper for the various diffusion methods available
Calls one of the three diffusion methods and add the results to the
heat attribute.
Parameters
----------
method : str
Must be one of the following: `random_walk`,
`random_walk_with_kernel`, `heat_diffusion`. Each method calls the
corresponding method.
node_attribute : str
Column of the node table used as the starting heat for diffusion
result_name : str
Column name under which the diffused values are stored
update_node_attributes : bool
If True, the node attributes of the underlying graph are refreshed from the
updated node table after diffusion.
kwargs
Any additional keyword arguments for each of the diffusion function.
See the individual function documentation.
TODO
----
* Factor out various setup and tear-down code
"""
allowed = ["random_walk", "random_walk_with_kernel", "heat_diffusion"]
if method not in allowed:
raise ValueError(
"method must be one of the following: %s" % allowed
)
if self.network is None:
raise RuntimeError("Network was given!")
if method == "random_walk":
df = self.random_walk(
node_attribute=node_attribute,
**kwargs
)
elif method == "random_walk_with_kernel":
df = self.random_walk_with_kernel(
node_attribute=node_attribute,
**kwargs
)
elif method == "heat_diffusion":
df = self.heat_diffusion(
node_attribute=node_attribute,
**kwargs
)
else:
raise RuntimeError("Unexpected method name!")
sorted_idx = self.network.node_table.index.sort_values()
self.network.node_table.loc[sorted_idx, result_name] = df
self.network.node_table.sort_values(
by=result_name,
ascending=False,
inplace=True
)
if update_node_attributes:
self.network.refresh_node_attributes()
return self
def random_walk(
self,
node_attribute='Heat',
alpha='optimal',
normalize=True,
axis=1,
):
"""Runs random walk iteratively
Parameters
----------
alpha : float
The restart probability
normalize : bool
If true, the adjacency matrix will be row or column normalized
according to axis
axis : int
0 row normalize, 1 col normalize. (The integers are different
than convention because the axis is the axis that is summed
over)
TODO
----
* Allow for diffusing multiple heat columns
"""
if not isinstance(node_attribute, list):
node_attribute = [node_attribute]
if isinstance(alpha, str):
alpha = calculate_alpha(len(self.network.edges()))
sorted_idx = self.network.node_table.index.sort_values()
F0 = self.network.node_table.loc[sorted_idx, node_attribute].values.T
A = self.network.adjacency_matrix
out = random_walk_rst(F0, A, alpha, normalize=normalize, axis=axis)
return np.array(out.todense()).ravel()
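# Note: `random_walk_rst` (imported from .propagation) is assumed to implement the
# usual random-walk-with-restart update, F_{t+1} = alpha * F_0 + (1 - alpha) * F_t * W,
# iterated to convergence over the (normalized) adjacency matrix W.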
def heat_diffusion(self, node_attribute="Heat", t=0.1):
"""Runs heat diffusion without a pre-computed kernel
Parameters
----------
node_attribute : str
Indicates which column to use as the starting heat for diffusion.
t : float
Total time of diffusion. t controls the amount of signal that is allowed
to diffuse over the network.
"""
if not isinstance(node_attribute, list):
node_attribute = [node_attribute]
sorted_idx = self.network.node_table.index.sort_values()
out_vector = heat_diffusion(
self.network.laplacian_matrix,
self.network.node_table.loc[sorted_idx, node_attribute].values.ravel(),
start=0,
end=t
)
return out_vector
def hypergeom(
self,
gold,
column,
table='gene',
top=100,
ngenes=20000,
ascending=False
):
"""Run hypergemoetric test
Parameters
----------
gold : list
An iterable of genes
top : int
The number of ranked genes to select.
ngenes : int
The number of genes to be considered as the global background.
column : str
The name of the column used to rank the genes for significance.
ascending : bool
Sort order used when ranking the genes (False ranks the largest values first).
"""
if table == 'gene':
table_df = self.genes.table
name_col = self.genes.name_col
elif table == 'network':
table_df = self.network.node_table
name_col = self.network.node_name
sorted_genes = table_df.sort_values(by=column, ascending=ascending)
sorted_genes = sorted_genes[name_col].values
genes = sorted_genes[:top]
intersect = set(genes).intersection(set(gold))
score = len(intersect)
M, n, N = ngenes, len(gold), top
pvalue = 1 - hypergeom.cdf(score, M, n, N)
Hypergeom = namedtuple('Hypergeom',
['pvalue', 'n_intersect', 'common_items']
)
return Hypergeom(pvalue, score, intersect)
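# Example call (illustrative): hypergeom(gold=known_genes, column='Diffused Heat',
# top=100, ngenes=20000) returns a Hypergeom(pvalue, n_intersect, common_items) namedtuple.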
def check_significance(
self,
gold,
column,
table='gene',
top=100,
threshold=0.05,
ascending=False
):
"""Check if the top N genes are significant
Parameters
----------
gold : dict
A gene to p-value dictionary. If a gene cannot be found in the
dictionary, the default value is 1.
top : int
The number of top-ranked genes to check.
define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_conversations_messages_inbound_open(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param OpenNormalizedMessage body: NormalizedMessage (required)
:return: OpenNormalizedMessage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_conversations_messages_inbound_open" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_conversations_messages_inbound_open`")
resource_path = '/api/v2/conversations/messages/inbound/open'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OpenNormalizedMessage',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_conversations_messaging_integrations_facebook(self, body, **kwargs):
"""
Create a Facebook Integration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_conversations_messaging_integrations_facebook(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param FacebookIntegrationRequest body: FacebookIntegrationRequest (required)
:return: FacebookIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_conversations_messaging_integrations_facebook" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_conversations_messaging_integrations_facebook`")
resource_path = '/api/v2/conversations/messaging/integrations/facebook'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FacebookIntegration',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_conversations_messaging_integrations_line(self, body, **kwargs):
"""
Create a LINE messenger Integration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_conversations_messaging_integrations_line(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param LineIntegrationRequest body: LineIntegrationRequest (required)
:return: LineIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_conversations_messaging_integrations_line" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_conversations_messaging_integrations_line`")
resource_path = '/api/v2/conversations/messaging/integrations/line'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LineIntegration',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_conversations_messaging_integrations_open(self, body, **kwargs):
"""
Create an Open messaging integration
See https://developer.genesys.cloud/api/digital/openmessaging/ for more information.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_conversations_messaging_integrations_open(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param OpenIntegrationRequest body: OpenIntegrationRequest (required)
:return: OpenIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_conversations_messaging_integrations_open" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_conversations_messaging_integrations_open`")
resource_path = '/api/v2/conversations/messaging/integrations/open'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OpenIntegration',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_conversations_messaging_integrations_twitter(self, body, **kwargs):
"""
Create a Twitter Integration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_conversations_messaging_integrations_twitter(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param TwitterIntegrationRequest body: TwitterIntegrationRequest (required)
:return: TwitterIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_conversations_messaging_integrations_twitter" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_conversations_messaging_integrations_twitter`")
resource_path = '/api/v2/conversations/messaging/integrations/twitter'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TwitterIntegration',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_conversations_messaging_integrations_whatsapp(self, body, **kwargs):
"""
Create a WhatsApp Integration
You must be approved by WhatsApp to use this feature. Your approved e164-formatted phone number and valid WhatsApp certificate for your number are required. Your WhatsApp certificate must have valid base64 encoding. Please paste carefully and do not add any leading or trailing spaces. Do not alter any characters. An integration must be activated within 7 days of certificate generation. If you cannot complete the addition and activation of the number within 7 days, please obtain a new certificate before creating the integration. Integrations created with an invalid number or certificate may immediately incur additional integration fees. Please carefully enter your number and certificate as described.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_conversations_messaging_integrations_whatsapp(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param WhatsAppIntegrationRequest body: WhatsAppIntegrationRequest (required)
:return: WhatsAppIntegration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_conversations_messaging_integrations_whatsapp" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `post_conversations_messaging_integrations_whatsapp`")
resource_path = '/api/v2/conversations/messaging/integrations/whatsapp'.replace('{format}', 'json')
from __future__ import annotations
import socket
import logging
import re
import threading
import pyaudio
import array
from typing import List, Optional, Tuple, Union
logging.basicConfig(filename='audiostream_logs.log', level=logging.INFO)
_MSG_PREFIX = b'PRE'
_MSG_SUFIX = b'SUF'
def _compose_message(message: bytes) -> bytes:
""" Composes a message that is ready to be sent over the socket or be joined with other messages.
Should not be called directly.
:param message: message to be sent over the socket
:return: a message that is ready to be sent over the socket or be joined with other messages.
"""
return _MSG_PREFIX + message + _MSG_SUFIX
def _decompose_message(message: bytes) -> List[bytes]:
""" Reads the raw socket message and decomposes it into a list of understandable messages or audio frames.
Should not be called directly.
:param message: raw socket message
:return: list of messages or audio frames
"""
pattern = _MSG_PREFIX + b'(.*?)' + _MSG_SUFIX
pat = re.compile(pattern)
return pat.findall(message)
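# Round-trip sketch: two composed messages survive concatenation on the wire and are
# split back into the original payloads on the receiving side.
if __name__ == "__main__":
    wire = _compose_message(b'hello') + _compose_message(b'world')
    assert _decompose_message(wire) == [b'hello', b'world']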
class AudioProperties:
""" Class representing the audio properties of an audio file, it requires sampling, channels and length or
the number of frames available in the audio file as one can be derived from the other.
The class offers a cast to string using __str__ which will provide a representation that is useful purely for logging purposes.
It also offers a conversion to a bytes message, :func:`~audiostream.AudioProperties.to_bytes_message`, which is used when transferring the class over the sockets.
Apart from standard initialization the class can also be instantiated using the static :func:`~audiostream.AudioProperties.from_bytes_message`
method which will return an instance of AudioProperties class created using a socket message made by :func:`~audiostream.AudioProperties.to_bytes_message` method.
:param sampling: sampling rate of the associated audio file
:param channels: number of channels used by the associated audio file
:param length: length of the audio file in seconds
:param frames_in_audio: number of frames making up the audio file
"""
_SAMPLING_RATE = b'SAMPLING_RATE_'
_CHANNELS = b'CHANNELS_'
_LENGTH = b'LENGTH_'
_FRAMES_IN_AUDIO = b'FRAMES_IN_AUDIO_'
def __init__(self, sampling, channels, length=None, frames_in_audio=None):
self.sampling = sampling
self.channels = channels
if not length and not frames_in_audio:
raise AssertionError("Length or frames_in_audio must be provided")
if length and frames_in_audio:
self.length = length
self.frames_in_audio = frames_in_audio
elif length:
self.length = length
self.frames_in_audio = self.sampling * self.length
elif frames_in_audio:
self.frames_in_audio = frames_in_audio
self.length = self.frames_in_audio / self.sampling
def __str__(self):
return f"\nNumber of frames: {self.frames_in_audio}" \
f"\nSampling rate: {self.sampling}" \
f"\nLength in seconds: {self.length}" \
f"\nNumber of channels: {self.channels}"
def to_bytes_message(self) -> List[bytes]:
""" Converts the instance of the class into a list of byte messages which can be sent over the socket"""
return [self._SAMPLING_RATE + b'%d' % int(self.sampling),
self._CHANNELS + b'%d' % int(self.channels),
self._LENGTH + b'%d' % int(self.length),
self._FRAMES_IN_AUDIO + b'%d' % int(self.frames_in_audio)]
@staticmethod
def from_bytes_message(byte_messages: List[bytes]) -> AudioProperties:
""" Creates and returns an instance of AudioProperties class out of a bytes message created using
:func:`~audiostream.AudioProperties.to_bytes_message` method
:param byte_messages: list of bytes containing the data about sample rate, channels
and length or frames in audio of an audio file.
:type byte_messages: List[bytes]
"""
sampling_rate = None
channels = None
length = None
frames_in_audio = None
for byte_message in byte_messages:
if AudioProperties._SAMPLING_RATE in byte_message:
sampling_rate = int(byte_message[len(AudioProperties._SAMPLING_RATE):].decode('utf-8'))
if AudioProperties._CHANNELS in byte_message:
channels = int(byte_message[len(AudioProperties._CHANNELS):].decode('utf-8'))
if AudioProperties._LENGTH in byte_message:
length = int(byte_message[len(AudioProperties._LENGTH):].decode('utf-8'))
if AudioProperties._FRAMES_IN_AUDIO in byte_message:
frames_in_audio = int(byte_message[len(AudioProperties._FRAMES_IN_AUDIO):].decode('utf-8'))
return AudioProperties(sampling=sampling_rate, channels=channels, length=length, frames_in_audio=frames_in_audio)
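# Round-trip sketch (illustrative values):
#   props = AudioProperties(sampling=44100, channels=2, length=12)
#   AudioProperties.from_bytes_message(props.to_bytes_message())
# reconstructs an equivalent instance; frames_in_audio is derived as sampling * length.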
class MessageType:
""" Class determining possible types of messages sent from client to server and generic answers
the server might return apart from audio data
"""
GIVE = b'GIVE_'
STREAM = b'STREAM_'
ENDOFMESSAGE = b'END OF SOCKET MESSAGE'
ENDOFAUDIOFILE = b'END OF AUDIO FILE'
class MessageCommand:
""" Class determining possible extensions of basic message types """
AUDIO_PROPERTIES = b'AUDIO_PROPERTIES_'
AUDIOFILESLIST = b'AUDIO_FILES_LIST'
class AudioStreamClient:
""" This is client class used for communicating with the server.
It provides the means for requesting audio files list, retrieving an audio files properties and
requesting a stream of an audio file. It contains a basic implementation of an audio output allowing for playing the
streamed data using the pyaudio library in the form of :func:`~audiostream.AudioStreamClient.initialize_audio_playback` and
:func:`~audiostream.AudioStreamClient.play_streamed_data` methods which can be overriden.
"""
def __init__(self):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
""" Needed to save them for auto reconnection """
self.__host = None
self.__port = None
self._stream_message_size = 0
self._py_audio = None
self._stream = None
def initialize_audio_playback(self, audio_properties: AudioProperties) -> None:
""" Method for initializing the audio playback based on provided audio properties,
should be overridden along with :func:`~audiostream.AudioStreamClient.play_streamed_data`
if custom audio playback is created.
:param audio_properties: instance of audio properties used for initializing audio playback
:type audio_properties: AudioProperties
"""
self._py_audio = pyaudio.PyAudio()
self._stream = self._py_audio.open(format=pyaudio.paInt32,
channels=audio_properties.channels,
rate=audio_properties.sampling,
output=True)
def play_streamed_data(self, audio_frames: List[bytes]) -> None:
""" Method for playing the audio based on provided audio frame,
should be overridden along with :func:`~audiostream.AudioStreamClient.play_streamed_data`
if custom audio playback is created.
:param audio_frames: a list of audio frames each containing one discrete time step in the audio file.
:type audio_frames: list
"""
intel = array.array('i')
for audio_frame in audio_frames:
intel.append(int(audio_frame))
self._stream.write(intel.tobytes())
def connect(self, host: str, port: int) -> None:
""" Attempts connection to a server at a given host and port
:param host: ip address of the host, should be a string in the format of '127.0.0.1'
:type host: str
:param port: port over which the connection should be made
:type port: int
"""
logging.info(f'Connecting to {host}:{port}')
try:
self._socket.connect((host, port))
except ConnectionRefusedError:
logging.error(f"Couldn't connect to server, make sure it's running before connecting")
raise
self.__host = host
self.__port = port
def close(self) -> None:
""" Closes the socket, no need to notify server of the event.
"""
self._socket.close()
def _read_message(self, data_size: int = 1024) -> List[bytes]:
""" Retrieves the specified number of bytes from the socket and returns a list of decomposed messages.
Should not be called directly.
:param data_size: number of bytes read from socket
:return: list of decomposed messages from the read socket bytes
"""
message = self._socket.recv(data_size)
return _decompose_message(message)
def _send_message(self, message_type: bytes, message_command: Optional[bytes] = None, audio_file: Optional[str] = None) -> None:
""" Sends a :class:`audiostream.MessageCommand` of the specified :class:`audiostream.MessageType` or
associated with specified audio file to the server. Should not be called directly.
:param message_type: attribute from the :class:`audiostream.MessageType` class
:param message_command: optional attribute from the :class:`audiostream.MessageCommand` class
:param audio_file: optional name of the audio file associated with the request
"""
if not message_command and not audio_file:
raise AssertionError("Can't send a message using only its type")
message = message_type
if message_command:
message += message_command
if audio_file:
if type(audio_file) is not bytes:
audio_file = audio_file.encode('utf-8')
message += audio_file
message = _compose_message(message)
logging.info(f"Sending message to server {message}")
self._socket.send(message)
def _request_feature(self, command: bytes, audio_file: Optional[str] = None, size: int = 1024) -> List[bytes]:
""" Requests a feature from the client specified by the :class:`audiostream.MessageCommand` class and
an optional associated audio file, after sending a request for the feature
it awaits until server sends the response. Should not be called directly.
:param command: attribute from the :class:`audiostream.MessageCommand` class
:param audio_file: optional name of the audio file associated with the request
:param size: expected size of the response message
:return: decomposed server response
"""
self._send_message(MessageType.GIVE, message_command=command, audio_file=audio_file)
return self._await_messages(size)
def _await_messages(self, size: int = 1024) -> List[bytes]:
""" Awaits incoming message from the server and returns a decomposed list of actual messages
contained in the socket message. Should not be called directly.
:param size: size of the incoming message
:return: list of decomposed messages
"""
self._socket.setblocking(True)
messages_retrieved = self._read_message(size)
self._socket.setblocking(False)
return messages_retrieved
def retrieve_audio_files_list(self) -> List[str]:
""" Asks the server for list of audio files available for streaming
:return: list of audio files
"""
logging.info("Requesting audio files from server")
self._send_message(MessageType.GIVE, message_command=MessageCommand.AUDIOFILESLIST)
audio_file_list = []
while True:
try:
unpacked = self._read_message()
except socket.error:
continue
for unpacked_message in unpacked:
if MessageType.ENDOFMESSAGE in unpacked_message:
logging.info(f"Retrieved audio files {audio_file_list}")
return audio_file_list
audio_file_list.append(unpacked_message.decode('utf-8'))
def retrieve_audio_file_properties(self, audio_file: str) -> AudioProperties:
""" Asks the server for the properties of the provided audio file
:param audio_file: the audio file for which we want to retrieve properties
:type audio_file: str
:return: audio properties of the requested audio file
"""
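# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original client code):
# how calling code might drive the request/response protocol implemented above.
# The class name `AudioStreamClient` and the host/port values are assumptions;
# only connect(), close() and the retrieve_* methods come from the code itself.
def _example_client_session(host: str = 'localhost', port: int = 5000) -> None:
    client = AudioStreamClient()  # hypothetical name of the client class defined above
    client.connect(host, port)
    try:
        files = client.retrieve_audio_files_list()
        if files:
            # Ask for the properties of the first advertised audio file.
            properties = client.retrieve_audio_file_properties(files[0])
            logging.info(f"{files[0]} -> {properties}")
    finally:
        client.close()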
"phoneNumber"
PHONE_NUMBER_DESC = "phoneNumber desc"
SHIPMENT_METHOD_ID = "shipmentMethodId"
SHIPMENT_METHOD_ID_DESC = "shipmentMethodId desc"
TAX_AREA_DISPLAY_NAME = "taxAreaDisplayName"
TAX_AREA_DISPLAY_NAME_DESC = "taxAreaDisplayName desc"
TAX_AREA_ID = "taxAreaId"
TAX_AREA_ID_DESC = "taxAreaId desc"
TAX_LIABLE = "taxLiable"
TAX_LIABLE_DESC = "taxLiable desc"
TAX_REGISTRATION_NUMBER = "taxRegistrationNumber"
TAX_REGISTRATION_NUMBER_DESC = "taxRegistrationNumber desc"
TYPE = "type"
TYPE_DESC = "type desc"
WEBSITE = "website"
WEBSITE_DESC = "website desc"
class Enum59(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ADDRESS = "address"
BLOCKED = "blocked"
CURRENCY_CODE = "currencyCode"
CURRENCY_ID = "currencyId"
DISPLAY_NAME = "displayName"
EMAIL = "email"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NUMBER = "number"
PAYMENT_METHOD_ID = "paymentMethodId"
PAYMENT_TERMS_ID = "paymentTermsId"
PHONE_NUMBER = "phoneNumber"
SHIPMENT_METHOD_ID = "shipmentMethodId"
TAX_AREA_DISPLAY_NAME = "taxAreaDisplayName"
TAX_AREA_ID = "taxAreaId"
TAX_LIABLE = "taxLiable"
TAX_REGISTRATION_NUMBER = "taxRegistrationNumber"
TYPE = "type"
WEBSITE = "website"
CURRENCY = "currency"
PAYMENT_METHOD = "paymentMethod"
PAYMENT_TERM = "paymentTerm"
PICTURE = "picture"
SHIPMENT_METHOD = "shipmentMethod"
class Enum6(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
BLOCKED = "blocked"
BLOCKED_DESC = "blocked desc"
CATEGORY = "category"
CATEGORY_DESC = "category desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NUMBER = "number"
NUMBER_DESC = "number desc"
SUB_CATEGORY = "subCategory"
SUB_CATEGORY_DESC = "subCategory desc"
class Enum60(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CURRENCY = "currency"
PAYMENT_METHOD = "paymentMethod"
PAYMENT_TERM = "paymentTerm"
PICTURE = "picture"
SHIPMENT_METHOD = "shipmentMethod"
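# Hedged usage sketch: these generated enums enumerate the legal OData $select and
# $expand values (the "... desc" members of the ordering enums above are $orderby
# values). A caller would typically pass lists of them to the generated list
# operation; the operation name `list_customers` and the keyword argument names
# below are assumptions, only the enum members come from the definitions above.
def _example_entity_query(client):
    return client.list_customers(
        select=[Enum59.DISPLAY_NAME, Enum59.PHONE_NUMBER],
        expand=[Enum60.CURRENCY],
    )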
class Enum61(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ADDRESS = "address"
BLOCKED = "blocked"
CURRENCY_CODE = "currencyCode"
CURRENCY_ID = "currencyId"
DISPLAY_NAME = "displayName"
EMAIL = "email"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NUMBER = "number"
PAYMENT_METHOD_ID = "paymentMethodId"
PAYMENT_TERMS_ID = "paymentTermsId"
PHONE_NUMBER = "phoneNumber"
SHIPMENT_METHOD_ID = "shipmentMethodId"
TAX_AREA_DISPLAY_NAME = "taxAreaDisplayName"
TAX_AREA_ID = "taxAreaId"
TAX_LIABLE = "taxLiable"
TAX_REGISTRATION_NUMBER = "taxRegistrationNumber"
TYPE = "type"
WEBSITE = "website"
CURRENCY = "currency"
PAYMENT_METHOD = "paymentMethod"
PAYMENT_TERM = "paymentTerm"
PICTURE = "picture"
SHIPMENT_METHOD = "shipmentMethod"
class Enum62(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
CURRENCY = "currency"
PAYMENT_METHOD = "paymentMethod"
PAYMENT_TERM = "paymentTerm"
PICTURE = "picture"
SHIPMENT_METHOD = "shipmentMethod"
class Enum63(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
AMOUNT_DECIMAL_PLACES = "amountDecimalPlaces"
AMOUNT_ROUNDING_PRECISION = "amountRoundingPrecision"
CODE = "code"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
SYMBOL = "symbol"
class Enum64(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CODE = "code"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum65(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CALCULATE_DISCOUNT_ON_CREDIT_MEMOS = "calculateDiscountOnCreditMemos"
CODE = "code"
DISCOUNT_DATE_CALCULATION = "discountDateCalculation"
DISCOUNT_PERCENT = "discountPercent"
DISPLAY_NAME = "displayName"
DUE_DATE_CALCULATION = "dueDateCalculation"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum66(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CONTENT = "content"
CONTENT_DESC = "content desc"
CONTENT_TYPE = "contentType"
CONTENT_TYPE_DESC = "contentType desc"
HEIGHT = "height"
HEIGHT_DESC = "height desc"
WIDTH = "width"
WIDTH_DESC = "width desc"
class Enum67(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CONTENT = "content"
CONTENT_TYPE = "contentType"
HEIGHT = "height"
WIDTH = "width"
class Enum68(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CONTENT = "content"
CONTENT_TYPE = "contentType"
HEIGHT = "height"
WIDTH = "width"
class Enum69(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CODE = "code"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum7(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
BLOCKED = "blocked"
CATEGORY = "category"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NUMBER = "number"
SUB_CATEGORY = "subCategory"
class Enum70(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CODE = "code"
CODE_DESC = "code desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
class Enum71(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CODE = "code"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
DIMENSION_VALUES = "dimensionValues"
class Enum72(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
DIMENSION_VALUES = "dimensionValues"
class Enum73(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CODE = "code"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
DIMENSION_VALUES = "dimensionValues"
class Enum74(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
DIMENSION_VALUES = "dimensionValues"
class Enum75(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CODE = "code"
CODE_DESC = "code desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
class Enum76(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CODE = "code"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum77(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CODE = "code"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum78(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CODE = "code"
CODE_DESC = "code desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
class Enum79(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CODE = "code"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum8(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
BLOCKED = "blocked"
CATEGORY = "category"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
NUMBER = "number"
SUB_CATEGORY = "subCategory"
class Enum80(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CODE = "code"
DISPLAY_NAME = "displayName"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum81(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
ADDRESS = "address"
ADDRESS_DESC = "address desc"
BIRTH_DATE = "birthDate"
BIRTH_DATE_DESC = "birthDate desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
EMAIL = "email"
EMAIL_DESC = "email desc"
EMPLOYMENT_DATE = "employmentDate"
EMPLOYMENT_DATE_DESC = "employmentDate desc"
GIVEN_NAME = "givenName"
GIVEN_NAME_DESC = "givenName desc"
JOB_TITLE = "jobTitle"
JOB_TITLE_DESC = "jobTitle desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
MIDDLE_NAME = "middleName"
MIDDLE_NAME_DESC = "middleName desc"
MOBILE_PHONE = "mobilePhone"
MOBILE_PHONE_DESC = "mobilePhone desc"
NUMBER = "number"
NUMBER_DESC = "number desc"
PERSONAL_EMAIL = "personalEmail"
PERSONAL_EMAIL_DESC = "personalEmail desc"
PHONE_NUMBER = "phoneNumber"
PHONE_NUMBER_DESC = "phoneNumber desc"
STATISTICS_GROUP_CODE = "statisticsGroupCode"
STATISTICS_GROUP_CODE_DESC = "statisticsGroupCode desc"
STATUS = "status"
STATUS_DESC = "status desc"
SURNAME = "surname"
SURNAME_DESC = "surname desc"
TERMINATION_DATE = "terminationDate"
TERMINATION_DATE_DESC = "terminationDate desc"
class Enum82(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ADDRESS = "address"
BIRTH_DATE = "birthDate"
DISPLAY_NAME = "displayName"
EMAIL = "email"
EMPLOYMENT_DATE = "employmentDate"
GIVEN_NAME = "givenName"
JOB_TITLE = "jobTitle"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
MIDDLE_NAME = "middleName"
MOBILE_PHONE = "mobilePhone"
NUMBER = "number"
PERSONAL_EMAIL = "personalEmail"
PHONE_NUMBER = "phoneNumber"
STATISTICS_GROUP_CODE = "statisticsGroupCode"
STATUS = "status"
SURNAME = "surname"
TERMINATION_DATE = "terminationDate"
PICTURE = "picture"
class Enum83(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
PICTURE = "picture"
class Enum84(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ADDRESS = "address"
BIRTH_DATE = "birthDate"
DISPLAY_NAME = "displayName"
EMAIL = "email"
EMPLOYMENT_DATE = "employmentDate"
GIVEN_NAME = "givenName"
JOB_TITLE = "jobTitle"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
MIDDLE_NAME = "middleName"
MOBILE_PHONE = "mobilePhone"
NUMBER = "number"
PERSONAL_EMAIL = "personalEmail"
PHONE_NUMBER = "phoneNumber"
STATISTICS_GROUP_CODE = "statisticsGroupCode"
STATUS = "status"
SURNAME = "surname"
TERMINATION_DATE = "terminationDate"
PICTURE = "picture"
class Enum85(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
PICTURE = "picture"
class Enum86(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
CONTENT = "content"
CONTENT_DESC = "content desc"
CONTENT_TYPE = "contentType"
CONTENT_TYPE_DESC = "contentType desc"
HEIGHT = "height"
HEIGHT_DESC = "height desc"
WIDTH = "width"
WIDTH_DESC = "width desc"
class Enum87(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CONTENT = "content"
CONTENT_TYPE = "contentType"
HEIGHT = "height"
WIDTH = "width"
class Enum88(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
CONTENT = "content"
CONTENT_TYPE = "contentType"
HEIGHT = "height"
WIDTH = "width"
class Enum89(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
ACCOUNT_ID = "accountId"
ACCOUNT_ID_DESC = "accountId desc"
ACCOUNT_NUMBER = "accountNumber"
ACCOUNT_NUMBER_DESC = "accountNumber desc"
CREDIT_AMOUNT = "creditAmount"
CREDIT_AMOUNT_DESC = "creditAmount desc"
DEBIT_AMOUNT = "debitAmount"
DEBIT_AMOUNT_DESC = "debitAmount desc"
DESCRIPTION = "description"
DESCRIPTION_DESC = "description desc"
DOCUMENT_NUMBER = "documentNumber"
DOCUMENT_NUMBER_DESC = "documentNumber desc"
DOCUMENT_TYPE = "documentType"
DOCUMENT_TYPE_DESC = "documentType desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
POSTING_DATE = "postingDate"
POSTING_DATE_DESC = "postingDate desc"
class Enum9(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
AGED_AS_OF_DATE = "agedAsOfDate"
AGED_AS_OF_DATE_DESC = "agedAsOfDate desc"
BALANCE_DUE = "balanceDue"
BALANCE_DUE_DESC = "balanceDue desc"
CURRENCY_CODE = "currencyCode"
CURRENCY_CODE_DESC = "currencyCode desc"
CURRENT_AMOUNT = "currentAmount"
CURRENT_AMOUNT_DESC = "currentAmount desc"
NAME = "name"
NAME_DESC = "name desc"
PERIOD1_AMOUNT = "period1Amount"
PERIOD1_AMOUNT_DESC = "period1Amount desc"
PERIOD2_AMOUNT = "period2Amount"
PERIOD2_AMOUNT_DESC = "period2Amount desc"
PERIOD3_AMOUNT = "period3Amount"
PERIOD3_AMOUNT_DESC = "period3Amount desc"
PERIOD_LENGTH_FILTER = "periodLengthFilter"
PERIOD_LENGTH_FILTER_DESC = "periodLengthFilter desc"
VENDOR_NUMBER = "vendorNumber"
VENDOR_NUMBER_DESC = "vendorNumber desc"
class Enum90(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ACCOUNT_ID = "accountId"
ACCOUNT_NUMBER = "accountNumber"
CREDIT_AMOUNT = "creditAmount"
DEBIT_AMOUNT = "debitAmount"
DESCRIPTION = "description"
DOCUMENT_NUMBER = "documentNumber"
DOCUMENT_TYPE = "documentType"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
POSTING_DATE = "postingDate"
ACCOUNT = "account"
class Enum91(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ASTERISK = "*"
ACCOUNT = "account"
class Enum92(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ACCOUNT_ID = "accountId"
ACCOUNT_NUMBER = "accountNumber"
CREDIT_AMOUNT = "creditAmount"
DEBIT_AMOUNT = "debitAmount"
DESCRIPTION = "description"
DOCUMENT_NUMBER = "documentNumber"
DOCUMENT_TYPE = "documentType"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
POSTING_DATE = "postingDate"
# Enter a parse tree produced by PlSqlParser#container_data_clause.
def enterContainer_data_clause(self, ctx:PlSqlParser.Container_data_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#container_data_clause.
def exitContainer_data_clause(self, ctx:PlSqlParser.Container_data_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#drop_index.
def enterDrop_index(self, ctx:PlSqlParser.Drop_indexContext):
pass
# Exit a parse tree produced by PlSqlParser#drop_index.
def exitDrop_index(self, ctx:PlSqlParser.Drop_indexContext):
pass
# Enter a parse tree produced by PlSqlParser#grant_statement.
def enterGrant_statement(self, ctx:PlSqlParser.Grant_statementContext):
pass
# Exit a parse tree produced by PlSqlParser#grant_statement.
def exitGrant_statement(self, ctx:PlSqlParser.Grant_statementContext):
pass
# Enter a parse tree produced by PlSqlParser#container_clause.
def enterContainer_clause(self, ctx:PlSqlParser.Container_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#container_clause.
def exitContainer_clause(self, ctx:PlSqlParser.Container_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#create_view.
def enterCreate_view(self, ctx:PlSqlParser.Create_viewContext):
pass
# Exit a parse tree produced by PlSqlParser#create_view.
def exitCreate_view(self, ctx:PlSqlParser.Create_viewContext):
pass
# Enter a parse tree produced by PlSqlParser#view_options.
def enterView_options(self, ctx:PlSqlParser.View_optionsContext):
pass
# Exit a parse tree produced by PlSqlParser#view_options.
def exitView_options(self, ctx:PlSqlParser.View_optionsContext):
pass
# Enter a parse tree produced by PlSqlParser#view_alias_constraint.
def enterView_alias_constraint(self, ctx:PlSqlParser.View_alias_constraintContext):
pass
# Exit a parse tree produced by PlSqlParser#view_alias_constraint.
def exitView_alias_constraint(self, ctx:PlSqlParser.View_alias_constraintContext):
pass
# Enter a parse tree produced by PlSqlParser#object_view_clause.
def enterObject_view_clause(self, ctx:PlSqlParser.Object_view_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#object_view_clause.
def exitObject_view_clause(self, ctx:PlSqlParser.Object_view_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#inline_constraint.
def enterInline_constraint(self, ctx:PlSqlParser.Inline_constraintContext):
pass
# Exit a parse tree produced by PlSqlParser#inline_constraint.
def exitInline_constraint(self, ctx:PlSqlParser.Inline_constraintContext):
pass
# Enter a parse tree produced by PlSqlParser#inline_ref_constraint.
def enterInline_ref_constraint(self, ctx:PlSqlParser.Inline_ref_constraintContext):
pass
# Exit a parse tree produced by PlSqlParser#inline_ref_constraint.
def exitInline_ref_constraint(self, ctx:PlSqlParser.Inline_ref_constraintContext):
pass
# Enter a parse tree produced by PlSqlParser#out_of_line_ref_constraint.
def enterOut_of_line_ref_constraint(self, ctx:PlSqlParser.Out_of_line_ref_constraintContext):
pass
# Exit a parse tree produced by PlSqlParser#out_of_line_ref_constraint.
def exitOut_of_line_ref_constraint(self, ctx:PlSqlParser.Out_of_line_ref_constraintContext):
pass
# Enter a parse tree produced by PlSqlParser#out_of_line_constraint.
def enterOut_of_line_constraint(self, ctx:PlSqlParser.Out_of_line_constraintContext):
pass
# Exit a parse tree produced by PlSqlParser#out_of_line_constraint.
def exitOut_of_line_constraint(self, ctx:PlSqlParser.Out_of_line_constraintContext):
pass
# Enter a parse tree produced by PlSqlParser#constraint_state.
def enterConstraint_state(self, ctx:PlSqlParser.Constraint_stateContext):
pass
# Exit a parse tree produced by PlSqlParser#constraint_state.
def exitConstraint_state(self, ctx:PlSqlParser.Constraint_stateContext):
pass
# Enter a parse tree produced by PlSqlParser#create_tablespace.
def enterCreate_tablespace(self, ctx:PlSqlParser.Create_tablespaceContext):
pass
# Exit a parse tree produced by PlSqlParser#create_tablespace.
def exitCreate_tablespace(self, ctx:PlSqlParser.Create_tablespaceContext):
pass
# Enter a parse tree produced by PlSqlParser#permanent_tablespace_clause.
def enterPermanent_tablespace_clause(self, ctx:PlSqlParser.Permanent_tablespace_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#permanent_tablespace_clause.
def exitPermanent_tablespace_clause(self, ctx:PlSqlParser.Permanent_tablespace_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#tablespace_encryption_spec.
def enterTablespace_encryption_spec(self, ctx:PlSqlParser.Tablespace_encryption_specContext):
pass
# Exit a parse tree produced by PlSqlParser#tablespace_encryption_spec.
def exitTablespace_encryption_spec(self, ctx:PlSqlParser.Tablespace_encryption_specContext):
pass
# Enter a parse tree produced by PlSqlParser#logging_clause.
def enterLogging_clause(self, ctx:PlSqlParser.Logging_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#logging_clause.
def exitLogging_clause(self, ctx:PlSqlParser.Logging_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#extent_management_clause.
def enterExtent_management_clause(self, ctx:PlSqlParser.Extent_management_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#extent_management_clause.
def exitExtent_management_clause(self, ctx:PlSqlParser.Extent_management_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#segment_management_clause.
def enterSegment_management_clause(self, ctx:PlSqlParser.Segment_management_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#segment_management_clause.
def exitSegment_management_clause(self, ctx:PlSqlParser.Segment_management_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#flashback_mode_clause.
def enterFlashback_mode_clause(self, ctx:PlSqlParser.Flashback_mode_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#flashback_mode_clause.
def exitFlashback_mode_clause(self, ctx:PlSqlParser.Flashback_mode_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#temporary_tablespace_clause.
def enterTemporary_tablespace_clause(self, ctx:PlSqlParser.Temporary_tablespace_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#temporary_tablespace_clause.
def exitTemporary_tablespace_clause(self, ctx:PlSqlParser.Temporary_tablespace_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#tablespace_group_clause.
def enterTablespace_group_clause(self, ctx:PlSqlParser.Tablespace_group_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#tablespace_group_clause.
def exitTablespace_group_clause(self, ctx:PlSqlParser.Tablespace_group_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#undo_tablespace_clause.
def enterUndo_tablespace_clause(self, ctx:PlSqlParser.Undo_tablespace_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#undo_tablespace_clause.
def exitUndo_tablespace_clause(self, ctx:PlSqlParser.Undo_tablespace_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#tablespace_retention_clause.
def enterTablespace_retention_clause(self, ctx:PlSqlParser.Tablespace_retention_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#tablespace_retention_clause.
def exitTablespace_retention_clause(self, ctx:PlSqlParser.Tablespace_retention_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#datafile_specification.
def enterDatafile_specification(self, ctx:PlSqlParser.Datafile_specificationContext):
pass
# Exit a parse tree produced by PlSqlParser#datafile_specification.
def exitDatafile_specification(self, ctx:PlSqlParser.Datafile_specificationContext):
pass
# Enter a parse tree produced by PlSqlParser#tempfile_specification.
def enterTempfile_specification(self, ctx:PlSqlParser.Tempfile_specificationContext):
pass
# Exit a parse tree produced by PlSqlParser#tempfile_specification.
def exitTempfile_specification(self, ctx:PlSqlParser.Tempfile_specificationContext):
pass
# Enter a parse tree produced by PlSqlParser#datafile_tempfile_spec.
def enterDatafile_tempfile_spec(self, ctx:PlSqlParser.Datafile_tempfile_specContext):
pass
# Exit a parse tree produced by PlSqlParser#datafile_tempfile_spec.
def exitDatafile_tempfile_spec(self, ctx:PlSqlParser.Datafile_tempfile_specContext):
pass
# Enter a parse tree produced by PlSqlParser#redo_log_file_spec.
def enterRedo_log_file_spec(self, ctx:PlSqlParser.Redo_log_file_specContext):
pass
# Exit a parse tree produced by PlSqlParser#redo_log_file_spec.
def exitRedo_log_file_spec(self, ctx:PlSqlParser.Redo_log_file_specContext):
pass
# Enter a parse tree produced by PlSqlParser#autoextend_clause.
def enterAutoextend_clause(self, ctx:PlSqlParser.Autoextend_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#autoextend_clause.
def exitAutoextend_clause(self, ctx:PlSqlParser.Autoextend_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#maxsize_clause.
def enterMaxsize_clause(self, ctx:PlSqlParser.Maxsize_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#maxsize_clause.
def exitMaxsize_clause(self, ctx:PlSqlParser.Maxsize_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#build_clause.
def enterBuild_clause(self, ctx:PlSqlParser.Build_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#build_clause.
def exitBuild_clause(self, ctx:PlSqlParser.Build_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#parallel_clause.
def enterParallel_clause(self, ctx:PlSqlParser.Parallel_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#parallel_clause.
def exitParallel_clause(self, ctx:PlSqlParser.Parallel_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#create_materialized_view_log.
def enterCreate_materialized_view_log(self, ctx:PlSqlParser.Create_materialized_view_logContext):
pass
# Exit a parse tree produced by PlSqlParser#create_materialized_view_log.
def exitCreate_materialized_view_log(self, ctx:PlSqlParser.Create_materialized_view_logContext):
pass
# Enter a parse tree produced by PlSqlParser#new_values_clause.
def enterNew_values_clause(self, ctx:PlSqlParser.New_values_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#new_values_clause.
def exitNew_values_clause(self, ctx:PlSqlParser.New_values_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#mv_log_purge_clause.
def enterMv_log_purge_clause(self, ctx:PlSqlParser.Mv_log_purge_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#mv_log_purge_clause.
def exitMv_log_purge_clause(self, ctx:PlSqlParser.Mv_log_purge_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#create_materialized_view.
def enterCreate_materialized_view(self, ctx:PlSqlParser.Create_materialized_viewContext):
pass
# Exit a parse tree produced by PlSqlParser#create_materialized_view.
def exitCreate_materialized_view(self, ctx:PlSqlParser.Create_materialized_viewContext):
pass
# Enter a parse tree produced by PlSqlParser#create_mv_refresh.
def enterCreate_mv_refresh(self, ctx:PlSqlParser.Create_mv_refreshContext):
pass
# Exit a parse tree produced by PlSqlParser#create_mv_refresh.
def exitCreate_mv_refresh(self, ctx:PlSqlParser.Create_mv_refreshContext):
pass
# Enter a parse tree produced by PlSqlParser#create_table.
def enterCreate_table(self, ctx:PlSqlParser.Create_tableContext):
pass
# Exit a parse tree produced by PlSqlParser#create_table.
def exitCreate_table(self, ctx:PlSqlParser.Create_tableContext):
pass
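# Hedged usage sketch: the enter/exit methods in this listener are the ANTLR-generated
# no-op defaults. A typical pattern is to subclass the listener, override only the rules
# of interest, and drive it with a ParseTreeWalker. The base class name
# `PlSqlParserListener` is an assumption about the generated module.
from antlr4 import ParseTreeWalker

class CreateTableCollector(PlSqlParserListener):
    """Collects the raw text of every CREATE TABLE statement in a parse tree."""
    def __init__(self):
        self.create_tables = []

    def enterCreate_table(self, ctx: PlSqlParser.Create_tableContext):
        self.create_tables.append(ctx.getText())

# walker = ParseTreeWalker()
# walker.walk(CreateTableCollector(), tree)  # `tree` produced elsewhere by PlSqlParser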
# Enter a parse tree produced by PlSqlParser#xmltype_table.
def enterXmltype_table(self, ctx:PlSqlParser.Xmltype_tableContext):
pass
# Exit a parse tree produced by PlSqlParser#xmltype_table.
def exitXmltype_table(self, ctx:PlSqlParser.Xmltype_tableContext):
pass
# Enter a parse tree produced by PlSqlParser#xmltype_virtual_columns.
def enterXmltype_virtual_columns(self, ctx:PlSqlParser.Xmltype_virtual_columnsContext):
pass
# Exit a parse tree produced by PlSqlParser#xmltype_virtual_columns.
def exitXmltype_virtual_columns(self, ctx:PlSqlParser.Xmltype_virtual_columnsContext):
pass
# Enter a parse tree produced by PlSqlParser#xmltype_column_properties.
def enterXmltype_column_properties(self, ctx:PlSqlParser.Xmltype_column_propertiesContext):
pass
# Exit a parse tree produced by PlSqlParser#xmltype_column_properties.
def exitXmltype_column_properties(self, ctx:PlSqlParser.Xmltype_column_propertiesContext):
pass
# Enter a parse tree produced by PlSqlParser#xmltype_storage.
def enterXmltype_storage(self, ctx:PlSqlParser.Xmltype_storageContext):
pass
# Exit a parse tree produced by PlSqlParser#xmltype_storage.
def exitXmltype_storage(self, ctx:PlSqlParser.Xmltype_storageContext):
pass
# Enter a parse tree produced by PlSqlParser#xmlschema_spec.
def enterXmlschema_spec(self, ctx:PlSqlParser.Xmlschema_specContext):
pass
# Exit a parse tree produced by PlSqlParser#xmlschema_spec.
def exitXmlschema_spec(self, ctx:PlSqlParser.Xmlschema_specContext):
pass
# Enter a parse tree produced by PlSqlParser#object_table.
def enterObject_table(self, ctx:PlSqlParser.Object_tableContext):
pass
# Exit a parse tree produced by PlSqlParser#object_table.
def exitObject_table(self, ctx:PlSqlParser.Object_tableContext):
pass
# Enter a parse tree produced by PlSqlParser#oid_index_clause.
def enterOid_index_clause(self, ctx:PlSqlParser.Oid_index_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#oid_index_clause.
def exitOid_index_clause(self, ctx:PlSqlParser.Oid_index_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#oid_clause.
def enterOid_clause(self, ctx:PlSqlParser.Oid_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#oid_clause.
def exitOid_clause(self, ctx:PlSqlParser.Oid_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#object_properties.
def enterObject_properties(self, ctx:PlSqlParser.Object_propertiesContext):
pass
# Exit a parse tree produced by PlSqlParser#object_properties.
def exitObject_properties(self, ctx:PlSqlParser.Object_propertiesContext):
pass
# Enter a parse tree produced by PlSqlParser#object_table_substitution.
def enterObject_table_substitution(self, ctx:PlSqlParser.Object_table_substitutionContext):
pass
# Exit a parse tree produced by PlSqlParser#object_table_substitution.
def exitObject_table_substitution(self, ctx:PlSqlParser.Object_table_substitutionContext):
pass
# Enter a parse tree produced by PlSqlParser#relational_table.
def enterRelational_table(self, ctx:PlSqlParser.Relational_tableContext):
pass
# Exit a parse tree produced by PlSqlParser#relational_table.
def exitRelational_table(self, ctx:PlSqlParser.Relational_tableContext):
pass
# Enter a parse tree produced by PlSqlParser#relational_properties.
def enterRelational_properties(self, ctx:PlSqlParser.Relational_propertiesContext):
pass
# Exit a parse tree produced by PlSqlParser#relational_properties.
def exitRelational_properties(self, ctx:PlSqlParser.Relational_propertiesContext):
pass
# Enter a parse tree produced by PlSqlParser#table_partitioning_clauses.
def enterTable_partitioning_clauses(self, ctx:PlSqlParser.Table_partitioning_clausesContext):
pass
# Exit a parse tree produced by PlSqlParser#table_partitioning_clauses.
def exitTable_partitioning_clauses(self, ctx:PlSqlParser.Table_partitioning_clausesContext):
pass
# Enter a parse tree produced by PlSqlParser#table_range_partition_by_clause.
def enterTable_range_partition_by_clause(self, ctx:PlSqlParser.Table_range_partition_by_clauseContext):
pass
# Exit a parse tree produced by PlSqlParser#table_range_partition_by_clause.
def exitTable_range_partition_by_clause(self, ctx:PlSqlParser.Table_range_partition_by_clauseContext):
pass
# Enter a parse tree produced by PlSqlParser#datatype_null_enable.
def enterDatatype_null_enable(self, ctx:PlSqlParser.Datatype_null_enableContext):
pass
# Exit a parse tree produced by PlSqlParser#datatype_null_enable.
def exitDatatype_null_enable(self, ctx:PlSqlParser.Datatype_null_enableContext):
pass
# Enter a parse tree produced by PlSqlParser#size_clause.
{
'command': 'no ipv6 address {ipv6}',
'doc': 'Unset IPv6 address',
'arguments': [
{
'name': 'ipv6',
'doc': 'X:X::X:X/M Interface IPv6 address',
},
],
},
{
'command': 'ipv6 address {ipv6} secondary',
'doc': 'Set secondary IPv6 address',
'arguments': [
{
'name': 'ipv6',
'doc': 'X:X::X:X/M Interface IPv6 address',
},
],
},
{
'command': 'no ipv6 address {ipv6} secondary',
'doc': 'Unset IPv6 address',
'arguments': [
{
'name': 'ipv6',
'doc': 'X:X::X:X/M Interface IPv6 address',
},
],
},
{
'command': 'shutdown',
'doc': 'Disable an interface.',
'arguments': [],
},
{
'command': 'no shutdown',
'doc': 'Enable an interface.',
'arguments': [],
},
]
}),
('config_interface_loopback', {
'doc': 'Loopback interface configuration.',
'arguments': [
{
'name': 'loopback_id',
'doc': 'Loopback id within range <1-2147483647>'
}
],
'pre_commands':
['config terminal', 'interface loopback {loopback_id}'],
'post_commands': ['end'],
'commands': [
{
'command': 'ip address {ipv4}',
'doc': 'Set IPv4 address for loopback',
'arguments': [
{
'name': 'ipv4',
'doc': 'A.B.C.D/M Loopback IP address.',
},
],
},
{
'command': 'no ip address {ipv4}',
'doc': 'Unset IPv4 address for loopback',
'arguments': [
{
'name': 'ipv4',
'doc': 'A.B.C.D/M Loopback IP address.',
},
],
},
{
'command': 'vrf attach {vrf_name}',
'doc': 'Mapping port to vrf',
'arguments': [
{
'name': 'vrf_name',
'doc': 'Mapping the port to vrf.',
},
],
},
{
'command': 'no vrf attach {vrf_name}',
'doc': 'Unmapping port from vrf',
'arguments': [
{
'name': 'vrf_name',
'doc': 'Unmapping the port from vrf.',
},
],
},
{
'command': 'ipv6 address {ipv6}',
'doc': 'Set IPv6 address on Loopback',
'arguments': [
{
'name': 'ipv6',
'doc': 'X:X::X:X/M Loopback IPv6 address',
},
],
},
{
'command': 'no ipv6 address {ipv6}',
'doc': 'Unset IPv6 address on loopback interface',
'arguments': [
{
'name': 'ipv6',
'doc': 'X:X::X:X/M Loopback IPv6 address',
},
],
},
]
}),
('config_interface_lag', {
'doc': 'Configure link-aggregation parameters.',
'arguments': [
{
'name': 'lag',
'doc': 'LAG number ranges from 1 to 2000.'
}
],
'pre_commands': ['config terminal', 'interface lag {lag}'],
'post_commands': ['end'],
'commands': [
{
'command': 'ip address {ipv4}',
'doc': 'Set IP address',
'arguments': [
{
'name': 'ipv4',
'doc': 'A.B.C.D/M Interface IP address.',
},
],
},
{
'command': 'no ip address {ipv4}',
'doc': 'Unset IP address',
'arguments': [
{
'name': 'ipv4',
'doc': 'A.B.C.D/M Interface IP address.',
},
],
},
{
'command': 'ip address {ipv4} secondary',
'doc': 'Set secondary IP address',
'arguments': [
{
'name': 'ipv4',
'doc': 'A.B.C.D/M Interface IP address.',
},
],
},
{
'command': 'no ip address {ipv4} secondary',
'doc': 'Unset secondary IP address',
'arguments': [
{
'name': 'ipv4',
'doc': 'A.B.C.D/M Interface IP address.',
},
],
},
{
'command': 'vrf attach {vrf_name}',
'doc': 'Mapping port to vrf',
'arguments': [
{
'name': 'vrf_name',
'doc': 'Mapping the port to vrf.',
},
],
},
{
'command': 'no vrf attach {vrf_name}',
'doc': 'Unmapping port from vrf',
'arguments': [
{
'name': 'vrf_name',
'doc': 'Unmapping the port from vrf.',
},
],
},
{
'command': 'ipv6 address {ipv6}',
'doc': 'Set IPv6 address',
'arguments': [
{
'name': 'ipv6',
'doc': 'X:X::X:X/M Interface IPv6 address',
},
],
},
{
'command': 'no ipv6 address {ipv6}',
'doc': 'Unset IPv6 address',
'arguments': [
{
'name': 'ipv6',
'doc': 'X:X::X:X/M Interface IPv6 address',
},
],
},
{
'command': 'ipv6 address {ipv6} secondary',
'doc': 'Set secondary IPv6 address',
'arguments': [
{
'name': 'ipv6',
'doc': 'X:X::X:X/M Interface IPv6 address',
},
],
},
{
'command': 'no ipv6 address {ipv6} secondary',
'doc': 'Unset IPv6 address',
'arguments': [
{
'name': 'ipv6',
'doc': 'X:X::X:X/M Interface IPv6 address',
},
],
},
{
'command': 'shutdown',
'doc': 'Disable an interface.',
'arguments': [],
},
{
'command': 'no shutdown',
'doc': 'Enable an interface.',
'arguments': [],
},
{
'command': 'routing',
'doc': 'Configure interface as L3.',
'arguments': [],
},
{
'command': 'no routing',
'doc': 'Unconfigure interface as L3.',
'arguments': [],
},
{
'command': 'vlan access {vlan_id}',
'doc': 'Access configuration',
'arguments': [
{
'name': 'vlan_id',
'doc': '<1-4094> VLAN identifier'
}
],
},
{
'command': 'no vlan access {vlan_id}',
'doc': 'Remove vlan access',
'arguments': [
{
'name': 'vlan_id',
'doc': '<1-4094> VLAN identifier'
}
],
},
{
'command': 'vlan trunk allowed {vlan_id}',
'doc': 'Allow VLAN on the trunk port',
'arguments': [
{
'name': 'vlan_id',
'doc': '<1-4094> VLAN identifier'
}
],
},
{
'command': 'no vlan trunk allowed {vlan_id}',
'doc': 'Disallow VLAN on the trunk port',
'arguments': [
{
'name': 'vlan_id',
'doc': '<1-4094> VLAN identifier'
}
],
},
{
'command': 'vlan trunk native tag',
'doc': 'Tag configuration on the trunk port',
'arguments': [],
},
{
'command': 'no vlan trunk native tag',
'doc': 'Remove tag configuration on the trunk port',
'arguments': [],
},
{
'command': 'vlan trunk native {vlan_id}',
'doc': 'Native VLAN on the trunk port',
'arguments': [
{
'name': 'vlan_id',
'doc': '<1-4094> VLAN identifier'
}
],
},
{
'command': 'no vlan trunk native {vlan_id}',
'doc': 'Remove native VLAN on the trunk port',
'arguments': [
{
'name': 'vlan_id',
'doc': '<1-4094> VLAN identifier'
}
],
},
{
'command': 'lacp mode passive',
'doc': 'Sets an interface as LACP passive.',
'arguments': [],
},
{
'command': 'no lacp mode passive',
'doc': 'Sets an LACP passive interface off.',
'arguments': [],
},
{
'command': 'lacp mode active',
'doc': 'Sets an interface as LACP active.',
'arguments': [],
},
{
'command': 'no lacp mode active',
'doc': 'Sets an LACP active interface off.',
'arguments': [],
},
{
'command': 'lacp fallback',
'doc': 'Enable LACP fallback mode.',
'arguments': [],
},
{
'command': 'no lacp fallback',
'doc': 'Disable LACP fallback mode.',
'arguments': [],
},
{
'command': 'lacp fallback mode priority',
'doc': 'Set fallback mode to priority.',
'arguments': [],
},
{
'command': 'lacp fallback mode all_active',
'doc': 'Set fallback mode to all_active.',
'arguments': [],
},
{
'command': 'no lacp fallback mode all_active',
'doc': 'Set fallback mode to priority.',
'arguments': [],
},
{
'command': 'lacp fallback timeout {timeout}',
'doc': 'Set LACP fallback timeout.',
'arguments': [
{
'name': 'timeout',
'doc': '<1-900> LACP fallback timeout'
}
],
},
{
'command': 'no lacp fallback timeout {timeout}',
'doc': 'Set LACP fallback timeout to zero.',
'arguments': [
{
'name': 'timeout',
'doc': '<1-900> LACP fallback timeout'
}
],
},
{
'command': 'hash l2-src-dst',
'doc': 'Base the hash on l2-src-dst.',
'arguments': [],
},
{
'command': 'hash l3-src-dst',
'doc': 'Base the hash on l3-src-dst.',
'arguments': [],
},
{
'command': 'hash l4-src-dst',
'doc': 'Base the hash on l4-src-dst.',
'arguments': [],
},
{
'command': 'lacp rate fast',
'doc': 'Request LACP heartbeats at the fast rate of once per second.',
'arguments': [],
},
{
'command': 'no lacp rate fast',
'doc': 'Request LACP heartbeats at the slow rate of once every 30 seconds.',
'arguments': [],
},
{
'command': 'apply qos schedule-profile {schedule_profile_name}',
'doc': 'Apply qos profiles on an interface.',
'arguments': [
{
'name': 'schedule_profile_name',
'doc': 'The schedule profile to apply.'
}
],
},
{
'command': 'no apply qos schedule-profile',
'doc': 'Clears qos profiles from an interface.',
'arguments': [
{
'name': 'schedule_profile_name',
'doc': 'The schedule profile to clear.',
'optional': True
}
],
},
{
'command': 'qos dscp {dscp_map_index}',
'doc': 'Set the dscp override for the port.',
'arguments': [
{
'name': 'dscp_map_index',
'doc': 'The index into the dscp map.'
}
],
},
{
'command': 'no qos dscp',
'doc': 'Remove the dscp override for the port.',
'arguments': [],
},
{
'command': 'qos trust {value}',
'doc': 'Set the qos trust mode for the port.',
'arguments': [
{
'name': 'value',
'doc': 'The qos trust mode to set.'
}
],
},
{
'command': 'no qos trust',
'doc': 'Remove the qos trust mode for the port.',
'arguments': [],
},
{
'command': 'apply access-list {type} {access_list} '
'{direction}',
'doc': 'Apply ACL on interface',
'arguments': [
{
'name': 'type',
'doc': 'Access-list type (e.g., ip or ipv6).'
},
{
'name': 'access_list',
'doc': 'Access-list name.'
},
{
'name': 'direction',
'doc': 'Apply to this traffic direction (in | out).'
}
],
},
{
'command': 'no apply access-list {type} {access_list} '
'{direction}',
'doc': 'Remove ACL from interface',
'arguments': [
{
'name': 'type',
'doc': 'Access-list type (e.g., ip or ipv6).'
},
{
'name': 'access_list',
'doc': 'Access-list name.'
},
{
'name': 'direction',
'doc': 'Apply to this traffic direction (in | out).'
}
],
},
{
'command': 'apply access-list ip {acl_name}
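# Hedged sketch (illustration only): one way a code generator or runtime shim might
# consume an entry of this command specification. The `send_command` callable and the
# name of the surrounding spec container are assumptions; the keys 'pre_commands',
# 'commands', 'command' and 'post_commands' are taken from the entries above.
def run_context_command(send_command, spec_entry, command_index, **kwargs):
    """Enter the configuration context, run one templated command, then leave it."""
    for pre in spec_entry['pre_commands']:
        send_command(pre.format(**kwargs))
    send_command(spec_entry['commands'][command_index]['command'].format(**kwargs))
    for post in spec_entry['post_commands']:
        send_command(post.format(**kwargs))

# Example (hypothetical spec variable):
# run_context_command(print, loopback_spec, 0, loopback_id=1, ipv4='10.0.0.1/24')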
import numpy as np
import matplotlib
matplotlib.use('Agg')
import networkx as nx
import csv
from numpy import ma
import dynesty
import corner
import copy
import matplotlib.pyplot as plt
import random as rnd
from multiprocessing import Pool, cpu_count
import INoDS_convenience_functions as nf
import warnings
import scipy.stats as ss
import time
import itertools
import pandas as pd
from dynesty import plotting as dyplot
from dynesty.utils import resample_equal
from dynesty import utils as dyfunc
from dynesty.dynamicsampler import stopping_function, weight_function, _kld_error
np.seterr(invalid='ignore')
np.seterr(divide='ignore')
warnings.simplefilter("ignore")
warnings.warn("deprecated", DeprecationWarning)
#########################################################################
def diagnosis_adjustment(G, network, p, nodelist,contact_daylist, recovery_prob, max_recovery_time, node_health_new, health_data_new, seed_date, network_min_date):
###ensure that the proposal do not include 0 and are <1
diag_list = [min(max(num,0.000001),1) for num in p['diag_lag'][0]]
##compute lagged time for each infection time
lag_dict = [(node, time1, time2, int(ss.randint.ppf(diag_lag, 0, len(contact_daylist[network][(node, time1, time2)])))) for (node, time1, time2), diag_lag in zip(sorted(contact_daylist[network]), diag_list)]
## pick out corresponding date from contact_daylist
new_infection_time= [(node, time1, time2, contact_daylist[network][(node, time1, time2)][lag_pos]) for (node, time1, time2, lag_pos) in lag_dict]
##order = node, old infection time, old recovery time, new infection time and new recovery time
new_infect_recovery_time = [(node, time1, time2, new_time1, time2) for (node, time1, time2, new_time1) in new_infection_time]
#########################################################
# imputing recovery date##
##########################################################
if recovery_prob:
###ensure that the proposal recovery times do not include 0 and are <1
recovery_list = [min(max(num,0.000001),1) for num in p['gamma'][0]]
## pick out corresponding recovery date (+1 to include period after time2 and time including max_recovery_time)
new_infect_recovery_time = [(node, time1, time2, new_time1, int(ss.randint.ppf(recovery_param, time2, max_recovery_time[(node, time1, time2)]+1))) for (node, time1, time2, new_time1, new_time2), recovery_param in zip(sorted(new_infect_recovery_time), recovery_list)]
##########################################################
for (node, time1, time2, new_time1, new_time2) in new_infect_recovery_time:
node_health_new[node][1].remove((time1, time2))
node_health_new[node][1].append((new_time1, new_time2))
health_data_new[node] = {day: 1 for day in range(new_time1, new_time2+1)}
infected_strength={}
infected_strength[network] = {node:{time: calculate_infected_strength(node, time, health_data_new, G) for time in G.keys()} for node in nodelist}
healthy_nodelist = return_healthy_nodelist(node_health_new ,seed_date, network_min_date)
#create infection date list
infection_date = [(node, new_time1) for (node, time1, time2, new_time1, new_time2) in new_infect_recovery_time if new_time1!= seed_date and new_time1 > network_min_date]
infection_date = sorted(infection_date)
return infected_strength, healthy_nodelist, infection_date
#######################################################################
def log_likelihood(parameters, data, infection_date, infected_strength, healthy_nodelist, null_comparison, diagnosis_lag, recovery_prob, nsick_param, contact_daylist, max_recovery_time, network_min_date, parameter_estimate):
r"""Computes the log-likelihood of network given infection data """
if null_comparison:
G_raw, health_data, node_health, nodelist, truth, time_min, time_max, seed_date,parameter_estimate = data
health_data_new = copy.deepcopy(health_data)
node_health_new = copy.deepcopy(node_health)
p = to_params(parameters, null_comparison, diagnosis_lag, nsick_param, recovery_prob, parameter_estimate)
network =round(p['model'][0],2)
G = G_raw[network]
else:
G_raw, health_data, node_health, nodelist, truth, time_min, time_max, seed_date = data
health_data_new = copy.deepcopy(health_data)
node_health_new = copy.deepcopy(node_health)
p = to_params(parameters, null_comparison, diagnosis_lag, nsick_param, recovery_prob, parameter_estimate)
network = 0
G= G_raw[network]
###############################################################################################
##diagnosis lag==
##impute true infection date and recovery date (if SIR/SIS...)
## infection_date = date picked as a day between last healthy report and first sick report
## and when the degree of node was >0 the previous day
##recovery_date = date picked as day with uniform probability between first reported sick day and first
##healthy date after sick report
##################################################################################################
if diagnosis_lag:
infected_strength, healthy_nodelist, infection_date = diagnosis_adjustment(G, network, p, nodelist, contact_daylist, recovery_prob, max_recovery_time, node_health_new, health_data_new, seed_date, network_min_date)
################################################################
##Calculate rate of learning for all sick nodes at all sick #
## dates, but not when sick day is the seed date (i.e., the #
## first report of the infection in the network #
################################################################
overall_learn_raw = np.array([calculate_lambda1(p['beta'][0], p['epsilon'][0], infected_strength[network], focal_node, sick_day) for (focal_node, sick_day) in infection_date])
overall_learn = np.log(np.maximum(overall_learn_raw, 0.000001))
################################################################
##Calculate rate of NOT learning for all the days the node was #
## (either reported or inferred) healthy #
################################################################
overall_not_learn_raw = not_learned_rate(healthy_nodelist, p['beta'][0],p['epsilon'][0], infected_strength[network], seed_date, network_min_date)
overall_not_learn_raw = np.maximum(overall_not_learn_raw, 0.000001)
overall_not_learn = np.log(overall_not_learn_raw)
###########################################################
## Calculate overall log likelihood #
###########################################################
loglike = overall_learn.sum() + overall_not_learn.sum()
#print (p['beta'][0], p['epsilon'][0], network, loglike),
if np.isinf(loglike) or np.isnan(loglike) or (loglike==0):return -np.inf
else: return loglike
#############################################################################
def not_learned_rate(healthy_nodelist, beta, epsilon, infected_strength_network, seed_date, network_min_date):
r""" Calculate 1 - lambda for every healthy (node, day) pair and return
the values as an array; the caller takes the logs and sums them"""
return np.array([1-calculate_lambda1(beta, epsilon, infected_strength_network, focal_node, date) for (focal_node, date) in healthy_nodelist])
##############################################################################
def return_healthy_nodelist(node_health1, seed_date, network_min_date):
r""" healthy_nodelist is a list. Format = [(node1, day1), ...]
where node1 is a node with reported health data and day1 is a day
on which the node is uninfected"""
healthy_nodelist = [(node, date1) for node in node_health1 if 0 in node_health1[node] for date1 in [date for (hd1, hd2) in node_health1[node][0] for date in range(hd1, hd2+1)] if date1!=seed_date and date1>network_min_date]
return healthy_nodelist
###############################################################################
def calculate_lambda1(beta1, epsilon1, infected_strength_network, focal_node, date):
r""" This function calculates the infection potential of the
focal_node based on (a) its infected_strength at the previous time step (date-1),
and (b) transmission potential unexplained by the individual's network connections."""
try:
return 1-(np.exp(-(beta1*infected_strength_network[focal_node][date-1] + epsilon1)))
except KeyError:
print ("Could not calculate lambda for node and date", focal_node, date-1)
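# Hedged worked example for calculate_lambda1 (numbers are made up): with
# beta1 = 0.2, epsilon1 = 0.01 and an infected strength of 3.0 on the previous day,
# the per-day infection probability is
#     1 - exp(-(0.2*3.0 + 0.01)) = 1 - exp(-0.61) ~= 0.457,
# i.e. roughly a 46% chance that the focal node acquires the infection on that day.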
################################################################################
def calculate_infected_strength(node, time1, health_data_new, G):
r""" This function calculates the infected strength of focal node = node
as the sum of the weighted edge connections of the node at time=time1. Only
those nodes are considered that are reported as sick (= 1) at time1."""
## infected strength is sum of all edge weights of focal nodes connecting to infected nodes
## NOTE: health_data_new[node_i].get(time1) checks if time1 is present in health_data[node_i] AND if the value is 1
if time1 in G and node in G[time1].nodes():
strength = [G[time1][node][node_i]["weight"] for node_i in G[time1].neighbors(node) if (node_i in health_data_new and health_data_new[node_i].get(time1))]
else: strength=[]
return sum(strength)
################################################################################
def to_params(arr, null_comparison, diagnosis_lag, nsick_param, recovery_prob, parameter_estimate):
r""" Converts a numpy array into a array with named fields"""
# Note gamma is estimated only when there is a diagnosis lag
if diagnosis_lag and recovery_prob:
if null_comparison:
arr2 = np.array(parameter_estimate+ list(arr))
return arr2.view(np.dtype([('beta', float),
('epsilon', float),
('gamma', float, nsick_param),
('diag_lag', float, nsick_param),
('model', float)]))
return arr.view(np.dtype([('beta', float),
('epsilon', float),
('gamma', float, nsick_param),
('diag_lag', float, nsick_param)]))
elif diagnosis_lag:
if null_comparison:
arr2 = np.array(parameter_estimate + list(arr))
return arr2.view(np.dtype([('beta', float),
('epsilon', float),
('diag_lag', float, nsick_param),
('model', float)]))
return arr.view(np.dtype([('beta', float),
('epsilon', float),
('diag_lag', float, nsick_param)]))
if null_comparison:
arr2 = np.array(parameter_estimate + list(arr))
return arr2.view(np.dtype([('beta', float),
('epsilon', float),
('model', float)]))
return arr.view(np.dtype([('beta', float),
('epsilon', float)]))
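# Hedged illustration of the structured view returned by to_params, assuming the
# simplest case (no diagnosis lag, no null comparison, two free parameters):
#     p = to_params(np.array([0.2, 0.01]), null_comparison=False, diagnosis_lag=False,
#                   nsick_param=0, recovery_prob=False, parameter_estimate=None)
#     p['beta'][0]    -> 0.2
#     p['epsilon'][0] -> 0.01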
#############################################################################
def prior_transform(parameters):
"""Transforms our unit-cube samples `u` to a flat prior in each variable."""
#min and max for beta and epsilon
##although beta and epsilon do not have an upper bound, specify a large upper bound to prevent runaway samplers
aprime = np.array(parameters[0:2])
amin = 0
amax = 10
##min max for other param estimates
bprime = np.array(parameters[2:])
bmin = 0
bmax = 1
a = aprime*(amax-amin) + amin # convert back to a
b = bprime*(bmax-bmin) + bmin # convert back to b
return tuple(list(a)+list(b))
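# Hedged worked example for prior_transform: with three estimated parameters, a
# unit-cube draw of [0.5, 0.1, 0.25] maps beta and epsilon onto [0, 10] and the
# remaining parameter onto [0, 1]:
#     prior_transform(np.array([0.5, 0.1, 0.25]))  ->  (5.0, 1.0, 0.25)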
#############################################################################
def prior_transform_null(parameter):
"""Transforms our unit-cube samples `u` to a flat prior in each variable."""
#min and max for beta and epsilon
##although beta and epsilon do not have an upper bound, specify a large upper bound to prevent runaway samplers
aprime = parameter
amin = 0.1
amax = 1
a = aprime*(amax-amin) + amin # convert back to a
return tuple(a)
#############################################################################
def prior_transform_alternate(parameter):
"""Transforms our unit-cube samples `u` to a flat prior in each variable."""
#min and max for beta and epsilon
##although beta and epsilon do not have an upper bound, specify a large upper bound to prevent runaway samplers
aprime = parameter
amin = 0.0
amax = 0.01
a = aprime*(amax-amin) + amin # convert back to a
#print (parameter, a)
return tuple(a)
#######################################################################
def start_sampler(data, recovery_prob, verbose, contact_daylist, max_recovery_time, nsick_param, output_filename, diagnosis_lag=False, null_comparison=False, **kwargs3):
r"""Sampling performed using dynesty nested sampling """
parameter_estimate=None
##############################################################################
G_raw, health_data, node_health, nodelist, true_value, time_min, time_max, seed_date =data
######################################
### Set number of parameters to estimate
######################################
ndim_base = 2
if recovery_prob: ndim_base += nsick_param
ndim = ndim_base+nsick_param
################################################################################
##calculating infection date and infection strength outside loglik to speed up #
##computations
################################################################################
network_min_date = min(G_raw.keys())
if not diagnosis_lag:
######################################################################
infection_date = [(node, time1) for node in node_health if 1 in node_health[node] for (time1,time2) in node_health[node][1]]
## remove days in infection_date if the day is either the seed_date or before network_min_date
infection_date = [(node, time1) for (node, time1) in infection_date if time1!=seed_date and time1 > network_min_date]
infection_date = sorted(infection_date)
######################################################################
##for parameter estimate step we need data for the empirical network only
infected_strength = {0:{node:{time: calculate_infected_strength(node, time, health_data, G_raw[0]) for time in range(time_min, time_max+1)} for node in nodelist}}
else:
infection_date = None
infected_strength=None
healthy_nodelist = return_healthy_nodelist(node_health, seed_date, network_min_date)
################################################################################
pool = Pool()
if ndim<3:
sampler = dynesty.DynamicNestedSampler(log_likelihood, prior_transform, ndim=ndim, pool=pool, queue_size=cpu_count()-1, use_pool={'propose_point': False}, logl_args =[data, infection_date, infected_strength, healthy_nodelist, null_comparison, diagnosis_lag, recovery_prob, nsick_param, contact_daylist, max_recovery_time, network_min_date, parameter_estimate] )
sampler.run_nested(print_progress = verbose)
else:
thresh = 0.01
maxc = 10000
sampler = dynesty.DynamicNestedSampler(log_likelihood, prior_transform, ndim=ndim, pool=pool, queue_size=cpu_count()-1, use_pool={'update_bound': False}, dlogz=thresh,logl_args =[data, infection_date, infected_strength, healthy_nodelist, null_comparison, diagnosis_lag, recovery_prob, nsick_param, contact_daylist, max_recovery_time, network_min_date, parameter_estimate])
ncall = sampler.ncall
niter = sampler.it - 1
for results in sampler.sample_initial(maxcall=maxc):
ncall += results[9]
niter += 1
delta_logz = results[-1]
#print('dlogz ' + str(delta_logz), 'thresh ' + str(thresh), 'nc ' + str(ncall), 'niter ' + str(niter), "log", results[3])
pass
stop, stop_vals = stopping_function(sampler.results, args = { 'post_thresh': 0.05}, return_vals=True)
while True:
stop, stop_vals = stopping_function(sampler.results, return_vals=True) # evaluate stop
if not stop:
logl_bounds = weight_function(sampler.results) # derive | |
handler.sync_with_handlers(logging.root.handlers)
handler.close()
break
# Redefine the handler to None so it can be garbage collected
setup_temp_handler.__handler__ = None
# Override the python's logging logger class as soon as this module is imported
if logging.getLoggerClass() is not SaltLoggingClass:
# Import pip._internal which itself will install its own custom logging handler
# we want to override that handler with ours
try:
import pip._internal.utils._log as pip_log_module # pylint: disable=no-name-in-module,import-error
except ImportError:
pip_log_module = None
logging.setLoggerClass(SaltLoggingClass)
logging.addLevelName(QUIET, "QUIET")
logging.addLevelName(PROFILE, "PROFILE")
logging.addLevelName(TRACE, "TRACE")
logging.addLevelName(GARBAGE, "GARBAGE")
if pip_log_module is not None:
# Let's make newer versions of pip work by patching SaltLoggingClass to
# add a verbose method which is what pip expects
SaltLoggingClass.verbose = SaltLoggingClass.debug
if not logging.root.handlers:
# No configuration to the logging system has been done so far.
# Set the root logger at the lowest level possible
logging.root.setLevel(GARBAGE)
# Add a permanent null handler so that we never get messages like:
# No handlers could be found for logger 'foo'
setup_temp_handler()
logging.root.addHandler(get_temp_handler())
# Now that we defined the default logging logger class, we can instantiate our logger
# DO NOT MOVE THIS
log = logging.getLogger(__name__)
def get_console_handler():
"""
Get the console stream handler
"""
try:
return setup_console_handler.__handler__
except AttributeError:
return
def is_console_handler_configured():
"""
Is the console stream handler configured
"""
return get_console_handler() is not None
def shutdown_console_handler():
"""
Shutdown the console stream handler
"""
console_handler = get_console_handler()
if console_handler is not None:
logging.root.removeHandler(console_handler)
console_handler.close()
setup_console_handler.__handler__ = None
atexit.unregister(shutdown_console_handler)
def setup_console_handler(log_level=None, log_format=None, date_format=None):
"""
Setup the console stream handler
"""
if is_console_handler_configured():
log.warning("Console logging already configured")
return
atexit.register(shutdown_console_handler)
log.trace(
"Setting up console logging: %s",
dict(log_level=log_level, log_format=log_format, date_format=date_format),
)
if log_level is None:
log_level = logging.WARNING
log_level = get_logging_level_from_string(log_level)
set_log_record_factory(SaltColorLogRecord)
handler = None
for handler in logging.root.handlers:
if handler is get_temp_handler():
continue
if not hasattr(handler, "stream"):
# Not a stream handler, continue
continue
if handler.stream is sys.stderr:
# There's already a logging handler outputting to sys.stderr
break
else:
handler = StreamHandler(sys.stderr)
handler.setLevel(log_level)
# Set the default console formatter config
if not log_format:
log_format = DFLT_LOG_FMT_CONSOLE
if not date_format:
date_format = DFLT_LOG_DATEFMT
formatter = logging.Formatter(log_format, datefmt=date_format)
handler.setFormatter(formatter)
logging.root.addHandler(handler)
setup_console_handler.__handler__ = handler
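# Hedged usage sketch (not part of the original module): a typical bootstrap using the
# console helpers above. The "debug" level string is an assumption about what
# get_logging_level_from_string() accepts.
#
#     setup_console_handler(log_level="debug",
#                           log_format="%(asctime)s [%(levelname)-8s] %(message)s")
#     log.debug("console logging is now active")
#     shutdown_console_handler()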
def get_logfile_handler():
"""
Get the log file handler
"""
try:
return setup_logfile_handler.__handler__
except AttributeError:
return
def is_logfile_handler_configured():
"""
Is the log file handler configured
"""
return get_logfile_handler() is not None
def shutdown_logfile_handler():
"""
Shutdown the log file handler
"""
logfile_handler = get_logfile_handler()
if logfile_handler is not None:
logging.root.removeHandler(logfile_handler)
logfile_handler.close()
setup_logfile_handler.__handler__ = None
atexit.unregister(shutdown_logfile_handler)
def setup_logfile_handler(
log_path,
log_level=None,
log_format=None,
date_format=None,
max_bytes=0,
backup_count=0,
user=None,
):
"""
Setup the log file handler
Since version 0.10.6 we support logging to syslog, some examples:
tcp://localhost:514/LOG_USER
tcp://localhost/LOG_DAEMON
udp://localhost:5145/LOG_KERN
udp://localhost
file:///dev/log
file:///dev/log/LOG_SYSLOG
file:///dev/log/LOG_DAEMON
The above examples are self-explanatory, but the general form is:
<file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>
If you are considering remote logging, you might be tempted to point
Salt's logging directly at the remote syslog. **Please don't!**
An issue has been reported when doing this over TCP where the logged lines
get concatenated. See #3061.
The preferred way to do remote logging is to set up a local syslog, point
Salt's logging at the local syslog (a unix socket is much faster), and then
have the local syslog forward the log messages to the remote syslog.
"""
if is_logfile_handler_configured():
log.warning("Logfile logging already configured")
return
atexit.register(shutdown_logfile_handler)
log.trace(
"Setting up log file logging: %s",
dict(
log_path=log_path,
log_level=log_level,
log_format=log_format,
date_format=date_format,
max_bytes=max_bytes,
backup_count=backup_count,
user=user,
),
)
if log_path is None:
log.warning("log_path setting is set to `None`. Nothing else to do")
return
if log_level is None:
log_level = logging.WARNING
log_level = get_logging_level_from_string(log_level)
parsed_log_path = urllib.parse.urlparse(log_path)
if parsed_log_path.scheme in ("tcp", "udp", "file"):
syslog_opts = {
"facility": SysLogHandler.LOG_USER,
"socktype": socket.SOCK_DGRAM,
}
if parsed_log_path.scheme == "file" and parsed_log_path.path:
path = pathlib.Path(parsed_log_path.path)
facility_name = path.stem.upper()
try:
if not facility_name.startswith("LOG_"):
# The user is not specifying a syslog facility
facility_name = "LOG_USER" # Syslog default
syslog_opts["address"] = str(path.resolve())
else:
# The user has set a syslog facility, let's update the path to
# the logging socket
syslog_opts["address"] = str(path.resolve().parent)
except OSError as exc:
raise LoggingRuntimeError(
"Failed to setup the Syslog logging handler: {}".format(exc)
) from exc
elif parsed_log_path.path:
# In case of udp or tcp with a facility specified
path = pathlib.Path(parsed_log_path.path)
facility_name = path.stem.upper()
if not facility_name.startswith("LOG_"):
# Logging facilities start with LOG_; if this is not the case,
# fail right now!
raise LoggingRuntimeError(
"The syslog facility '{}' is not known".format(facility_name)
)
else:
# This is the case of udp or tcp without a facility specified
facility_name = "LOG_USER" # Syslog default
facility = getattr(SysLogHandler, facility_name, None)
if facility is None:
# This python syslog version does not know about the user provided
# facility name
raise LoggingRuntimeError(
"The syslog facility '{}' is not known".format(facility_name)
)
syslog_opts["facility"] = facility
if parsed_log_path.scheme in ("tcp", "udp"):
syslog_opts["address"] = (
parsed_log_path.hostname,
parsed_log_path.port or logging.handlers.SYSLOG_UDP_PORT,
)
if parsed_log_path.scheme == "tcp":
syslog_opts["socktype"] = socket.SOCK_STREAM
elif parsed_log_path.scheme == "file":
syslog_opts.pop("socktype", None)
try:
# Et voilà! Finally our syslog handler instance
handler = SysLogHandler(**syslog_opts)
except OSError as exc:
raise LoggingRuntimeError(
"Failed to setup the Syslog logging handler: {}".format(exc)
) from exc
else:
# Make sure the logging directory exists and attempt to create it if necessary
if user is None:
import salt.utils.user
user = salt.utils.user.get_user()
import salt.utils.files
import salt.utils.verify
# Logfile is not using Syslog, verify
with salt.utils.files.set_umask(0o027):
salt.utils.verify.verify_log_files([log_path], user)
try:
# Logfile logging is UTF-8 on purpose.
# Since salt uses YAML and YAML uses either UTF-8 or UTF-16, if a
# user is not using plain ASCII, their system should be ready to
# handle UTF-8.
if max_bytes > 0:
handler = RotatingFileHandler(
log_path,
mode="a",
maxBytes=max_bytes,
backupCount=backup_count,
encoding="utf-8",
delay=0,
)
else:
handler = WatchedFileHandler(
log_path, mode="a", encoding="utf-8", delay=0
)
except OSError:
log.warning(
"Failed to open log file, do you have permission to write to %s?",
log_path,
)
# Do not proceed with any more configuration since it will fail, we
# have the console logging already setup and the user should see
# the error.
return
handler.setLevel(log_level)
if not log_format:
log_format = DFLT_LOG_FMT_LOGFILE
if not date_format:
date_format = DFLT_LOG_DATEFMT_LOGFILE
formatter = logging.Formatter(log_format, datefmt=date_format)
handler.setFormatter(formatter)
logging.root.addHandler(handler)
setup_logfile_handler.__handler__ = handler
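# Hedged usage sketch (not part of Salt itself): wiring up the console and log
# file handlers defined above. The file path and the syslog URL are made-up
# examples; the accepted URL forms are the ones listed in the
# setup_logfile_handler docstring.
def _example_setup_logging():
    setup_console_handler(log_level="info")
    # Plain file logging with rotation
    setup_logfile_handler(
        "/var/log/salt/minion",
        log_level="warning",
        max_bytes=10 * 1024 * 1024,
        backup_count=3,
    )
    # Alternatively, log to a local syslog socket with an explicit facility:
    # setup_logfile_handler("file:///dev/log/LOG_DAEMON")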
def get_extended_logging_handlers():
"""
Get the extended logging handlers
"""
try:
return setup_extended_logging.__handlers__
except AttributeError:
return
def is_extended_logging_configured():
"""
Are the extended logging handlers configured
"""
extended_logging_handlers = get_extended_logging_handlers()
if extended_logging_handlers is None:
return False
return True
def shutdown_extended_logging():
"""
Shutdown the extended logging handlers
"""
extended_logging_handlers = get_extended_logging_handlers()
if extended_logging_handlers:
for handler in extended_logging_handlers:
logging.root.removeHandler(handler)
handler.close()
atexit.unregister(shutdown_extended_logging)
setup_extended_logging.__handlers__ = None
def setup_extended_logging(opts):
"""
Setup the extended logging handlers, internal or external
"""
if is_extended_logging_configured() is True:
# Don't re-configure external loggers
return
# Explicit late import of Salt's loader
import salt.loader
# Be sure to always shut down extended logging on process termination
atexit.register(shutdown_extended_logging)
# Let's keep a reference to the current logging handlers
initial_handlers = logging.root.handlers[:]
# Load any additional logging handlers
providers = salt.loader.log_handlers(opts)
# Let's keep track of the new logging handlers so we can sync the stored
# log records with them
additional_handlers = []
for name, get_handlers_func in providers.items():
log.info("Processing 'log_handlers.%s'", name)
# Keep a reference to the logging handlers count before getting the
# possible additional ones.
initial_handlers_count = len(logging.root.handlers)
handlers = get_handlers_func()
if isinstance(handlers, types.GeneratorType):
handlers = list(handlers)
elif handlers is False or handlers == [False]:
# A false return value means not configuring any logging handler on
# purpose
log.info(
"The `log_handlers.%s.setup_handlers()` function returned "
"`False` which means no logging handler was configured on "
"purpose. Continuing...",
name,
)
continue
else:
# Make sure we have an iterable
handlers = [handlers]
for handler in handlers:
if not handler and len(logging.root.handlers) == initial_handlers_count:
log.info(
"The `log_handlers.%s`, did not return any handlers and the "
"global handlers count did not increase. This could be a sign "
"that `log_handlers.%s` is not working as expected.",
name,
name,
)
continue
log.debug("Adding the '%s' provided logging handler: '%s'", name, handler)
additional_handlers.append(handler)
logging.root.addHandler(handler)
for handler in logging.root.handlers:
if handler in initial_handlers:
continue
additional_handlers.append(handler)
setup_extended_logging.__handlers__ = additional_handlers
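# Hedged sketch (not part of Salt): the shape of a custom `log_handlers`
# extension module's `setup_handlers()` function that the loop above would pick
# up via salt.loader.log_handlers(opts). The log file path is a made-up
# example; returning a single handler, a list, a generator, or False are all
# handled by setup_extended_logging.
def _example_extended_setup_handlers():
    handler = logging.FileHandler("/tmp/extra-salt.log", encoding="utf-8")
    handler.setLevel(logging.INFO)
    return handler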
def setup_log_granular_levels(log_granular_levels):
"""
Setup the log granular levels
import functools
import os
import ssl
import uuid
from typing import Any, Dict, List, Literal, Optional, Tuple
import jinja2
import pydantic
import yaml
from .exceptions import TplBuildException, TplBuildTemplateException
RESERVED_PROFILE_KEYS = {
"begin_stage",
"platform",
}
def _normalize_rel_path(path: str) -> str:
"""Normalize and coerce a path into a relative path."""
return f".{os.path.sep}{os.path.normpath(os.path.join(os.path.sep, path))[1:]}"
class TplContextConfig(pydantic.BaseModel):
"""
Config model representing a build context.
"""
#: The base directory (relative to the config base directory) of
#: the build context. This must be a relative path and cannot point
#: above the config base directory.
base_dir: str = "."
#: The umask as a three digit octal string. This may also be set to
#: None if the context permissions should be passed through directly.
umask: Optional[str] = "022"
#: The ignore_file to load patterns from. If this and :attr:`ignore`
#: are both None then this will attempt to load ".dockerignore", using
#: an empty list of patterns if that cannot be loaded.
ignore_file: Optional[str] = None
#: Ignore file string. If present this will be used over :attr:`ignore_file`.
ignore: Optional[str] = None
@pydantic.validator("umask")
def umask_valid_octal(cls, v):
"""Ensure that umask is three-digit octal sequence"""
if v is None:
return v
if not 0 <= int(v, 8) <= 0o777:
raise ValueError("umask out of range")
return v
@pydantic.validator("base_dir")
def normalize_base_dir(cls, v):
"""Normalize the base directory"""
return _normalize_rel_path(v)
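# Hedged usage sketch: the values below are made-up examples. On POSIX the
# base_dir validator normalizes "src" to "./src"; an out-of-range umask such as
# "1777" is rejected by umask_valid_octal above with a pydantic ValidationError.
_example_context_config = TplContextConfig(base_dir="src", umask="022")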
class ClientCommand(pydantic.BaseModel):
"""
Configuration to invoke an external build command.
Typically both :attr:`args` and the values of :attr:`environment` will be
subject to keyword substitutions. For instance build commands will substitute
any instance of the string "{image}" with the desired image tag. This is meant to
be similar to the typical Python format implementation (although it does not
use `str.format` for security reasons).
"""
#: A jinja template used to construct invoke arguments and environment
#: variables based on the template arguments passed. Depending on the
#: build command different template arguments may be passed. All templates
#: are passed an `args` list and an `environment` dict that they should
#: populate with the command arguments and environment variables used
#: to invoke the build command. The output of the template will be ignored.
template: str
def render(
self,
jinja_env: jinja2.Environment,
params: Dict[str, str],
) -> Tuple[List[str], Dict[str, str]]:
"""Return the list of arguments after being rendered with the given params"""
args: List[str] = []
environment: Dict[str, str] = {}
try:
for _ in jinja_env.from_string(self.template).generate(
**params,
args=args,
environment=environment,
):
pass
except jinja2.TemplateError as exc:
raise TplBuildTemplateException(
"Failed to render command template"
) from exc
if not args:
print(self.template)
raise TplBuildException("command template rendered no command arguments")
return args, environment
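# Hedged usage sketch: a build-style command template. The docker invocation and
# the "image" parameter name are illustrative; the real preconfigured templates
# live in builtin_clients.yml and may differ. `{% set _ = ... %}` is used so no
# jinja2 extensions are required.
_example_build_command = ClientCommand(
    template=(
        '{% set _ = args.extend(["docker", "build", "-t", image, "-"]) %}'
        '{% set _ = environment.update({"DOCKER_BUILDKIT": "1"}) %}'
    )
)
# _example_build_command.render(jinja2.Environment(), {"image": "myapp:dev"}) would
# return (["docker", "build", "-t", "myapp:dev", "-"], {"DOCKER_BUILDKIT": "1"}).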
class ClientConfig(pydantic.BaseModel):
"""
Configuration of commands to perform various container operations. This is
meant to be a generic interface that could plug into a variety of container
build systems. Typically you can just set :attr:`UserConfig.client_type` to
select from preconfigured client configurations.
"""
#: Build command config template. This should render an appropriate command
#: to build an image using a dockerfile named "Dockerfile" and build
#: context provided by stdin. The output should be tagged as the passed
#: argument `image`.
#:
#: Arguments:
#: image: str - The image name to tag the output
#: platform: str? - The build platform to use if known.
build: ClientCommand
#: Tag command config template. This should tag an existing image with
#: a new image name.
#:
#: Arguments:
#: source_image: str - The source image name
#: dest_image: str - The new name to tag `source_image` as
tag: ClientCommand
#: Pull command config template. This should pull the named image from
#: the remote registry into local storage.
#:
#: Arguments:
#: image: str - The name of the image to pull
pull: Optional[ClientCommand] = None
#: Push command config template. This should push the named image to
#: the remote registry from local storage.
#:
#: Arguments:
#: image: str - The name of the image to push
push: ClientCommand
#: Un-tag command config template. This should untag the named image
#: allowing data referenced by the image to be reclaimed.
#:
#: Arguments:
#: image: str - The name of the image to untag
untag: ClientCommand
#: Command that should print out the default build platform for the client.
#: This template is passed no additional arguments. If this command is not
#: available the default build platform will be calculated using the local
#: client platform instead. The output will be normalized to convert
#: e.g. "linux/x64_64" to "linux/amd64". This will only be used for
#: platform aware build configurations.
platform: Optional[ClientCommand] = None
UNSET_CLIENT_CONFIG = ClientConfig(
build=ClientCommand(template=""),
tag=ClientCommand(template=""),
push=ClientCommand(template=""),
untag=ClientCommand(template=""),
)
@functools.lru_cache
def get_builtin_configs() -> Dict[str, ClientConfig]:
"""
Return a cached mapping of preconfigured clients.
"""
path = os.path.join(os.path.dirname(__file__), "builtin_clients.yml")
with open(path, "r", encoding="utf-8") as fdata:
configs = yaml.safe_load(fdata)
return {
config_name: ClientConfig(**config_data)
for config_name, config_data in configs.items()
}
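# Hedged usage sketch: looking up a preconfigured client by the same name used
# for UserConfig.client_type further below; "docker" is assumed to be one of the
# keys defined in builtin_clients.yml.
# get_builtin_configs()["docker"].build   # -> the ClientCommand used for builds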
class UserSSLContext(pydantic.BaseModel):
"""Custom SSL context used to contact registries"""
#: Disable SSL/TLS verification
insecure: bool = False
#: File path to load CA certificates to trust.
cafile: Optional[str] = None
#: Folder containing CA certificate files to trust.
capath: Optional[str] = None
#: Raw certificate data to trust.
cadata: Optional[str] = None
#: If True default system certs will be loaded in addition to any certs
#: implied by `cafile`, `capath`, or `cadata`. Normally these will only be
#: loaded if those are all unset.
load_default_certs: bool = False
def create_context(self) -> ssl.SSLContext:
"""Returns a SSLContext constructed from the passed options"""
ctx = ssl.create_default_context(
cafile=self.cafile,
capath=self.capath,
cadata=self.cadata,
)
if self.insecure:
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
if self.load_default_certs:
ctx.load_default_certs()
return ctx
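# Hedged usage sketch: an SSLContext built from the defaults above; pass
# cafile/capath/cadata to pin a private registry CA (any file path you supply
# is your own), or insecure=True to disable verification entirely.
_example_ssl_ctx = UserSSLContext(load_default_certs=True).create_context()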
class StageConfig(pydantic.BaseModel):
"""Configuration data for a named build stage"""
#: Is the stage a base stage
base: bool = False
#: All image names to assign to the built image. Must be empty for base images.
image_names: List[str] = []
#: All image names to assign and then push to remote registries.
#: Must be empty for base images.
push_names: List[str] = []
@pydantic.validator("image_names")
def image_names_empty_for_base(cls, v, values):
"""Ensure base images have no image_names"""
if v and values["base"]:
raise ValueError("image_names must be empty for base images")
return v
@pydantic.validator("push_names")
def push_names_empty_for_base(cls, v, values):
"""Ensure base images have no push_names"""
if v and values["base"]:
raise ValueError("push_names must be empty for base images")
return v
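# Hedged usage sketch: a non-base stage with made-up image names. Combining
# base=True with image_names or push_names is rejected by the validators above.
_example_stage = StageConfig(
    image_names=["myapp:latest"],
    push_names=["registry.example.com/myapp:latest"],
)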
class UserConfig(pydantic.BaseModel):
"""User settings controlling tplbuild behavior"""
#: Must be "1.0"
version: Literal["1.0"] = "1.0"
#: If :attr:`client` is None this field will be used to set the client
#: configuration. Supported values are currently "docker" and "podman".
#: If :attr:`client` is not None this field is ignored.
client_type: str = "docker"
#: Client commands to use to perform different container actions. If unset
#: a default configuration will be provided based on the value of
#: :attr:`client_type`. If you wish to use a different builder or supply
#: additional arguments to the build this would be the place to do it.
client: ClientConfig = UNSET_CLIENT_CONFIG
#: Maximum number of concurrent build jobs. If set to 0 this will be set to
#: `os.cpu_count()`.
build_jobs: int = 0
#: Maximum number of concurrent push or pull jobs.
push_jobs: int = 4
#: Maximum number of concurrent tag jobs.
tag_jobs: int = 8
#: Maximum number of times a build will be retried before failing a build.
build_retry: int = 0
#: Maximum number of times a push or pull will be retried before failing a build.
push_retry: int = 0
#: Configure the SSL context used to contact registries. This only controls
#: accesses made by tplbuild itself. The client builder will need to be
#: configured separately.
ssl_context: UserSSLContext = UserSSLContext()
#: The path to the container auth configuration file to use when contacting
#: registries. By default this will check the default search paths and conform
#: to the syntax described in
#: https://github.com/containers/image/blob/main/docs/containers-auth.json.5.md.
auth_file: Optional[str] = None
@pydantic.validator("build_jobs", always=True)
def build_jobs_valid(cls, v):
"""ensure build_jobs is non-negative"""
#!/usr/bin/env python
# encoding: utf-8
# pyannote/audio/applications/pyannote_audio.py
# The MIT License (MIT)
# Copyright (c) 2019-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
"""
Neural building blocks for speaker diarization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Usage:
pyannote-audio (sad | scd | ovl | emb | dom) train [--cpu | --gpu] [options] <root> <protocol>
pyannote-audio (sad | scd | ovl | emb | dom) validate [--cpu | --gpu] [options] <train> <protocol>
pyannote-audio (sad | scd | ovl | emb | dom) apply [--cpu | --gpu] [options] <validate> <protocol>
pyannote-audio -h | --help
pyannote-audio --version
This command line tool can be used to train, validate, and apply neural networks
for the following blocks of a speaker diarization pipeline:
* (sad) speech activity detection consists in detecting speech regions in
an audio recording.
* (scd) speaker change detection consists in detecting timestamps of
speaker change point.
* (ovl) overlapped speech detection consists in detecting regions with two
or more simultaneous speakers.
* (emb) speaker embedding consists in projecting audio chunks into a
(usually high-dimensional) vector space where same speaker
embeddings are close to each other, and different speaker embeddings
are not.
* (dom) domain classification consists in predicting the domain of an
audio recording
Running a complete speech activity detection experiment on the provided
"debug" dataset would go like this:
* Run experiment on this pyannote.database protocol
$ export DATABASE=Debug.SpeakerDiarization.Debug
* This directory will contain experiments artifacts:
$ mkdir my_experiment && cd my_experiment
* A unique configuration file describes the experiment hyper-parameters
(see "Configuration file" below for details):
$ edit config.yml
* This will train the model on the training set:
$ pyannote-audio sad train ${PWD} ${DATABASE}
* Training artifacts (including model weights) are stored in a sub-directory
whose name makes it clear which dataset and subset (train, by default)
were used for training the model.
$ cd train/${DATABASE}.train
* This will validate the model on the development set:
$ pyannote-audio sad validate ${PWD} ${DATABASE}
* Validation artifacts (including the selection of the best epoch) are
stored in a sub-directory named after the dataset and subset (development,
by default) used for validating the model.
$ cd validate/${DATABASE}.development
* This will apply the best model (according to the validation step) to the
test set:
$ pyannote-audio sad apply ${PWD} ${DATABASE}
* Inference artifacts are stored in a sub-directory whose name makes it
clear which epoch has been used (e.g. apply/0125). Artifacts include:
* raw output of the best model (one numpy array per file that can be
loaded with pyannote.audio.features.Precomputed API and handled with
pyannote.core.SlidingWindowFeature API)
* (depending on the task) a file "${DATABASE}.test.rttm" containing the
post-processing of raw output.
* (depending on the task) a file "${DATABASE}.test.eval" containing the
evaluation result computed with pyannote.metrics.
pyannote.database support
~~~~~~~~~~~~~~~~~~~~~~~~~
PYANNOTE_DATABASE_CONFIG=
Configuration file <root>/config.yml
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Reproducible research is facilitated by the systematic use of configuration
files stored in <root>/config.yml in YAML format.
.......................... <root>/config.yml ..........................
task:
name:
params:
feature_extraction:
name:
params:
data_augmentation:
name:
params:
architecture:
name:
params:
scheduler:
name:
params:
preprocessors:
callbacks:
...................................................................
File <root>/config.yml is mandatory, unless option --pretrained is used.
When fine-tuning a model with option --pretrained=<model>, one can omit it
and the original <model> configuration file is used instead. If (a possibly
partial) <root>/config.yml file is provided anyway, it is used to override
<model> configuration file.
Tensorboard support
~~~~~~~~~~~~~~~~~~~
A bunch of metrics are logged during training and validation (e.g. loss,
learning rate, computation time, validation metric). They can be visualized
using tensorboard:
$ tensorboard --logdir=<root>
Common options
~~~~~~~~~~~~~~
<root> Experiment root directory. Should contain config.yml
configuration file, unless --pretrained option is
used (for which config.yml is optional).
<protocol> Name of protocol to use for training, validation, or
inference. Have a look at pyannote.database
documentation for instructions on how to define a
protocol with your own dataset:
https://github.com/pyannote/pyannote-database#custom-protocols
<train> Path to <root> sub-directory containing training
artifacts (e.g. <root>/train/<protocol>.train)
<validate> Path to <train> sub-directory containing validation
artifacts (e.g. <train>/validate/<protocol>.development)
In case option --pretrained=<model> is used, the
output of the pretrained model is dumped into the
<validate> directory.
--subset=<subset> Subset to use for training (resp. validation,
inference). Defaults to "train" (resp. "development",
"test") for strict enforcement of machine learning
good practices.
--gpu Run on GPU. When multiple GPUs are available, use
CUDA_VISIBLE_DEVICES environment variable to force
using a specific one. Defaults to using CPU if no GPU
is available.
--cpu Run on CPU. Defaults to using GPU when available.
--debug Run using PyTorch's anomaly detection. This will throw
an error if a NaN value is produced, and the stacktrace
will point to the origin of it. This option can
considerably slow execution.
--from=<epoch> Start training (resp. validating) at epoch <epoch>.
Use --from=last to start from last available epoch at
launch time. Not used for inference [default: 0].
--to=<epoch> End training (resp. validating) at epoch <epoch>.
Use --to=last to validate until the last available epoch
at launch time. Not used for inference [default: 100].
--batch=<size> Set batch size used for validation and inference.
Has no effect when training as this parameter should
be defined in the configuration file [default: 32].
--step=<ratio> Ratio of audio chunk duration used as step between
two consecutive audio chunks [default: 0.25]
--parallel=<n_jobs> Use at most that many threads for generating training
samples or validating files. Defaults to using all
CPUs but one.
Speaker embedding
~~~~~~~~~~~~~~~~~
--duration=<duration> Use audio chunks with that duration. Defaults to the
fixed duration used during training, when available.
--metric=<metric> Use this metric (e.g. "cosine" or "euclidean") to
compare embeddings. Defaults to the metric defined in
<root>/config.yml configuration file.
Pretrained model options
~~~~~~~~~~~~~~~~~~~~~~~~
--pretrained=<model> Warm start training with pre-trained model. Can be
either a path to an existing checkpoint (e.g.
<train>/weights/0050.pt) or the name of a model
available in torch.hub.list('pyannote/pyannote.audio')
This option can also be used to apply a pretrained
model. See description of <validate> for more details.
Validation options
~~~~~~~~~~~~~~~~~~
--every=<epoch> Validate model every <epoch> epochs [default: 1].
--evergreen Prioritize validation of most recent epoch.
For speech activity and overlapped speech detection, validation consists in
looking for the value of the detection threshold that maximizes the f-score
of recall and precision.
For speaker change detection, validation consists in looking for the value of
the peak detection threshold that maximizes the f-score of purity and
coverage:
--diarization Use diarization purity and coverage instead of
(default) segmentation purity and coverage.
For speaker embedding and verification protocols, validation runs the actual
speaker verification experiment (representing each recording by its average
embedding) and reports equal error rate.
For speaker embedding and diarization protocols, validation runs a speaker
diarization pipeline based on oracle segmentation and "pool-linkage"
agglomerative clustering of speech turns (represented by their average
embedding), and looks for the threshold that maximizes the f-score of purity
and coverage.
"""
import sys
import warnings
from docopt import docopt
from pathlib import Path
import multiprocessing
import torch
from .base import apply_pretrained
from .speech_detection import SpeechActivityDetection
from .change_detection import SpeakerChangeDetection
from .overlap_detection import OverlapDetection
from .speaker_embedding import SpeakerEmbedding
from .domain_classification import DomainClassification
def main():
# TODO: update version automatically
arg = docopt(__doc__, version="pyannote-audio 2.0")
params = {}
if arg["sad"]:
Application = SpeechActivityDetection
# cDate.py (from SkyLined/mDateTime)
import calendar, datetime, math, re, time;
gbDebugOutput = False;
rDate = re.compile(
r"^\s*" +
r"(\d{4})" +
r"[\-\/]" +
r"(\d{1,2})" +
r"[\-\/]" +
r"(\d{1,2})" +
r"\s*$"
);
asMonths = [
"January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December"
];
asOrdinalPostfixes = [
"st", "nd", "rd", "th", "th", "th", "th", "th", "th", "th", # 1-10
"th", "th", "th", "th", "th", "th", "th", "th", "th", "th", # 11-20
"st", "nd", "rd", "th", "th", "th", "th", "th", "th", "th", # 21-30
"st" # 31
];
def fbIsValidInteger(uValue, uMinValueInclusive = None, uMaxValueExclusive = None):
return (
isinstance(uValue, int)
and (uValue % 1 == 0)
and (uValue >= uMinValueInclusive if uMinValueInclusive is not None else True)
and (uValue < uMaxValueExclusive if uMaxValueExclusive is not None else True)
);
class cDate(object):
# Static methods
@staticmethod
def fbIsValidYear(uValue):
return fbIsValidInteger(uValue, 0);
@staticmethod
def fbIsValidMonth(uValue):
return fbIsValidInteger(uValue, 1, 13);
@staticmethod
def fbIsValidDay(uValue):
return fbIsValidInteger(uValue, 1, 32); # We do not know the month but 32 is never valid.
@staticmethod
def fbIsValidDate(uYear, uMonth, uDay):
return (
cDate.fbIsValidYear(uYear)
and cDate.fbIsValidMonth(uMonth)
and fbIsValidInteger(uDay, 1, cDate.fuGetLastDayInMonth(uYear, uMonth) + 1)
);
@staticmethod
def fsGetDateString(uYear, uMonth, uDay):
return "%04d-%02d-%02d" % (uYear, uMonth, uDay);
@staticmethod
def fuGetLastDayInMonth(uYear, uMonth):
return calendar.monthrange(uYear, uMonth)[1];
@staticmethod
def fo0FromPyDate(oDate):
return None if oDate is None else cDate.foFromPyDate(oDate);
@staticmethod
def foFromPyDate(oDate):
return cDate(oDate.year, oDate.month, oDate.day);
@staticmethod
def fo0FromJSON(s0Date):
return None if s0Date is None else cDate.foFromJSON(s0Date);
@staticmethod
def foFromJSON(sDate):
# JSON encoding uses the "string value" of cDate.
return cDate.foFromString(sDate);
@staticmethod
def fo0FromMySQL(s0Date):
return None if s0Date is None else cDate.foFromMySQL(s0Date);
@staticmethod
def foFromMySQL(sDate):
# MySQL encoding uses the "string value" of cDate.
return cDate.foFromString(sDate);
@staticmethod
def fo0FromMySQLDateTime(s0DateTime):
return None if s0DateTime is None else cDate.foFromMySQLDateTime(s0DateTime);
@staticmethod
def foFromMySQLDateTime(sDateTime):
# MySQL format is "YYYY-MM-DD hh:mm:ss", so we can just split it at the space and use the first part:
return cDate.foFromMySQL(sDateTime.split(" ")[0]);
@staticmethod
def fbIsValidDateString(sDate):
return isinstance(sDate, str) and rDate.match(sDate) is not None;
@staticmethod
def fo0FromString(s0Date):
return None if s0Date is None else cDate.foFromString(s0Date);
@staticmethod
def foFromString(sDate):
oDateMatch = rDate.match(sDate) if isinstance(sDate, str) else None;
if oDateMatch is None: raise ValueError("Invalid date string " + repr(sDate) + ".");
return cDate(int(oDateMatch.group(1)), int(oDateMatch.group(2)), int(oDateMatch.group(3)));
@staticmethod
def foNow():
return cDate.foFromPyDate(datetime.datetime.now());
@staticmethod
def foNowUTC():
return cDate.foFromPyDate(datetime.datetime.utcnow());
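# Example (hedged, illustrative values): the rDate pattern accepts "-" or "/"
# separators and 1- or 2-digit month/day, so these parse to the same date:
#   cDate.foFromString("2024-02-29")
#   cDate.foFromString("2024/2/29")
# and cDate.fsGetDateString(2024, 2, 29) returns "2024-02-29".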
# Constructor
def __init__(oSelf, uYear, uMonth, uDay):
if not cDate.fbIsValidDate(uYear, uMonth, uDay): raise ValueError("Invalid date (%s, %s, %s)." % (repr(uYear), repr(uMonth), repr(uDay)));
oSelf.__uYear = uYear;
oSelf.__uMonth = uMonth; # 1 = January
oSelf.__uDay = uDay; # 1 = first day of month
# Properties
@property
def uYear(oSelf):
return oSelf.__uYear;
@uYear.setter
def uYear(oSelf, uYear):
if not cDate.fbIsValidYear(uYear): raise ValueError("Invalid year " + repr(uYear) + ".");
if not cDate.fbIsValidDate(uYear, oSelf.__uMonth, oSelf.__uDay): raise ValueError("Invalid year in date %s." % cDate.fsGetDateString(uYear, oSelf.__uMonth, oSelf.__uDay));
oSelf.__uYear = uYear;
@property
def uMonth(oSelf):
return oSelf.__uMonth;
@uMonth.setter
def uMonth(oSelf, uMonth):
if not cDate.fbIsValidMonth(uMonth): raise ValueError("Invalid month " + repr(uMonth) + ".");
if not cDate.fbIsValidDate(oSelf.__uYear, uMonth, oSelf.__uDay): raise ValueError("Invalid month in date %s." % cDate.fsGetDateString(oSelf.__uYear, uMonth, oSelf.__uDay));
oSelf.__uMonth = uMonth;
@property
def uDay(oSelf):
return oSelf.__uDay;
@uDay.setter
def uDay(oSelf, uDay):
if not cDate.fbIsValidDay(uDay): raise ValueError("Invalid day " + repr(uDay) + ".");
if not cDate.fbIsValidDate(oSelf.__uYear, oSelf.__uMonth, uDay): raise ValueError("Invalid day in date %s." % cDate.fsGetDateString(uYear, uMonth, uDay));
oSelf.__uDay = uDay;
#methods
def foClone(oSelf):
return cDate(oSelf.__uYear, oSelf.__uMonth, oSelf.__uDay);
def fSet(oSelf, uYear, uMonth, uDay):
if not cDate.fbIsValidDate(uYear, uMonth, uDay): raise ValueError("Invalid date (%s, %s, %s)." % (repr(uYear), repr(uMonth), repr(uDay)));
oSelf.__uYear = uYear;
oSelf.__uMonth = uMonth;
oSelf.__uDay = uDay;
def foGetEndDateForDuration(oSelf, oDuration):
# Note that this code ignores the time (if any) in oDuration
# Add the year and month:
iNewYear = oSelf.__uYear + oDuration.iYears;
iNewMonth0Based = oSelf.__uMonth - 1 + oDuration.iMonths;
if gbDebugOutput: print("year %s %s %s => %s, month (base 0) %s %s %s => %s" % (
repr(oSelf.__uYear), "-" if oDuration.iYears < 0 else "+", abs(oDuration.iYears), repr(iNewYear),
repr(oSelf.__uMonth - 1), "-" if oDuration.iMonths < 0 else "+", abs(oDuration.iMonths), repr(iNewMonth0Based),
));
# If uNewMonth < 0 or > 11, convert the excess/shortage to years and add it.
iMonthsExcessOrShortageInYears = math.floor(iNewMonth0Based / 12);
iNewYear += iMonthsExcessOrShortageInYears;
assert iNewYear > 0, \
"Year cannot be < 0 (%s)" % iNewYear;
uNewYear = iNewYear;
uNewMonth0Based = iNewMonth0Based % 12;
uNewMonth = uNewMonth0Based + 1;
if gbDebugOutput: print("year %s= %s => %s, month (base 0) %s => %s => base 1: %s" % (
"-" if iMonthsExcessOrShortageInYears < 0 else "+", abs(iMonthsExcessOrShortageInYears), repr(uNewYear),
repr(iNewMonth0Based), repr(uNewMonth0Based), repr(uNewMonth),
));
# If we added months and ended up in another month in which the current day does not exist (e.g. Feb 31st)
# reduce the day (i.e. Feb 28th/29th)
uLastDayInNewMonth = cDate.fuGetLastDayInMonth(uNewYear, uNewMonth);
uNewDayInNewMonth = oSelf.__uDay if oSelf.uDay <= uLastDayInNewMonth else uLastDayInNewMonth;
if gbDebugOutput and uNewDayInNewMonth != oSelf.__uDay: print("day %d => %d" % (
oSelf.__uDay, uNewDayInNewMonth
));
# Add the days by creating the Python datetime.date equivalent and adding the days using datetime.timedelta, then
# converting back to cDate. This allows us to reuse the Python API for tracking the number of days in each month.
oEndDate = cDate.foFromPyDate(
datetime.date(uNewYear, uNewMonth, uNewDayInNewMonth)
+ datetime.timedelta(oDuration.iDays)
);
return oEndDate;
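# Worked example of the clamping behaviour described above (hedged; it assumes
# the cDateDuration(iYears, iMonths, iDays) constructor that is imported in
# foGetDurationForEndDate below):
#   cDate(2000, 1, 31).foGetEndDateForDuration(cDateDuration(0, 1, 0))
#   -> 2000-02-29, because 2000 is a leap year and January 31st plus one month
#      clamps to the last day of February.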
def foGetDurationForEndDate(oSelf, oEndDate):
# If the end date is before this date, the duration is going to be negative.
# To keep this code simple to review, we always calculate the positive duration between the two dates
# and later invert it should the real result be a negative duration.
bNegativeDuration = oEndDate.fbIsBefore(oSelf);
uDurationMultiplier = -1 if bNegativeDuration else 1; # Used to potentially invert the duration later.
oFirstDate = oEndDate if bNegativeDuration else oSelf;
oLastDate = oSelf if bNegativeDuration else oEndDate;
uDurationYears = oLastDate.uYear - oFirstDate.uYear;
uDurationMonths = oLastDate.uMonth - oFirstDate.uMonth;
uDurationDays = oLastDate.uDay - oFirstDate.uDay;
# The number of days in the last month varies
uDaysInLastDatesPreviousMonth = cDate.fuGetLastDayInMonth(oLastDate.uYear - (1 if oLastDate.uMonth == 1 else 0), ((oLastDate.uMonth + 10) % 12) + 1);
if uDurationDays >= oLastDate.uDay:
# If uDurationDays > last date's day, adding the days moved it into a new month; convert this into a month and adjust the days.
# e.g. 2000-1-31 -> 2000-2-2 => -1m+29d (at this point) => +2d (after this adjustment)
uDurationMonths += 1;
uDurationDays = uDaysInLastDatesPreviousMonth - uDurationDays;
elif uDurationDays < 0:
# If uDurationDays < 0, the day is before adding the days moved it into a new month; convert this into a month and adjust the days.
# e.g. 2000-1-2 -> 2000-2-1 => +1m-1d (at this point) => +30d (after this adjustment)
uDurationMonths -= 1;
uDurationDays += uDaysInLastDatesPreviousMonth;
# If uDurationMonths < 0 or >= 12, convert the excess to years and add them.
if uDurationMonths < 0 or uDurationMonths >= 12:
uDurationYears += int(uDurationMonths / 12) + (-1 if uDurationMonths < 0 else 0);
uDurationMonths = (uDurationMonths % 12) + (12 if uDurationMonths < 0 else 0);
from .cDateDuration import cDateDuration;
oDuration = cDateDuration(
uDurationYears * uDurationMultiplier,
uDurationMonths * uDurationMultiplier,
uDurationDays * uDurationMultiplier,
);
return oDuration;
def fbIsBefore(oSelf, oDate):
if oSelf.__uYear < oDate.uYear: return True;
if oSelf.__uYear > oDate.uYear: return False;
if oSelf.__uMonth < oDate.uMonth: return True;
if oSelf.__uMonth > oDate.uMonth: return False;
if oSelf.__uDay < oDate.uDay: return True;
#if oSelf.__uDay > oDate.uDay: return False;
return False;
def fbIsEqualTo(oSelf, oDate):
return oSelf.__uYear == oDate.uYear and oSelf.__uMonth == oDate.uMonth and oSelf.__uDay == oDate.uDay;
def fbIsAfter(oSelf, oDate):
if oSelf.__uYear > oDate.uYear: return True;
if oSelf.__uYear < oDate.uYear: return False;
if oSelf.__uMonth > oDate.uMonth: return True;
if oSelf.__uMonth < oDate.uMonth: return False;
if oSelf.__uDay > oDate.uDay: return True;
#if oSelf.__uDay < oDate.uDay: return False;
return False;
def fbIsInThePast(oSelf):
return cDate.fbIsBefore(oSelf, cDate.foNow());
def fbIsInThePastUTC(oSelf):
return cDate.fbIsBefore(oSelf, cDate.foNowUTC());
def fbIsToday(oSelf):
return cDate.fbIsEqualTo(oSelf, cDate.foNow());
def fbIsTodayUTC(oSelf):
return cDate.fbIsEqualTo(oSelf, cDate.foNowUTC());
def fbIsInTheFuture(oSelf):
return cDate.fbIsAfter(oSelf, cDate.foNow());
def fbIsInTheFutureUTC(oSelf):
return cDate.fbIsAfter(oSelf, cDate.foNowUTC());
def fsToHumanReadableString(oSelf):
# Month <day>th, <year>
return "%s %d%s, %d" % (
asMonths[oSelf.__uMonth - 1],
oSelf.__uDay, asOrdinalPostfixes[oSelf.__uDay - 1],
oSelf.__uYear,
);
def foToPyDate(oSelf):
return datetime.date(oSelf.__uYear, oSelf.__uMonth, oSelf.__uDay);
def fnToTimestamp(oSelf):
return time.mktime(cDate.foToPyDate(oSelf).timetuple()) + (oSelf.uMicrosecond /
may be ignored with non-default options for the next two
inputs).
op_exclusions : None or list of gates, optional (default is None)
If not None, all Circuits containing *any* of the gates in this list are discarded,
and no comparison will be made for those strings.
op_inclusions : None or list of gates, optional (default is None)
If not None, a Circuit is dropped from the list of circuits to compare unless it
includes *some* gate from this list (or is the empty circuit).
DS_names : None or list, optional (default is None)
If `dataset_list_multidataset` is a list of DataSets, this can be used to specify names
for the DataSets in the list. E.g., ["Time 0", "Time 1", "Time 3"] or ["Driving","NoDriving"].
allow_bad_circuits : bool, optional
Whether or not the data is allowed to have zero total counts for any circuits in any of the
passes. If false, then an error will be raised when there are such unimplemented circuits. If
true, then the data from those circuits that weren't run in one or more of the passes will
be discarded before any analysis is performed (equivalent to excluding them explicitly with
the `circuits` input).
Returns
-------
A DataComparator object.
"""
if DS_names is not None:
if len(DS_names) != len(dataset_list_or_multidataset):
raise ValueError('Length of provided DS_names list must equal length of dataset_list_or_multidataset.')
if isinstance(circuits, str):
assert(circuits == 'all'), "If circuits is a string it must be 'all'!"
if isinstance(dataset_list_or_multidataset, list):
dsList = dataset_list_or_multidataset
olIndex = dsList[0].olIndex
olIndexListBool = [ds.olIndex == (olIndex) for ds in dsList]
DS_names = list(range(len(dataset_list_or_multidataset)))
if not _np.all(olIndexListBool):
raise ValueError('Outcomes labels and order must be the same across datasets.')
if circuits == 'all':
circuitList = dsList[0].keys()
circuitsListBool = [ds.keys() == circuitList for ds in dsList]
if not _np.all(circuitsListBool):
raise ValueError(
'If circuits="all" is used, then datasets must contain identical circuits. (They do not.)')
circuits = circuitList
elif isinstance(dataset_list_or_multidataset, _MultiDataSet):
dsList = [dataset_list_or_multidataset[key] for key in dataset_list_or_multidataset.keys()]
if circuits == 'all':
circuits = dsList[0].keys()
if DS_names is None:
DS_names = list(dataset_list_or_multidataset.keys())
else:
raise ValueError("The `dataset_list_or_multidataset` must be a list of DataSets of a MultiDataSet!")
if allow_bad_circuits:
trimmedcircuits = []
for circuit in circuits:
if min([ds[circuit].total for ds in dsList]) > 0:
trimmedcircuits.append(circuit)
circuits = trimmedcircuits
if op_exclusions is not None:
circuits_exc_temp = []
for circuit in circuits:
if is_circuit_allowed_by_exclusion(op_exclusions, circuit):
circuits_exc_temp.append(circuit)
circuits = list(circuits_exc_temp)
if op_inclusions is not None:
circuits_inc_temp = []
for circuit in circuits:
if is_circuit_allowed_by_inclusion(op_inclusions, circuit):
circuits_inc_temp.append(circuit)
circuits = list(circuits_inc_temp)
llrs = {}
pVals = {}
jsds = {}
dof = (len(dsList) - 1) * (len(dsList[0].olIndex) - 1)
total_counts = []
if len(dataset_list_or_multidataset) == 2:
tvds = {}
for circuit in circuits:
datalineList = [ds[circuit] for ds in dsList]
nListList = _np.array([list(dataline.allcounts.values()) for dataline in datalineList])
total_counts.append(_np.sum(nListList))
llrs[circuit] = loglikelihoodRatio(nListList)
jsds[circuit] = JensenShannonDivergence(nListList)
pVals[circuit] = pval(llrs[circuit], dof)
if len(dataset_list_or_multidataset) == 2:
tvds[circuit] = tvd(nListList)
self.dataset_list_or_multidataset = dataset_list_or_multidataset
self.pVals = pVals
self.pVals_pseudothreshold = None
self.llrs = llrs
self.llrs_pseudothreshold = None
self.jsds = jsds
if len(dataset_list_or_multidataset) == 2:
self.tvds = tvds
self.op_exclusions = op_exclusions
self.op_inclusions = op_inclusions
self.pVals0 = str(len(self.pVals) - _np.count_nonzero(list(self.pVals.values())))
self.dof = dof
self.num_strs = len(self.pVals)
self.DS_names = DS_names
if _np.std(_np.array(total_counts)) > 10e-10:
self.fixed_totalcount_data = False
self.counts_per_sequence = None
else:
self.fixed_totalcount_data = True
self.counts_per_sequence = int(total_counts[0])
self.aggregate_llr = _np.sum(list(self.llrs.values()))
self.aggregate_llr_threshold = None
self.aggregate_pVal = pval(self.aggregate_llr, self.num_strs * self.dof)
self.aggregate_pVal_threshold = None
# Convert the aggregate LLR to a signed standard deviations.
self.aggregate_nsigma = llr_to_signed_nsigma(self.aggregate_llr, self.num_strs * self.dof)
self.aggregate_nsigma_threshold = None
# All attributes to be populated in methods that can be called from .get methods, so
# we can raise a meaningful warning if they haven't been calculated yet.
self.sstvds = None
self.pVal_pseudothreshold = None
self.llr_pseudothreshold = None
self.pVal_pseudothreshold = None
self.jsd_pseudothreshold = None
def implement(self, significance=0.05, per_sequence_correction='Hochberg',
aggregate_test_weighting=0.5, pass_alpha=True, verbosity=2):
"""
Implements statistical hypothesis testing, to detect whether there is statistically
significant variation between the DateSets in this DataComparator. This performs
hypothesis tests on the data from individual circuits, and a joint hypothesis test
on all of the data. With the default settings, this is the method described and implemented
in "Probing context-dependent errors in quantum processors", by Rudinger et al. With
non-default settings, this is some minor variation on that method.
Note that the default values of all the parameters are likely sufficient for most
purposes.
Parameters
----------
significance : float in (0,1), optional (default is 0.05)
The "global" statistical significance to implement the tests at. I.e, with
the standard `per_sequence_correction` value (and some other values for this parameter)
the probability that a sequence that has been flagged up as context dependent
is actually from a context-independent circuit is no more than `significance`.
Precisely, `significance` is what the "family-wise error rate" (FWER) of the full set
of hypothesis tests (1 "aggregate test", and 1 test per sequence) is controlled to,
as long as `per_sequence_correction` is set to the default value, or another option
that controls the FWER of the per-sequence comparison (see below).
per_sequence_correction : string, optional (default is 'Hochberg')
The multi-hypothesis test correction used for the per-circuit/sequence comparisons.
(See "Probing context-dependent errors in quantum processors", by Rudinger et al. for
the details of what the per-circuit comparison is). This can be any string that is an allowed
value for the `localcorrections` input parameter of the HypothesisTest object. This includes:
- 'Hochberg'. This implements the Hochberg multi-test compensation technique. This
is strictly the best method available in the code, if you wish to control the FWER,
and it is the method described in "Probing context-dependent errors in quantum processors",
by Rudinger et al.
- 'Holms'. This implements the Holms multi-test compensation technique. This
controls the FWER, and it results in a strictly less powerful test than the Hochberg
correction.
- 'Bonferroni'. This implements the well-known Bonferroni multi-test compensation
technique. This controls the FWER, and it results in a strictly less powerful test than
the Hochberg correction.
- 'none'. This implements no multi-test compensation for the per-sequence comparisons,
so they are all implemented at a "local" significance level that is altered from `significance`
only by the (inbuilt) Bonferroni-like correction between the "aggregate" test and the per-sequence
tests. This option does *not* control the FWER, and many sequences may be flagged up as context
dependent even if none are.
- 'Benjamini-Hochberg'. This implements the Benjamini-Hochberg multi-test compensation
technique. This does *not* control the FWER, and instead controls the "False Detection Rate"
(FDR); see, for example, https://en.wikipedia.org/wiki/False_discovery_rate. That means that
the global significance is maintained for the test of "Is there any context dependence?". I.e.,
one or more tests will trigger when there is no context
dependence with at most a probability of `significance`. But, if one or more per-sequence tests
trigger then we are only guaranteed that (in expectation) no more than a fraction of
"local-signifiance" of the circuits that have been flagged up as context dependent actually aren't.
Here, "local-significance" is the significance at which the per-sequence tests are, together,
implemented, which is `significance`*(1 - `aggregate_test_weighting`) if the aggregate test doesn't
detect context dependence and `significance` if it does (as long as `pass_alpha` is True). This
method is strictly more powerful than the Hochberg correction, but it controls a different, weaker
quantity.
aggregate_test_weighting : float in [0,1], optional (default is 0.5)
The weighting, in a generalized Bonferroni correction, to put on the "aggregate test", that jointly
tests all of the data for context dependence (in contrast to the per-sequence tests). If this is 0 then
the aggreate test is not implemented, and if it is 1 only the aggregate test is implemented (unless it
triggers and `pass_alpha` is True).
pass_alpha : Bool, optional (default is True)
The aggregate test is implemented first, at the "local" significance defined by `aggregate_test_weighting`
and `significance` (see above). If `pass_alpha` is True, then when the aggregate test
from math import tanh
import random
import time
import Assembly.AVX as AVX
import Assembly.CodeBlocks as CodeBlocks
import Assembly.TypeSizes as TypeSizes
import Classes.ExpressionComponent as EC
import Classes.Optimizer
import config
from Assembly.AVX import (avx_correctSize, avx_doToReg, avx_dropToAddress,
avx_loadToReg, avx_ralloc, avx_rfree)
from Assembly.CodeBlocks import (allocate_readonly, checkTrue,
createIntrinsicHeap, createStringConstant,
deregisterizeValueType, doFloatOperation,
doIntOperation, extra_parameterlabel, fncall,
function_allocator, function_closer,
functionlabel, getLogicLabel, loadToPtr,
loadToReg, maskset, movMemVar, movRegToVar,
movVarToReg, moveParameterVector, pack_string, raw_regmov,
registerizeValueType, spop, spush, syscall,
valueOf, win_align_stack, win_unalign_stack,
zeroize, moveVector)
from Assembly.Instructions import Instruction, floatTo32h, floatTo64h
from Assembly.Registers import *
from Assembly.TypeSizes import INTMAX, dwordImmediate, isfloat
from Classes.Constexpr import buildConstantSet, determineConstexpr
from Classes.DType import DType, typematch
from Classes.Error import *
from Classes.Token import *
from Classes.Variable import *
from ExpressionEvaluator import (ExpressionEvaluator, LeftSideEvaluator,
depositFinal)
from globals import (BOOL, CHAR, DOUBLE, INT, LONG, OPERATORS, SHORT, VOID,
TsCompatible, isIntrinsic)
from Optimizers.Intraprocedural import IntraproceduralOptimizer
from Optimizers.LoopOptimizer import LoopOptimizer
from Optimizers.Peephole import Peephole
from Postfixer import Postfixer
# multiply all items in an array
def product(arr):
total = arr[0]
for i in arr[1:]:
total *= i
return total
# pre-defined builtin functions
predefs = [
"typeof",
"sizeof",
"typeid",
"__isflt",
"__syscall",
"static_assert",
"__exists"
]
###################################################
#
# The Function class is where the bulk of compilation occurs.
# Function objects are isolated and created by compiler objects.
#
#
#
#####################################################
class Function:
def __init__(self, name, parameters, returntype, compiler,
tokens, inline=False, extern=False, compileCount=0, memberfn=False,
parentstruct=None, return_auto=False, declare_token=None, winextern=False,
lambdas=[]):
self.name = name # fn name
self.parameters = parameters # list:Variable parameters
self.returntype = returntype # DType: return type
self.compiler = compiler # Parent
self.tokens = tokens # All Tokens
self.asm = "\n" # raw assembly output
# determine if function is member, and if so what is
# its parent.
self.memberfn = memberfn
self.parentstruct = parentstruct
self.stackCounter = 8 # counter to keep track of stacksize
self.stackTotal = 8 # maintain total count
self.variables = [] # all local variables
self.declare_token = declare_token
# a hash table of indexes in self.variables
# for faster access during compiletime
# fmt: "<name>": <idx>
# idx being the index in self.variables
self.variable_reference = {}
# inline functions behave like macros, and are not called
self.inline = inline
# stack containing labels to jump to if the "continue" keyword is used
self.continues = []
# stack containing labels to jump to if the "break" keyword is used
self.breaks = []
self.lambdas = []
self.lambdaCount = 0
self.current_token = self.tokens[0] if len(
self.tokens) > 0 else None # current token
self.ctidx = 0 # corrent token index
self.maxtokens = len(self.tokens) # one len call
# extern is in reference to c-standard names vs .k names
self.extern = extern or winextern
self.winextern = winextern
# Variardic functions will behave slightly differently.
# They are defined using the '...' token in their parameters,
# because they take an arbitrary number of parameters.
self.variardic = False
# user labels are custom labels embedded in the code itself. EX:
# {
# mylabel:
# ...
# goto mylabel;
#
# }
#
# in the array they take the form of dictionaries: userlables = {"username":"assembly-name"}
# each user-defined label will have a corresponding assembly label.
self.userlabels = {}
self.destructor_text = "" # automatically called destructors
# used for read-only values generated during compiletime that can be
# stored in .text
self.suffix = ""
# number of times this function has been re-compiled in optimization
self.compileCount = compileCount
# \see Assembly.Instructions.Peephole
self.peephole = Peephole() # optimizer
# remaining available register declarations (normal regs)
self.regdeclremain_norm = 2
# remaining available register declarations (sse)
self.regdeclremain_sse = 4
# ExpressionComponents to keep track of register declarations
self.regdecls = []
# disowned[...] stores all non-primitive objects created within a function, whose ownership
# is transfered outside of the function. In short, all the objects that the function disowns.
# An example of this would be returning a local data structure.
self.disowned: List[str] = []
# Static variables will not inherit their actual name in the final assembly,
# this is where their actual labels can be associated with their given
# names.
self.staticnameref = {
}
# The local state stack stores the state of the local scope in a stack,
# so that temporary stack variables can be pushed and popped at will.
# For example, any declarations defined inside an if statement will need to be
# removed by pushing the stack state before the if statement, and
# popping it after.
self.localstate_stack = []
# Parameter information:
# number of sse parameter registers used
self.ssepcount = 0
# number of normal parameter registers used
self.normpcount = 0
# number of extra memory-stored registers are used
self.extra_params = 0
# the above information is set externally by the compiler class
# monitoring:
# Functions declared with the auto keyword as their type will have to determine
# their own returntype. This flag specifies that a function is declared
# with auto.
self.return_auto = return_auto
# determine if a function is a placeholder or a properly assigned
# function
self.unassigned = True
# hasReturned keeps track of whether a function has made a guaranteed return.
# A guaranteed return is one not inside any other control structure, and that
# will always happen.
self.hasReturned = False
# containsReturn keeps track of if a function returns under any conditions at all
# (other than just reaching then end).
self.containsReturn = False
# cexterncalls tracks the number of C functions called
self.cexterncalls = 0
# if the return value is a constexpr
self.returnsConstexpr = False
self.constexpr_returnvalue = 0
# Set to true when there is inline assembly used in a function so that
# return / parameter warnings and optimizations do not interfere with user
# generated assembly.
self.contains_rawasm = False
# The isReturning flag is set when the function is compiling the return statement
# in order to signifiy that certain extra optimizations can be made. For example,
# register declarations do not need to be saved or restored during the return statement
# because they will go out of scope anyway.
self.isReturning = False
# recursive_depth keeps track of how many control-structures deep the function is.
# so, for example:
#
# recursive_depth = 0;
# if(...){
# ...
# recursive_depth = 1;
# switch(...){
# case ... {
# recursive_depth = 2;
# break;
# }
#
# }
# recursive_depth = 1;
#
#
# }
# recursive_depth = 0;
#
self.recursive_depth = 0
self.max_depth = 0
# canbeInline is used to determine if the compiler can safely make
# a function inline without the user specifically defining it as such
self.canbeInline = True
# isCompiled is used to determine if the function has already been
# compiled.
self.isCompiled = False
# closing label is the label placed right before the stack exit and return in the
# assembly of a function. It will be different for an inline vs regular
# function.
self.closinglabel = self.getClosingLabel()
# used to differentiate between template functions and regular
# functions
self.isTemplate = False
self.template_types = []
# Features
# count the number of other functions called to enable, with high optimization level,
# implicit parameter register declaration.
self.fncalls = 0
self.implicit_paramregdecl = False
# track unused / removable variables that can be optimized out in
# oplvl3
self.unreferenced = []
# Size optimization:
# When size optimization is being ran, functions are not inlined. This
# flag stores weather or not this function would have been inline
# otherwise
self.wouldbe_inline = False
# Count the number of times this function is called
self.references = 0
# advance token
def advance(self) -> Token:
# increment
self.ctidx += 1
# ensure bounds
if(self.ctidx == self.maxtokens):
throw(UnexepectedEOFError(self.tokens[-1]))
# update token
self.current_token = self.tokens[self.ctidx]
# return token
return self.current_token
# get the raw asm label used to call this function
def getCallingLabel(self) -> str:
return functionlabel(self).replace(":", "").replace("\n", "")
# check current token for semicolon
def checkSemi(self) -> None:
if(self.current_token.tok != T_ENDL):
throw(ExpectedSemicolon(self.current_token))
self.advance()
def getUserlabel(self, name) -> str:
if name
params:
body_params = params['registration']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['APP_NORMAL', 'OAUTH']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_registration(self, registration_id, **kwargs):
"""
Delete a registration.
Delete `registrationId`. This includes all instances of this registration.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_registration(registration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_registration_with_http_info(registration_id, **kwargs)
else:
(data) = self.delete_registration_with_http_info(registration_id, **kwargs)
return data
def delete_registration_with_http_info(self, registration_id, **kwargs):
"""
Delete a registration.
Delete `registrationId`. This includes all instances of this registration.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_registration_with_http_info(registration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['registration_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_registration" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registration_id' is set
if ('registration_id' not in params) or (params['registration_id'] is None):
raise ValueError("Missing the required parameter `registration_id` when calling `delete_registration`")
collection_formats = {}
resource_path = '/registrations/{registrationId}'.replace('{format}', 'json')
path_params = {}
if 'registration_id' in params:
path_params['registrationId'] = params['registration_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['APP_NORMAL', 'OAUTH']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
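# Usage sketch (illustrative, not part of the generated client): `api` is an
# already-constructed instance of this API class and the id is hypothetical.
#
#   # synchronous call
#   api.delete_registration('my-registration-id')
#
#   # asynchronous call; per the docstring, the request thread is returned
#   def on_done(response):
#       print('registration deleted')
#   thread = api.delete_registration('my-registration-id', callback=on_done)
#   thread.join()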
def delete_registration_configuration_setting(self, registration_id, setting_id, **kwargs):
"""
Clear a registration configuration.
Clears the `settingId` value for this registration. The effective value will become the value at the next level which has an explicit value set. Possibilities are course, application, or default.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_registration_configuration_setting(registration_id, setting_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:param str setting_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_registration_configuration_setting_with_http_info(registration_id, setting_id, **kwargs)
else:
(data) = self.delete_registration_configuration_setting_with_http_info(registration_id, setting_id, **kwargs)
return data
def delete_registration_configuration_setting_with_http_info(self, registration_id, setting_id, **kwargs):
"""
Clear a registration configuration.
Clears the `settingId` value for this registration. The effective value will become the value at the next level which has an explicit value set. Possibilities are course, application, or default.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_registration_configuration_setting_with_http_info(registration_id, setting_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:param str setting_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['registration_id', 'setting_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_registration_configuration_setting" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registration_id' is set
if ('registration_id' not in params) or (params['registration_id'] is None):
raise ValueError("Missing the required parameter `registration_id` when calling `delete_registration_configuration_setting`")
# verify the required parameter 'setting_id' is set
if ('setting_id' not in params) or (params['setting_id'] is None):
raise ValueError("Missing the required parameter `setting_id` when calling `delete_registration_configuration_setting`")
collection_formats = {}
resource_path = '/registrations/{registrationId}/configuration/{settingId}'.replace('{format}', 'json')
path_params = {}
if 'registration_id' in params:
path_params['registrationId'] = params['registration_id']
if 'setting_id' in params:
path_params['settingId'] = params['setting_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['APP_NORMAL', 'OAUTH']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_registration_global_data(self, registration_id, **kwargs):
"""
Delete the global data of a registration.
Delete global data associated with `registrationId`. Calling this method will reset all global objectives associated with this registration, if any exist.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_registration_global_data(registration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_registration_global_data_with_http_info(registration_id, **kwargs)
else:
(data) = self.delete_registration_global_data_with_http_info(registration_id, **kwargs)
return data
def delete_registration_global_data_with_http_info(self, registration_id, **kwargs):
"""
Delete the global data of a registration.
Delete global data associated with `registrationId`. Calling this method will reset all global objectives associated with this registration, if any exist.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_registration_global_data_with_http_info(registration_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['registration_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_registration_global_data" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registration_id' is set
if ('registration_id' not in params) or (params['registration_id'] is None):
raise ValueError("Missing the required parameter `registration_id` when calling `delete_registration_global_data`")
collection_formats = {}
resource_path = '/registrations/{registrationId}/globalData'.replace('{format}', 'json')
path_params = {}
if 'registration_id' in params:
path_params['registrationId'] = params['registration_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['APP_NORMAL', 'OAUTH']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_registration_instance_configuration_setting(self, registration_id, instance_id, setting_id, **kwargs):
"""
Clear a configuration for an instance of a registration.
Clears the `settingId` value for this registration instance.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_registration_instance_configuration_setting(registration_id, instance_id, setting_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str registration_id: id for this registration (required)
:param int instance_id: The instance of this registration (required)
:param str setting_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_registration_instance_configuration_setting_with_http_info(registration_id, instance_id, setting_id, **kwargs)
else:
(data) = self.delete_registration_instance_configuration_setting_with_http_info(registration_id, instance_id, | |
## add id column if it is not there
if 'id' not in atoms.columns : atoms['id'] = atoms.index.values
mobile_ions = atoms.query('atom==@mobile').sort_values(by='z')
num_planes = round(cell[2,2] / mobile_ions.z.diff().dropna().max())
## it would be simple to extend the plane detection to unequal #s of atoms
st = len(atoms.query('atom==@mobile')) // num_planes
plane_zs = [mobile_ions.iloc[st*x:st*(x+1)].z.mean() for x in range(num_planes) ]
dz = (max(plane_zs) - min(plane_zs)) / (num_planes-1)
# print(st, plane_zs)
## create a column with by-atom distances from a plane
atoms['dz'] = atoms.z.apply(lambda x: min([abs(x-y) for y in plane_zs]))/dz
## assign Al4, then Al3, then Al1+Al2 (Al2 is assigned later)
atoms.loc[atoms.atom == 'Al', 'symm'] = 'Al4'
atoms.loc[(atoms.dz > 0.212) & (atoms.atom == 'Al'), 'symm'] = 'Al3'
atoms.loc[(atoms.dz > 0.4) & (atoms.atom == 'Al'), 'symm'] = 'Al1'
## assign O5, then O4+O3, then O1+O2 (O3 and O2 assigned later)
atoms.loc[atoms.atom == 'O', 'symm'] = 'O5'
atoms.loc[(atoms.atom == 'O') & (atoms.dz > 0.144), 'symm'] = 'O4'
atoms.loc[(atoms.atom == 'O') & (atoms.dz > 0.288), 'symm'] = 'O1'
## assign Oi
if 'type' in atoms.columns :
atoms.loc[(atoms.type == 4) & (atoms.atom == 'O'), 'symm'] = 'Oi'
## assign Mg if they exist
atoms.loc[atoms.atom == 'Mg', 'symm'] = 'Mg'
## for each plane: find the aBR and BR sites, find defects,
## and assign O3, O2, Al2, and sites to mobile ions
for p in plane_zs:
o5s = atoms.query(f'symm == "O5" & {p-dz/3} < z < {p+dz/3}')
o4s = atoms.query(f'symm == "O4" & {p-dz/2} < z < {p+dz/2}'); # print(len(o4s))
o1s = atoms.query(f'symm == "O1" & {p-dz/2} < z < {p+dz/2}');
ois = atoms.query(f'symm == "Oi" & {p-dz/3} < z < {p+dz/3}')
a1s = atoms.query(f'symm == "Al1" & {p-dz} < z < {p+dz}')
mis = atoms.query(f'atom == @mobile & {p-dz/3} < z < {p+dz/3}'); mis.z=0
# print(len(o5s))
mobile_sites = get_mobile_ion_sites(o5s, p, cell, thresh=100)
# print(f'found {len(mobile_sites)} mobile-ion sites at z={p:.3f}')
## get mid-oxygen sites
_, edges, midpts = get_mid_oxygen_sites_freud(mobile_sites, cell, viz=False)
# print(edges[:5], midpts[:5])
## calculate where the Oi's are by distance, call those edges 'picked'
picked = list()
for i, oi in ois.iterrows():
r2 = (midpts[:,0]-oi.x)**2 + (midpts[:,1]-oi.y)**2
# print(np.argwhere(r2 == min(r2))[0][0])
picked.append(edges[np.argwhere(r2 == min(r2))[0][0]])
picked = set(picked)
sites_next_to_oi = np.array(sorted([x for x in flatten(list(picked))]))
## create a proper networkx graph from edge list, and calculate path lengths
nxg = nx.from_edgelist(edges)
paths = path_lengths(nxg)
## measure all path lengths to the oxygens; this yields a list
## if there are no such paths - then there are no defects
if picked :
paths_to_oi = [min(paths[sites_next_to_oi,x]) for x in range(len(mobile_sites))]
else :
paths_to_oi = [100 for x in range(len(mobile_sites))]
## get BR and a-BR sites for labeling mobile ions
site_types = auto_get_BR_sites(atoms.query('symm != "Oi"'), cell, mobile_sites, frac)
# print(sum([x=='BR' for x in site_types]), 'BR')
# print(sum([x=='aBR' for x in site_types]), 'aBR')
# print([x for x in site_types if 'error' in x])
# print(sum([x=='doubleprime' for x in site_types]), 'bdp')
## try making a lattice of BR's that would cover all BR's
BR_sites = list(); aBR_sites = list()
for br in [x for x in range(len(site_types)) if site_types[x] == 'BR'] :
BR_sites = [x for x in range(len(site_types)) if site_types[x] == 'BR' and not paths[br,x] % 2]
if len(BR_sites) == sum([x=='BR' for x in site_types]) :
BR_sites = [x for x in range(len(site_types)) if not paths[br,x] % 2]
aBR_sites = [x for x in range(len(site_types)) if paths[br,x] % 2]
# print(len(BR_sites),len(aBR_sites))
break
## some safeguard or error message needs to go here if this fails
BR_locs = mobile_sites[BR_sites]
BR_locs[:,-1] = p
aBR_locs = mobile_sites[aBR_sites]
aBR_locs[:,-1] = p
## find in which site each mobile ion resides using freud AABBQuery
# create the freud query (freud 2.2.0)
query_args = dict(mode='nearest', num_neighbors=1, exclude_ii=False)
box = freud.box.Box(Lx=cell[0,0], Ly=cell[1,1], is2D=True)
mobile_sites[:,-1] = 0
que = freud.locality.AABBQuery(box, mobile_sites)
result = que.query(mis[['x','y','z']].values, query_args)
## assign distance+site for every mobile ion
for r in result:
## first item should be ion, second should be site
atomid = mis.iloc[r[0]].id
siteid = r[1]
atoms.loc[atoms.id == atomid, 'symm'] = str(int(paths_to_oi[siteid])) + ('A' if siteid in aBR_sites else 'B')
## assign O3: query above/below aBR sites
query_args2 = dict(mode='nearest', num_neighbors=2, exclude_ii=False)
box2 = freud.box.Box(Lx=cell[0,0], Ly=cell[1,1], Lz=cell[2,2])
que2 = freud.locality.AABBQuery(box2, o4s[['x','y','z']].values)
result2 = que2.query(aBR_locs, query_args2)
## actually change the symmetry
# o3s = 0
for r in result2:
atomid = o4s.iloc[r[1]].id
atoms.loc[atoms.id == atomid, 'symm'] = 'O3'
# o3s += 1
# print(o3s)
## assign Al2: above / below O5's
que3 = freud.locality.AABBQuery(box2, a1s[['x','y','z']].values)
result3 = que3.query(o5s[['x','y','z']].values, query_args2)
for r in result3:
atomid = a1s.iloc[r[1]].id
atoms.loc[atoms.id == atomid, 'symm'] = 'Al2'
## assign O2: above/below BR sites
que4 = freud.locality.AABBQuery(box2, o1s[['x','y','z']].values)
result4 = que4.query(BR_locs, query_args2)
for r in result4:
atomid = o1s.iloc[r[1]].id
atoms.loc[atoms.id == atomid, 'symm'] = 'O2'
return atoms.drop(columns='dz')
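## Usage sketch (illustrative): `snapshot` is a hypothetical DataFrame with
## atom/x/y/z columns from a parsed snapshot and `box` is its 3x3 cell matrix.
# labeled = add_symmetry_beta(snapshot, box)
# labeled.symm.value_counts()   # counts per symmetry label (Al1-Al4, O1-O5, site labels, ...)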
# =============================================================================
# %% write a CIF file from a snapshot
# =============================================================================
def write_cif(filename, atoms, cell=np.eye(3), frac=False):
## add symmetry elements
atoms = add_symmetry_beta(atoms, cell, frac=frac)
## make atoms fractional and 0-1 rather than -0.5 to 0.5
for i, dim in enumerate(['x', 'y', 'z']) :
cell[i,i] = atoms[dim].max() - atoms[dim].min()
atoms[dim] = (atoms[dim]-atoms[dim].min()) / (atoms[dim].max() - atoms[dim].min())
## Open an output file. This will overwrite if a file with this name exists.
fout = open(filename, 'w')
## Write some front matter
fout.write('#=====\n#generated with python\n#=====\ndata_snapshot\n\nloop_\n')
## write compound formula
formula = ' '.join([x + str(len(atoms.query(f'atom == "{x}"'))) for x in atoms.atom.unique()])
fout.write(f'{"_chemical_name_common":<40}\'{formula}\'\n')
## write the dimensions of the simulation box
for n, dim in zip(['a','b','c'], [0,1,2]):
fout.write(f'_cell_length_{n:<27}{cell[dim,dim]:.4f}\n')
## write angles, works only for orthogonal now
for ang in ['alpha', 'beta', 'gamma']:
fout.write(f'_cell_angle_{ang:<28}90\n')
## write stuff like symmetry elements - not implemented
## write columns for atoms
fout.write('\nloop_\n')
props = ['_atom_site_label', '_atom_site_occupancy', '_atom_site_fract_x',
'_atom_site_fract_y', '_atom_site_fract_z', '_atom_site_type_symbol']
for prop in props:
fout.write(prop+'\n')
## write atoms
for i, a in atoms.iterrows():
fout.write(f'{a.atom:<3}1.0 {a.x:.6f} {a.y:.6f} {a.z:.6f} {a.symm}\n')
if not i % 500 : print(f'wrote atom #{i}')
## write charges
fout.write('\nloop_\n')
for prop in ['_atom_type_symbol', '_atom_type_oxidation_number'] :
fout.write(prop+'\n')
for s in sorted(atoms.symm.unique()) :
## default is the mobile ion
chg = 1.0
if 'Al' in s : chg = 3.
elif 'Mg' in s : chg = 2.
elif 'O' in s : chg = -2.
fout.write(f'{s:<4} {chg:.3f}\n')
# =============================================================================
# %% assign symmetry types to atoms for beta-doubleprime:
## for beta-doubleprime, make a new column with atom types by symmetry (Al, O) & site(mobile)
## Al1 : second layer of Al from planes (Oh symmetry)
## Al2 : almost exactly between planes (Td)
## Al3 : at the in-plane oxygens (Td)
## Al4 : exactly between planes (Oh)
## O1 : just above & below conduction planes, but not directly above/below sites
## O2 : same plane as O4, just not directly above / below Al2 and not next to Al4
## O3 : just above & below conduction planes, directly above/below sites and further from O5
## O4 : directly above / below Al2
## O5 : in-plane oxygens
# =============================================================================
def add_symmetry_bdp(atoms, cell=np.eye(3), mobile='Na', frac=False):
## create the column
atoms['symm'] = mobile
## add id column if it is not there
if 'id' not in atoms.columns : atoms['id'] = atoms.index.values
mobile_ions = atoms.query('atom==@mobile').sort_values(by='z')
num_planes = round(cell[2,2] / mobile_ions.z.diff().dropna().max())
## it would be simple to extend the plane detection to unequal #s of atoms
st = int(len(atoms.query('atom==@mobile')) / num_planes)
plane_zs = [mobile_ions.iloc[st*x:st*(x+1)].z.mean() for x in range(num_planes) ]
dz = (max(plane_zs) - min(plane_zs)) / (num_planes-1)
# print(f'{st} mobile ions per plane at zs: {plane_zs}')
## create a column with by-atom distances from a plane
atoms['dz'] = atoms.z.apply(lambda x: min([abs(x-y) for y in plane_zs]))/dz
## assign Als by increasing distance : Al3, then Al1, then Al2, then Al4
atoms.loc[atoms.atom == 'Al', 'symm'] = 'Al3'
atoms.loc[(atoms.dz > 0.205) & (atoms.atom == 'Al'), 'symm'] = 'Al1'
atoms.loc[(atoms.dz > 0.38) & (atoms.atom | |
if len(data.get("spans", [])) > 0
else [],
confidence=data.get("confidence", None),
)
class DocumentField(object):
"""An object representing the content and location of a document field value.
:ivar str value_type: The type of `value` found on DocumentField. Possible types include:
"string", "date", "time", "phoneNumber", "float", "integer", "selectionMark", "countryRegion",
"signature", "currency", "list", "dictionary".
:ivar value:
The value for the recognized field. Its semantic data type is described by `value_type`.
If the value is extracted from the document, but cannot be normalized to its type,
then access the `content` property for a textual representation of the value.
:vartype value: str, int, float, :class:`~datetime.date`, :class:`~datetime.time`,
:class:`~azure.ai.formrecognizer.CurrencyValue`,
dict[str, :class:`~azure.ai.formrecognizer.DocumentField`],
or list[:class:`~azure.ai.formrecognizer.DocumentField`]
:ivar content: The field's content.
:vartype content: str
:ivar bounding_regions: Bounding regions covering the field.
:vartype bounding_regions: list[~azure.ai.formrecognizer.BoundingRegion]
:ivar spans: Location of the field in the reading order concatenated content.
:vartype spans: list[~azure.ai.formrecognizer.DocumentSpan]
:ivar confidence: The confidence of correctly extracting the field.
:vartype confidence: float
"""
def __init__(self, **kwargs):
self.value_type = kwargs.get("value_type", None)
self.value = kwargs.get("value", None)
self.content = kwargs.get("content", None)
self.bounding_regions = kwargs.get("bounding_regions", None)
self.spans = kwargs.get("spans", None)
self.confidence = kwargs.get("confidence", None)
@classmethod
def _from_generated(cls, field):
if field is None:
return None
return cls(
value=get_field_value_v3(field),
value_type=adjust_value_type(field.type) if field.type else None,
content=field.content if field.content else None,
bounding_regions=[
BoundingRegion(
page_number=region.page_number,
bounding_box=get_bounding_box(region),
)
for region in field.bounding_regions
]
if field.bounding_regions
else [],
spans=[
DocumentSpan(
offset=span.offset,
length=span.length,
)
for span in field.spans
]
if field.spans
else [],
confidence=field.confidence if field.confidence else None,
)
def __repr__(self):
return (
"DocumentField(value_type={}, value={}, content={}, bounding_regions={}, spans={}, "
"confidence={})".format(
self.value_type,
repr(self.value),
self.content,
repr(self.bounding_regions),
repr(self.spans),
self.confidence,
)
)
def to_dict(self):
# type: () -> dict
"""Returns a dict representation of DocumentField.
:return: dict
:rtype: dict
"""
return {
"value_type": self.value_type,
"value": self.value,
"content": self.content,
"bounding_regions": [f.to_dict() for f in self.bounding_regions]
if self.bounding_regions
else [],
"spans": [f.to_dict() for f in self.spans]
if self.spans
else [],
"confidence": self.confidence,
}
@classmethod
def from_dict(cls, data):
# type: (dict) -> DocumentField
"""Converts a dict in the shape of a DocumentField to the model itself.
:param dict data: A dictionary in the shape of DocumentField.
:return: DocumentField
:rtype: DocumentField
"""
return cls(
value_type=data.get("value_type", None),
value=data.get("value", None),
content=data.get("content", None),
bounding_regions=[BoundingRegion.from_dict(v) for v in data.get("bounding_regions")] # type: ignore
if len(data.get("bounding_regions", [])) > 0
else [],
spans=[DocumentSpan.from_dict(v) for v in data.get("spans")] # type: ignore
if len(data.get("spans", [])) > 0
else [],
confidence=data.get("confidence", None),
)
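# Round-trip sketch (illustrative): a DocumentField can be serialized with
# to_dict() and rebuilt with from_dict(), e.g. to cache analysis output.
#   d = field.to_dict()                    # `field` is a hypothetical DocumentField
#   restored = DocumentField.from_dict(d)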
class DocumentKeyValueElement(object):
"""An object representing the field key or value in a key-value pair.
:ivar content: Concatenated content of the key-value element in reading order.
:vartype content: str
:ivar bounding_regions: Bounding regions covering the key-value element.
:vartype bounding_regions: list[~azure.ai.formrecognizer.BoundingRegion]
:ivar spans: Location of the key-value element in the reading order of the concatenated
content.
:vartype spans: list[~azure.ai.formrecognizer.DocumentSpan]
"""
def __init__(self, **kwargs):
self.content = kwargs.get("content", None)
self.bounding_regions = kwargs.get("bounding_regions", None)
self.spans = kwargs.get("spans", None)
@classmethod
def _from_generated(cls, element):
return cls(
content=element.content,
bounding_regions=[
BoundingRegion._from_generated(region)
for region in element.bounding_regions
]
if element.bounding_regions
else [],
spans=[DocumentSpan._from_generated(span) for span in element.spans]
if element.spans
else [],
)
def __repr__(self):
return (
"DocumentKeyValueElement(content={}, bounding_regions={}, spans={})".format(
self.content,
repr(self.bounding_regions),
repr(self.spans),
)
)
def to_dict(self):
# type: () -> dict
"""Returns a dict representation of DocumentKeyValueElement.
:return: dict
:rtype: dict
"""
return {
"content": self.content,
"bounding_regions": [f.to_dict() for f in self.bounding_regions]
if self.bounding_regions
else [],
"spans": [f.to_dict() for f in self.spans]
if self.spans
else [],
}
@classmethod
def from_dict(cls, data):
# type: (dict) -> DocumentKeyValueElement
"""Converts a dict in the shape of a DocumentKeyValueElement to the model itself.
:param dict data: A dictionary in the shape of DocumentKeyValueElement.
:return: DocumentKeyValueElement
:rtype: DocumentKeyValueElement
"""
return cls(
content=data.get("content", None),
bounding_regions=[BoundingRegion.from_dict(v) for v in data.get("bounding_regions")] # type: ignore
if len(data.get("bounding_regions", [])) > 0
else [],
spans=[DocumentSpan.from_dict(v) for v in data.get("spans")] # type: ignore
if len(data.get("spans", [])) > 0
else [],
)
class DocumentKeyValuePair(object):
"""An object representing a document field with distinct field label (key) and field value (may be empty).
:ivar key: Field label of the key-value pair.
:vartype key: ~azure.ai.formrecognizer.DocumentKeyValueElement
:ivar value: Field value of the key-value pair.
:vartype value: ~azure.ai.formrecognizer.DocumentKeyValueElement
:ivar confidence: Confidence of correctly extracting the key-value pair.
:vartype confidence: float
"""
def __init__(self, **kwargs):
self.key = kwargs.get("key", None)
self.value = kwargs.get("value", None)
self.confidence = kwargs.get("confidence", None)
@classmethod
def _from_generated(cls, key_value_pair):
return cls(
key=DocumentKeyValueElement._from_generated(key_value_pair.key)
if key_value_pair.key
else None,
value=DocumentKeyValueElement._from_generated(key_value_pair.value)
if key_value_pair.value
else None,
confidence=key_value_pair.confidence,
)
def __repr__(self):
return "DocumentKeyValuePair(key={}, value={}, confidence={})".format(
repr(self.key),
repr(self.value),
self.confidence,
)
def to_dict(self):
# type: () -> dict
"""Returns a dict representation of DocumentKeyValuePair.
:return: dict
:rtype: dict
"""
return {
"key": self.key.to_dict() if self.key else None,
"value": self.value.to_dict() if self.value else None,
"confidence": self.confidence,
}
@classmethod
def from_dict(cls, data):
# type: (dict) -> DocumentKeyValuePair
"""Converts a dict in the shape of a DocumentKeyValuePair to the model itself.
:param dict data: A dictionary in the shape of DocumentKeyValuePair.
:return: DocumentKeyValuePair
:rtype: DocumentKeyValuePair
"""
return cls(
key=DocumentKeyValueElement.from_dict(data.get("key")) # type: ignore
if data.get("key")
else None,
value=DocumentKeyValueElement.from_dict(data.get("value")) # type: ignore
if data.get("value")
else None,
confidence=data.get("confidence", None),
)
class DocumentLine(object):
"""A content line object representing the content found on a single line of the document.
:ivar content: Concatenated content of the contained elements in reading order.
:vartype content: str
:ivar bounding_box: Bounding box of the line.
:vartype bounding_box: list[Point]
:ivar spans: Location of the line in the reading order concatenated content.
:vartype spans: list[~azure.ai.formrecognizer.DocumentSpan]
"""
def __init__(self, **kwargs):
self._parent = kwargs.get("_parent", None)
self.content = kwargs.get("content", None)
self.bounding_box = kwargs.get("bounding_box", None)
self.spans = kwargs.get("spans", None)
@classmethod
def _from_generated(cls, line, document_page):
return cls(
_parent=document_page,
content=line.content,
bounding_box=get_bounding_box(line),
spans=prepare_document_spans(line.spans),
)
def __repr__(self):
return "DocumentLine(content={}, bounding_box={}, spans={})".format(
self.content,
self.bounding_box,
repr(self.spans),
)
def to_dict(self):
# type: () -> dict
"""Returns a dict representation of DocumentLine.
:return: dict
:rtype: dict
"""
return {
"content": self.content,
"bounding_box": [f.to_dict() for f in self.bounding_box]
if self.bounding_box
else [],
"spans": [f.to_dict() for f in self.spans]
if self.spans
else [],
}
@classmethod
def from_dict(cls, data):
# type: (dict) -> DocumentLine
"""Converts a dict in the shape of a DocumentLine to the model itself.
:param dict data: A dictionary in the shape of DocumentLine.
:return: DocumentLine
:rtype: DocumentLine
"""
return cls(
content=data.get("content", None),
bounding_box=[Point.from_dict(v) for v in data.get("bounding_box")] # type: ignore
if len(data.get("bounding_box", [])) > 0
else [],
spans=[DocumentSpan.from_dict(v) for v in data.get("spans")] # type: ignore
if len(data.get("spans", [])) > 0
else [],
)
def get_words(self, **kwargs): # pylint: disable=unused-argument
# type: (Any) -> Iterable[DocumentWord]
"""Get the words found in the spans of this DocumentLine.
:return: iterable[DocumentWord]
:rtype: iterable[DocumentWord]
"""
if not self._parent:
raise ValueError(
"Cannot use get_words() on a model that has been converted from a dictionary. "
"Missing reference to parent element."
)
result = []
for word in self._parent.words:
if _in_span(word, self.spans):
result.append(word)
return result
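# Usage sketch (illustrative): `result` is a hypothetical analysis result whose
# pages were built by the service (get_words() needs the parent page reference,
# so it is unavailable on objects rebuilt via from_dict()).
#   for line in result.pages[0].lines:
#       words = line.get_words()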
class DocumentPage(object):
"""Content and layout elements extracted from a page of the input.
:ivar page_number: 1-based page number in the input document.
:vartype page_number: int
:ivar angle: The general orientation of the content in clockwise direction, measured
in degrees between (-180, 180].
:vartype angle: float
:ivar width: The width of the image/PDF in pixels/inches, respectively.
:vartype width: float
:ivar height: The height of the image/PDF in pixels/inches, respectively.
:vartype height: float
:ivar unit: The unit used by the width, height, and boundingBox properties. For
images, the unit is "pixel". For PDF, the unit is "inch". Possible values include: "pixel",
"inch".
:vartype unit: str
:ivar spans: Location of the page in the reading order concatenated content.
:vartype spans: list[~azure.ai.formrecognizer.DocumentSpan]
:ivar words: Extracted words from the page.
:vartype words: list[~azure.ai.formrecognizer.DocumentWord]
:ivar selection_marks: Extracted selection marks from the page.
:vartype selection_marks:
list[~azure.ai.formrecognizer.DocumentSelectionMark]
:ivar lines: Extracted lines from the page, potentially containing both textual and
visual elements.
:vartype lines: list[~azure.ai.formrecognizer.DocumentLine]
"""
def __init__(self, **kwargs):
self.page_number = kwargs.get("page_number", None)
self.angle = kwargs.get("angle", None)
self.width = kwargs.get("width", None)
self.height = kwargs.get("height", None)
self.unit = kwargs.get("unit", None)
self.spans = kwargs.get("spans", None)
self.words = kwargs.get("words", None)
self.selection_marks = kwargs.get("selection_marks", None)
self.lines = kwargs.get("lines", None)
@classmethod
def _from_generated(cls, page):
return cls(
page_number=page.page_number,
angle=adjust_text_angle(page.angle),
width=page.width,
height=page.height,
unit=page.unit,
lines=[DocumentLine._from_generated(line, page) for | |
#!/usr/bin/python
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
from functools import wraps
import subprocess
import json
import os
import sys
import six
import yaml
from charmhelpers.contrib.network import ip
from charmhelpers.core import (
unitdata,
)
from charmhelpers.core.hookenv import (
config,
log as juju_log,
charm_dir,
INFO,
relation_ids,
relation_set
)
from charmhelpers.contrib.storage.linux.lvm import (
deactivate_lvm_volume_group,
is_lvm_physical_volume,
remove_lvm_physical_volume,
)
from charmhelpers.contrib.network.ip import (
get_ipv6_addr
)
from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install, apt_cache, install_remote
from charmhelpers.contrib.python.packages import pip_install
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
])
OPENSTACK_CODENAMES = OrderedDict([
('2011.2', 'diablo'),
('2012.1', 'essex'),
('2012.2', 'folsom'),
('2013.1', 'grizzly'),
('2013.2', 'havana'),
('2014.1', 'icehouse'),
('2014.2', 'juno'),
('2015.1', 'kilo'),
])
# The ugly duckling
SWIFT_CODENAMES = OrderedDict([
('1.4.3', 'diablo'),
('1.4.8', 'essex'),
('1.7.4', 'folsom'),
('1.8.0', 'grizzly'),
('1.7.7', 'grizzly'),
('1.7.6', 'grizzly'),
('1.10.0', 'havana'),
('1.9.1', 'havana'),
('1.9.0', 'havana'),
('1.13.1', 'icehouse'),
('1.13.0', 'icehouse'),
('1.12.0', 'icehouse'),
('1.11.0', 'icehouse'),
('2.0.0', 'juno'),
('2.1.0', 'juno'),
('2.2.0', 'juno'),
('2.2.1', 'kilo'),
('2.2.2', 'kilo'),
])
DEFAULT_LOOPBACK_SIZE = '5G'
def error_out(msg):
juju_log("FATAL ERROR: %s" % msg, level='ERROR')
sys.exit(1)
def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = ''
if src is None:
return rel
if src in ['distro', 'distro-proposed']:
try:
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
except KeyError:
e = 'Could not derive openstack release for '\
'this Ubuntu release: %s' % ubuntu_rel
error_out(e)
return rel
if src.startswith('cloud:'):
ca_rel = src.split(':')[1]
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
return ca_rel
# Best guess match based on deb string provided
if src.startswith('deb') or src.startswith('ppa'):
for k, v in six.iteritems(OPENSTACK_CODENAMES):
if v in src:
return v
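# Illustrative calls (results depend on the host's Ubuntu release):
#   get_os_codename_install_source('distro')             # codename mapped from lsb_release()
#   get_os_codename_install_source('cloud:trusty-kilo')  # 'kilo' on a trusty host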
def get_os_version_install_source(src):
codename = get_os_codename_install_source(src)
return get_os_version_codename(codename)
def get_os_codename_version(vers):
'''Determine OpenStack codename from version number.'''
try:
return OPENSTACK_CODENAMES[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_codename(codename):
'''Determine OpenStack version number from codename.'''
for k, v in six.iteritems(OPENSTACK_CODENAMES):
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
'codename: %s' % codename
error_out(e)
def get_os_codename_package(package, fatal=True):
'''Derive OpenStack release codename from an installed package.'''
import apt_pkg as apt
cache = apt_cache()
try:
pkg = cache[package]
except:
if not fatal:
return None
# the package is unknown to the current apt cache.
e = 'Could not determine version of package with no installation '\
'candidate: %s' % package
error_out(e)
if not pkg.current_ver:
if not fatal:
return None
# package is known, but no version is currently installed.
e = 'Could not determine version of uninstalled package: %s' % package
error_out(e)
vers = apt.upstream_version(pkg.current_ver.ver_str)
try:
if 'swift' in pkg.name:
swift_vers = vers[:5]
if swift_vers not in SWIFT_CODENAMES:
# Deal with 1.10.0 upward
swift_vers = vers[:6]
return SWIFT_CODENAMES[swift_vers]
else:
vers = vers[:6]
return OPENSTACK_CODENAMES[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_package(pkg, fatal=True):
'''Derive OpenStack version number from an installed package.'''
codename = get_os_codename_package(pkg, fatal=fatal)
if not codename:
return None
if 'swift' in pkg:
vers_map = SWIFT_CODENAMES
else:
vers_map = OPENSTACK_CODENAMES
for version, cname in six.iteritems(vers_map):
if cname == codename:
return version
# e = "Could not determine OpenStack version for package: %s" % pkg
# error_out(e)
os_rel = None
def os_release(package, base='essex'):
'''
Returns OpenStack release codename from a cached global.
If the codename can not be determined from either an installed package or
the installation source, the earliest release supported by the charm should
be returned.
'''
global os_rel
if os_rel:
return os_rel
os_rel = (get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config('openstack-origin')) or
base)
return os_rel
def import_key(keyid):
cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
"--recv-keys %s" % keyid
try:
subprocess.check_call(cmd.split(' '))
except subprocess.CalledProcessError:
error_out("Error importing repo key %s" % keyid)
def configure_installation_source(rel):
'''Configure apt installation source.'''
if rel == 'distro':
return
elif rel == 'distro-proposed':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(DISTRO_PROPOSED % ubuntu_rel)
elif rel[:4] == "ppa:":
src = rel
subprocess.check_call(["add-apt-repository", "-y", src])
elif rel[:3] == "deb":
l = len(rel.split('|'))
if l == 2:
src, key = rel.split('|')
juju_log("Importing PPA key from keyserver for %s" % src)
import_key(key)
elif l == 1:
src = rel
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(src)
elif rel[:6] == 'cloud:':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = rel.split(':')[1]
u_rel = rel.split('-')[0]
ca_rel = rel.split('-')[1]
if u_rel != ubuntu_rel:
e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
'version (%s)' % (ca_rel, ubuntu_rel)
error_out(e)
if 'staging' in ca_rel:
# staging is just a regular PPA.
os_rel = ca_rel.split('/')[0]
ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
cmd = 'add-apt-repository -y %s' % ppa
subprocess.check_call(cmd.split(' '))
return
# map charm config options to actual archive pockets.
pockets = {
'folsom': 'precise-updates/folsom',
'folsom/updates': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'havana': 'precise-updates/havana',
'havana/updates': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'icehouse': 'precise-updates/icehouse',
'icehouse/updates': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
'juno': 'trusty-updates/juno',
'juno/updates': 'trusty-updates/juno',
'juno/proposed': 'trusty-proposed/juno',
'kilo': 'trusty-updates/kilo',
'kilo/updates': 'trusty-updates/kilo',
'kilo/proposed': 'trusty-proposed/kilo',
}
try:
pocket = pockets[ca_rel]
except KeyError:
e = 'Invalid Cloud Archive release specified: %s' % rel
error_out(e)
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
apt_install('ubuntu-cloud-keyring', fatal=True)
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
f.write(src)
else:
error_out("Invalid openstack-release specified: %s" % rel)
def config_value_changed(option):
"""
Determine if config value changed since last call to this function.
"""
hook_data = unitdata.HookData()
with hook_data():
db = unitdata.kv()
current = config(option)
saved = db.get(option)
db.set(option, current)
if saved is None:
return False
return current != saved
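# Usage sketch (inside a charm hook): only reconfigure the installation source
# when the relevant config option actually changed.
#   if config_value_changed('openstack-origin'):
#       configure_installation_source(config('openstack-origin'))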
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
"""
Write an rc file in the charm-delivered directory containing
exported environment variables provided by env_vars. Any charm scripts run
outside the juju hook environment can source this scriptrc to obtain
updated config information necessary to perform health checks or
service changes.
"""
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
if not os.path.exists(os.path.dirname(juju_rc_path)):
os.mkdir(os.path.dirname(juju_rc_path))
with open(juju_rc_path, 'wb') as rc_script:
rc_script.write(
"#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p))
for u, p in six.iteritems(env_vars) if u != "script_path"]
def openstack_upgrade_available(package):
"""
Determines if an OpenStack upgrade is available from installation
source, based on version of installed package.
:param package: str: Name of installed package.
:returns: bool: : Returns True if configured installation source offers
a newer version of package.
"""
import apt_pkg as apt
src = config('openstack-origin')
cur_vers = get_os_version_package(package)
available_vers = get_os_version_install_source(src)
apt.init()
return apt.version_compare(available_vers, cur_vers) == 1
def ensure_block_device(block_device):
'''
Confirm block_device, create as loopback if necessary.
:param block_device: str: Full path of block device to ensure.
:returns: str: Full path of ensured block device.
'''
_none = ['None', 'none', None]
if (block_device in _none):
error_out('prepare_storage(): Missing required input: block_device=%s.'
% block_device)
if block_device.startswith('/dev/'):
bdev = block_device
elif block_device.startswith('/'):
_bd = block_device.split('|')
if len(_bd) == 2:
bdev, size = _bd
else:
bdev = block_device
size = DEFAULT_LOOPBACK_SIZE
bdev = ensure_loopback_device(bdev, size)
else:
bdev = '/dev/%s' % block_device
if not is_block_device(bdev):
error_out('Failed to locate valid block device at %s' % bdev)
return bdev
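# Illustrative calls (paths are hypothetical):
#   ensure_block_device('/dev/vdb')        # used as-is
#   ensure_block_device('/srv/loop0|10G')  # backed by a 10G loopback file
#   ensure_block_device('vdb')             # expanded to '/dev/vdb'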
def clean_storage(block_device):
'''
Ensures a block device is clean. That is:
- unmounted
- any lvm volume groups are deactivated
- any lvm physical device signatures removed
- partition table wiped
:param block_device: str: Full path to block device to clean.
'''
for mp, d in mounts():
if d == block_device:
juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
(d, mp), level=INFO)
umount(mp, persist=True)
if is_lvm_physical_volume(block_device):
deactivate_lvm_volume_group(block_device)
remove_lvm_physical_volume(block_device)
else:
zap_disk(block_device)
is_ip = ip.is_ip
ns_query = ip.ns_query
get_host_ip = ip.get_host_ip
get_hostname = ip.get_hostname
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
mm_map = {}
if os.path.isfile(mm_file):
with open(mm_file, 'r') as f:
mm_map = json.load(f)
return mm_map
def | |
<reponame>arokem/AFQ-Insight<filename>afqinsight/cnn.py
"""Build, fit, and predict with 1-D convolutional neural networks."""
import functools
import numpy as np
import os.path as op
import tempfile
from dipy.utils.optpkg import optional_package
from sklearn.impute import SimpleImputer
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.utils.validation import check_X_y, check_is_fitted
keras_msg = (
"To use afqinsight's convolutional neural nets for tractometry data, you will need "
"to have tensorflow and kerastuner installed. You can do this by installing "
"afqinsight with `pip install afqinsight[tf]`, or by separately installing these packages "
"with `pip install tensorflow kerastuner`."
)
kt, _, _ = optional_package("keras_tuner", keras_msg)
tf, has_tf, _ = optional_package("tensorflow", keras_msg)
if has_tf:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, Flatten, MaxPool1D, Dropout
from tensorflow.keras.callbacks import ModelCheckpoint
def build_model(hp, conv_layers, input_shape):
"""Build a keras model.
Uses Keras Tuner to build the model: the number of convolutional layers is fixed by
`conv_layers`, while the number of filters and the kernel size of each layer are sampled
as hyperparameters.
Parameters
----------
hp : tensorflow.keras.HyperParameters()
Hyperparameters class from which to sample hyperparameters
conv_layers : int
number of layers (one layer is Conv and MaxPool) in the sequential model.
input_shape : int
input shape of X so the model gets built continuously as you are adding layers
Returns
-------
model : tensorflow.keras.Model
compiled model that uses hyperparameters defined inline to hypertune the model
"""
model = Sequential()
model.add(
Conv1D(
filters=hp.Int("init_conv_filters", min_value=32, max_value=512, step=32),
kernel_size=hp.Int("init_conv_kernel", min_value=1, max_value=4, step=1),
activation="relu",
input_shape=input_shape,
)
)
for i in range(conv_layers - 1):
model.add(
Conv1D(
filters=hp.Int(
"conv_filters" + str(i), min_value=32, max_value=512, step=32
),
kernel_size=hp.Int(
"conv_kernel" + str(i), min_value=1, max_value=4, step=1
),
activation="relu",
)
)
model.add(MaxPool1D(pool_size=2, padding="same"))
model.add(Dropout(0.25))
model.add(Flatten())
dense_filters_2 = hp.Int("dense_filters_2", min_value=32, max_value=512, step=32)
model.add(Dense(dense_filters_2, activation="relu"))
model.add(Dropout(0.25))
model.add(Dense(64, activation="relu"))
model.add(Dense(1, activation="linear"))
model.compile(
loss="mean_squared_error", optimizer="adam", metrics=["mean_squared_error"]
)
return model
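# Usage sketch (assumes X_train is a hypothetical array of shape (n_samples, n_nodes, n_channels)):
#   hp = kt.HyperParameters()
#   model = build_model(hp, conv_layers=2, input_shape=X_train.shape[1:])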
class ModelBuilder:
"""Build a complex model architecture with the specified number of layers.
Parameters
----------
tuner_type : str or class.
Tuner to use. One of {"hyperband", "bayesian", "random"}.
input_shape : tuple
Expected shape of the input data.
layers : int
Number of layers in the model.
max_epochs : int
Number of epochs to train the model.
X_test : numpy.ndarray
Test data.
y_test : numpy.ndarray
Test labels or test values.
batch_size : int
Batch size to use when training.
directory : str
Directory to save the model to.
project_name : str, optional
A string, the name to use as prefix for files saved by the tuner object. Defaults to None
tuner_kwargs : dict, optional
Keyword arguments to pass to the tuner class on initialization.
Defaults to tuner defaults.
"""
def __init__(
self,
tuner_type,
input_shape,
layers,
max_epochs,
X_test,
y_test,
batch_size,
directory=None,
project_name=None,
**tuner_kwargs,
):
self.tuner_type = tuner_type
self.layers = layers
self.input_shape = input_shape
self.max_epochs = max_epochs
self.batch_size = batch_size
self.X_test = X_test
self.y_test = y_test
self.directory = directory
self.project_name = project_name
self.tuner_kwargs = tuner_kwargs
def _get_tuner(self):
"""Call build_model and instantiate a Keras Tuner for the returned model depending on user choice of tuner.
Returns
-------
tuner : kerastuner.tuners
BayesianOptimization, Hyperband, or RandomSearch tuner
"""
# setting parameters beforehand
hypermodel = functools.partial(
build_model, conv_layers=self.layers, input_shape=self.input_shape
)
if isinstance(self.tuner_type, str):
# instantiating tuner based on user's choice
if self.tuner_type == "hyperband":
tuner = kt.Hyperband(
hypermodel=hypermodel,
objective="mean_squared_error",
max_epochs=10,
overwrite=True,
project_name=self.project_name,
directory=self.directory,
**self.tuner_kwargs,
)
elif self.tuner_type == "bayesian":
tuner = kt.BayesianOptimization(
hypermodel=hypermodel,
objective="mean_squared_error",
max_trials=10,
overwrite=True,
project_name=self.project_name,
directory=self.directory,
**self.tuner_kwargs,
)
elif self.tuner_type == "random":
tuner = kt.RandomSearch(
hypermodel=hypermodel,
objective="mean_squared_error",
max_trials=10,
overwrite=True,
project_name=self.project_name,
directory=self.directory,
**self.tuner_kwargs,
)
else:
raise ValueError(
f"tuner parameter expects 'hyperband', 'bayesian', or 'random', but you provided {self.tuner_type}"
)
return tuner
# We do not cover the following line, because CNN also handles this
# error:
else: # pragma: no cover
raise TypeError(
f"`tuner` parameter should be a string, but you provided {self.tuner_type}"
)
def _get_best_weights(self, model, X, y):
"""Fit a CNN and save the best weights.
Use keras ModelCheckpoint to fit CNN and save the weights from the epoch
that produced the lowest validation loss to a temporary file. Uses
temporary file to load the best weights into the CNN model and returns
this best model.
Parameters
----------
model : tensorflow.keras.Sequential()
Hyperparameters class from which to sample hyperparameters
X : array-like of shape (n_samples, n_features)
The feature samples
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns
-------
model : tensorflow.keras.Model
fitted keras model with best weights loaded
"""
weights_path = op.join(tempfile.mkdtemp(), "weights.hdf5")
# making model checkpoint to save best model (# epochs) to file
model_checkpoint_callback = ModelCheckpoint(
filepath=weights_path,
monitor="val_loss",
mode="auto",
save_best_only=True,
save_weights_only=True,
verbose=True,
)
# Fitting model using model checkpoint callback to find best model which is saved to 'weights'
model.fit(
X,
y,
epochs=self.max_epochs,
batch_size=self.batch_size,
callbacks=[model_checkpoint_callback],
validation_data=(self.X_test, self.y_test),
)
# loading in weights
model.load_weights(weights_path)
# return the model
return model
def build_basic_model(self, X, y):
"""Build a sequential model without hyperparameter tuning.
Builds a static baseline sequential model with no hyperparameter tuning.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The feature samples
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns
-------
model : tensorflow.keras.Model
compiled model using basic Weston Havens architecture
"""
model = Sequential()
model.add(Dense(128, activation="relu", input_shape=X.shape[1:]))
model.add(Conv1D(24, kernel_size=2, activation="relu"))
model.add(MaxPool1D(pool_size=2, padding="same"))
model.add(Conv1D(32, kernel_size=2, activation="relu"))
model.add(MaxPool1D(pool_size=2, padding="same"))
model.add(Conv1D(64, kernel_size=3, activation="relu"))
model.add(MaxPool1D(pool_size=2, padding="same"))
model.add(Conv1D(128, kernel_size=4, activation="relu"))
model.add(MaxPool1D(pool_size=2, padding="same"))
model.add(Conv1D(256, kernel_size=4, activation="relu"))
model.add(MaxPool1D(pool_size=2, padding="same"))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.25))
model.add(Dense(64, activation="relu"))
model.add(Dense(1, activation="linear"))
model.compile(
loss="mean_squared_error", optimizer="adam", metrics=["mean_squared_error"]
)
best_model = self._get_best_weights(model, X, y)
return best_model
def build_tuned_model(self, X, y):
"""Build a tuned model using Keras tuner.
Initializes a Keras tuner for the model, searches for the best hyperparameters, and saves them.
Then builds the "best" model from those saved hyperparameters and returns it with the best
weights loaded by _get_best_weights.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The feature samples
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values
Returns
-------
model : tensorflow.keras.Model
compiled model that uses hyperparameters defined inline to hypertune the model
"""
# initialize tuner
tuner = self._get_tuner()
# Find the optimal hyperparameters
tuner.search(X, y, epochs=50, validation_split=0.2)
# Save the optimal hyperparameters
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
# make CNN model using best hyperparameters
model = tuner.hypermodel.build(best_hps)
best_model = self._get_best_weights(model, X, y)
return best_model
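# Usage sketch (hypothetical train/test arrays):
#   builder = ModelBuilder("bayesian", input_shape=X_train.shape[1:], layers=2,
#                          max_epochs=20, X_test=X_test, y_test=y_test,
#                          batch_size=32)
#   best = builder.build_tuned_model(X_train, y_train)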
class CNN:
"""A Convolutional Neural Network model with a fit/predict interface.
Parameters
----------
n_nodes : int
Number of nodes in each bundle profile.
n_channels : int
Number of metrics in each bundle profile.
max_epochs : int
Maximum number of epochs to train model.
batch_size : int
Number of samples per batch.
tuner_type : str
Type of hyperparameter tuner to use. One of 'hyperband', 'bayesian', or
'random'.
layers : int
Number of convolutional layers to use.
test_size : float
Fraction of data to use as test set.
impute_strategy : str, optional
Imputation strategy to use. One of 'mean', 'median', or 'knn'.
Default: "median".
random_state : int or RandomState instance, optional
Default: None.
directory : str, optional
Directory to save model and hyperparameters. Default: "."
project_name : str, optional
A string, the name to use as prefix for files saved by the tuner
object. Defaults to None
tuner_kwargs : dict, optional
Keyword arguments to pass to tuner. Default: tuner defaults.
"""
def __init__(
self,
n_nodes,
n_channels,
max_epochs=50,
batch_size=32,
tuner_type=None,
layers=1,
test_size=0.2,
impute_strategy="median",
random_state=None,
directory=None,
project_name=None,
**tuner_kwargs,
):
# checking n_nodes is passed as int
if not isinstance(n_nodes, int):
raise TypeError("Parameter n_nodes must be an integer.")
else:
self.n_nodes = n_nodes
# checking n_channels is passed as int
if not isinstance(n_channels, int):
raise TypeError("Parameter n_channels must be an integer.")
else:
self.n_channels = n_channels
# checking layers is passed as int
if not isinstance(layers, int):
raise TypeError("Parameter layers must be an integer.")
else:
self.layers = layers
# checking max epochs is passed as int
if not isinstance(max_epochs, int):
raise TypeError("Parameter max_epochs must be an integer.")
else:
self.max_epochs = max_epochs
if not isinstance(batch_size, int):
raise TypeError("Parameter batch_size must be an integer.")
else:
self.batch_size = batch_size
# checking tuner_type is passed as str or None
if not isinstance(tuner_type, str) and tuner_type is not None:
raise TypeError("Parameter tuner must be str.")
else:
# | |
Token"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x3e18ce4f283576d54e88f550ee2199df6a03ff22",
"token0": {
"id": "0x2c88c64a9b01ad99af0c19895645e211e21efd16",
"name": "HOURS"
},
"token1": {
"id": "0xdac17f958d2ee523a2206206994597c13d831ec7",
"name": "Te<PASSWORD> USD"
}
},
{
"id": "0x3e2cd79db408061bbd6ea42483d333d7d44d229b",
"token0": {
"id": "<KEY>",
"name": "B<PASSWORD>"
},
"token1": {
"id": "0xdac17f958d2ee523a2206206994597c13d831ec7",
"name": "Tether USD"
}
},
{
"id": "0x3e327a672734310d08f42da393bb1e386bbf28b7",
"token0": {
"id": "<KEY>",
"name": "Zenfuse Trading Platform Token"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x3e467f46bc3ed8aeaec1c8c367dd16dc5ca4a936",
"token0": {
"id": "<KEY>",
"name": "Wrapped BTC"
},
"token1": {
"id": "0x4fe83213d56308330ec302a8bd641f1d0113a4cc",
"name": "Nu<PASSWORD>"
}
},
{
"id": "0x3e5a6af33cc70646bc46aff0af4b930d92856b18",
"token0": {
"id": "0x99d8a9c45b2eca8864373a26d1459e3dff1e17f3",
"name": "<PASSWORD> Money"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x3e7baf0837091511f980b0d16b44265ae44f40c9",
"token0": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
},
"token1": {
"id": "0xe38b8ddec43b6a22e3d4e300a27d2fbd3d6494d2",
"name": "Athen<PASSWORD> <NAME>"
}
},
{
"id": "0x3e80118300af563aab1a5c2a3557b8c099ddcd56",
"token0": {
"id": "0x17494a642b1c173cada26ab5f06ffb1ed41f8aa7",
"name": "HOMECoin"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x3e87d4c24fb56c52a0dbd089a6219b7086d577d8",
"token0": {
"id": "0x3a856d4effa670c54585a5d523e96513e148e95d",
"name": "Trias Token"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x3eb284a7176fb2ecbe4f48af4a1a96bf6da076b8",
"token0": {
"id": "<KEY>",
"name": "BasketDAO DeFi Index"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x3eb531c3df61fa07069bcef279f037ff212421dd",
"token0": {
"id": "<KEY>",
"name": "Responsible Gold Coin"
},
"token1": {
"id": "<KEY>",
"name": "USD Coin"
}
},
{
"id": "0x3eb5db51798bb9c3afc574ca199f1b0372ece132",
"token0": {
"id": "0x5dcdd01b9b3e0b11e2ba7a952c1fad47808febd5",
"name": "IQV4U"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x3ebeb553a61ce91517fec8c4b8cdd855f42597e7",
"token0": {
"id": "<KEY>",
"name": "USD <PASSWORD>"
},
"token1": {
"id": "<KEY>",
"name": "Cross-Chain BCH"
}
},
{
"id": "0x3ed55de227079a1a7245a178f8815f6d14bbd900",
"token0": {
"id": "0x5c872500c00565505f3624ab435c222e558e9ff8",
"name": "CoTrader"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x3ed81407cdfa7ae026fb82e05f4e0bba8b1ac0ed",
"token0": {
"id": "0xa48475204881884cd31a199874d410c7936d8b12",
"name": "<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x3ee301ed8f42d106def4f9d9730ea3367880b771",
"token0": {
"id": "0x64d91f12ece7362f91a6f8e7940cd55f05060b92",
"name": "<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x3ee9c0e9710c22286637f47574e1947d4c446050",
"token0": {
"id": "<KEY>",
"name": "ADK"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x3eff8c8630ac9611f9c34cde90708d4e3f3aa274",
"token0": {
"id": "0x8df15f38407f68b98ad505440649017cc31c7588",
"name": "<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x3f1694bc5e0f7e2496cbda681a5be90575fe5423",
"token0": {
"id": "0xbb4e410d9e30d5a78167798a571606f832bd4024",
"name": "K<PASSWORD>aroo Coin"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x3f1a6182af41665e6d261efc895335f70e49b443",
"token0": {
"id": "0x1456688345527be1f37e9e627da0837d6f08c925",
"name": "USDP Stablecoin"
},
"token1": {
"id": "0xdac17f958d2ee523a2206206994597c13d831ec7",
"name": "Tether USD"
}
},
{
"id": "0x3f1cc52dda25f418250297afaba46fb179e6a410",
"token0": {
"id": "0xb8575c7411bd9a3ef378e2cf5bcbf22a7897dcaa",
"name": "Fair Lunch Token"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x3f2da03c22165da462fca9e99a2e34034493441a",
"token0": {
"id": "0x5959ee3ab3ac348dd4213df5af78de97f289206c",
"name": "var<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x<PASSWORD>",
"token0": {
"id": "0x95efd1fe6099f65a7ed524def487483221094947",
"name": "Crypto Bonus Miles Token"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x3fa0ac224c1a8dd07a764a0bd3f4aff9bde2ca0b",
"token0": {
"id": "0xb1f71c6f3be946ed712e7e10897f696de3a60a67",
"name": "FANDOM TOKEN"
},
"token1": {
"id": "0xdac17f958d2ee523a2206206994597c13d831ec7",
"name": "Te<PASSWORD> USD"
}
},
{
"id": "0x3fab94456ef109a905ea6a5bd34ef7811ec51da3",
"token0": {
"id": "<KEY>",
"name": "Crossfi"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x3fae0f474145a1a771f36bd188d1cc7057a91b06",
"token0": {
"id": "0x011864d37035439e078d64630777ec518138af05",
"name": "Zerogoki Token"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x3fd99e87b025e4297865ceaa4efd912e3b2087a3",
"token0": {
"id": "0x103c3a209da59d3e7c4a89307e66521e081cfdf0",
"name": "Genesis Vision Token"
},
"token1": {
"id": "0xdac17f958d2ee523a2206206994597c13d831ec7",
"name": "Te<PASSWORD> USD"
}
},
{
"id": "0x3fdb6c0073fe81dc662c8a1a418aa3195f4a75cb",
"token0": {
"id": "<KEY>",
"name": "Ot<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x3fe55d440adb6e07fa8e69451f5511d983882487",
"token0": {
"id": "0x30bcd71b8d21fe830e493b30e90befba29de9114",
"name": "<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x3fed392cde08fa170d6ef924eb886e7967fdf164",
"token0": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
},
"token1": {
"id": "<KEY>",
"name": "<PASSWORD>"
}
},
{
"id": "0x4004d94a4e6a6ca2b745b9b2e9fd262f3c3bdd98",
"token0": {
"id": "<KEY>",
"name": "BTC 2x Max ADL-guided"
},
"token1": {
"id": "<KEY>",
"name": "ETH 2x Flexible Leverage Index"
}
},
{
"id": "0x4012737a154f1c44df37379a765b87a1ea397edc",
"token0": {
"id": "0x3d3d35bb9bec23b06ca00fe472b50e7a4c692c30",
"name": "V<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x402d4865f9bf101e4d06e0bd565a8e85118520e2",
"token0": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
},
"token1": {
"id": "0xe15732e39363615826727f4be524b9e95a1ead59",
"name": "ROUND"
}
},
{
"id": "0x402dfea3c033a7f2d57a13787458029f470ec90a",
"token0": {
"id": "0x5b982018545ff26f0cf2e3cda8aed859e3072e07",
"name": "KaikenToken"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x40302bd08e3d0ae97f0792651e775f9c0065a10e",
"token0": {
"id": "0x737eec5170a255b052757279598d049d4179efb2",
"name": "<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x40442495d3e03492eab8aaadb3aa3c238c83094d",
"token0": {
"id": "<KEY>",
"name": "<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x405127050fc7e45d861e84cb96368b6e45015bb9",
"token0": {
"id": "0x2b5ca2f9510cf1e3595ff219f24d75d4244585ea",
"name": "EthereumPay"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x406113a76bdedb4a0333ff3d0b80c46b3035b655",
"token0": {
"id": "0x77fba179c79de5b7653f68b5039af940ada60ce0",
"name": "<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "<KEY>",
"token0": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped <PASSWORD>"
},
"token1": {
"id": "<KEY>",
"name": "AdToken"
}
},
{
"id": "<KEY>",
"token0": {
"id": "0x4691937a7508860f876c9c0a2a617e7d9e945d4b",
"name": "Wootrade Network"
},
"token1": {
"id": "0x4fabb145d64652a948d72533023f6e7a623c7c53",
"name": "Binance USD"
}
},
{
"id": "<KEY>",
"token0": {
"id": "0x1940daa4cdb3e6b1de04680c47c8adeb5e2f27ac",
"name": "<PASSWORD>AB<PASSWORD>"
},
"token1": {
"id": "0x8e9a29e7ed21db7c5b2e1cd75e676da0236dfb45",
"name": "M<PASSWORD>"
}
},
{
"id": "0x40817acc99bf2090fc1088aa1dc2043e38450622",
"token0": {
"id": "<KEY>",
"name": "<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x4084503be90f5f36c38aac0052c0d9b0e11bee8b",
"token0": {
"id": "0x4a1d0c737748305ad12f59157f0c17c2f6f31c8b",
"name": "<NAME>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x408a3962d014d5bb9ee2e4ba97c2366d11751db8",
"token0": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
},
"token1": {
"id": "0xc990e227d2ed9af6b3afb42f7522c99a0ef1892d",
"name": "<PASSWORD>"
}
},
{
"id": "0x408ade445f7467443d6ae2cdde6bc20867c82c1f",
"token0": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
},
"token1": {
"id": "0xfbb0c2e86b2a26edd8440d495c982ad0becd84a6",
"name": "beard.eth — ENS Domain"
}
},
{
"id": "0x4094306ef222c3a4c50d5497e0a959a29d3dacc4",
"token0": {
"id": "0x58f37eeb033459301822242a05a3e8d53b8e0a73",
"name": "DOGE INU"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x40aecf08f8f550fcc3c7401394c6a32ebe65b8a1",
"token0": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
},
"token1": {
"id": "<KEY>",
"name": "<PASSWORD>"
}
},
{
"id": "<KEY>",
"token0": {
"id": "<KEY>",
"name": "Smooth <PASSWORD>"
},
"token1": {
"id": "0xdac17f958d2ee523a2206206994597c13d831ec7",
"name": "Te<PASSWORD>"
}
},
{
"id": "0x40e37e6541a8adc8da1cf8871a7af673c1b51e3b",
"token0": {
"id": "0x38affa4a5572cd7f19bb8bbaba00cc697c574059",
"name": "Vanstar"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x40ed419e373adb0e985d1ec1defa80ffc667e8be",
"token0": {
"id": "0x2620638eda99f9e7e902ea24a285456ee9438861",
"name": "Crust Storage Market"
},
"token1": {
"id": "0xdac17f958d2ee523a2206206994597c13d831ec7",
"name": "Tether USD"
}
},
{
"id": "0x40fe3d7cf1771311dbffec07c6c17d5b2aff190f",
"token0": {
"id": "0x82125afe01819dff1535d0d6276d57045291b6c0",
"name": "Mar<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x410f72313075e4f54323fd6984bc12d1b69dc53c",
"token0": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
},
"token1": {
"id": "0xe1c4ec950ad1e5730765675b9f06a5942589a1c3",
"name": "littledogecoin.com"
}
},
{
"id": "0x4120ff31b38253ca7013d193e9ae0e29e7f2285e",
"token0": {
"id": "<KEY>",
"name": "BAC"
},
"token1": {
"id": "0x6b175474e89094c44da98b954eedeac495271d0f",
"name": "Dai Stablecoin"
}
},
{
"id": "0x412940fdac1214fc3df430769f54e69210a18e49",
"token0": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
},
"token1": {
"id": "<KEY>",
"name": "ELA on Ethereum"
}
},
{
"id": "<KEY>",
"token0": {
"id": "<KEY>",
"name": "Un<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x414095030e897b707c485fada1f4ab63cced5b80",
"token0": {
"id": "<KEY>",
"name": "<PASSWORD>"
},
"token1": {
"id": "<KEY>",
"name": "<PASSWORD> bearing USDC"
}
},
{
"id": "0x41454f14bde226c035b99901e4e98df59aefabeb",
"token0": {
"id": "<KEY>",
"name": "Bidao"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x41506d56b16794e4f7f423aeff366740d4bdd387",
"token0": {
"id": "0x6619078bdd8324e01e9a8d4b3d761b050e5ecf06",
"name": "<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x415f354fcebf3a137d71fe5d8a099eccd13bca8f",
"token0": {
"id": "0x5ec7c2e926cb6bdff5ee75cadbe0ff4cf96ff09c",
"name": "<PASSWORD>"
},
"token1": {
"id": "0xdac17f958d2ee523a2206206994597c13d831ec7",
"name": "<PASSWORD>"
}
},
{
"id": "0x41610532c101e3c83758c6491197884cc9d45ce8",
"token0": {
"id": "0x9c405acf8688afb61b3197421cdeec1a266c6839",
"name": "Do<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x41635ef999feebbcef4c3d30afbc7a1af5abe2e6",
"token0": {
"id": "0x03223c647554cf4dd77785e4daaa05fe35622817",
"name": "FAMFUNToken"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "<KEY>",
"token0": {
"id": "<KEY>",
"name": "Wrapped BTC"
},
"token1": {
"id": "0x4691937a7508860f876c9c0a2a617e7d9e945d4b",
"name": "Wootrade Network"
}
},
{
"id": "0x418eb2f342d8189d480506ed814bcdf8cac52254",
"token0": {
"id": "0x34612903db071e888a4dadcaa416d3ee263a87b9",
"name": "ethart"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
}
},
{
"id": "0x419bff8ef6bec27af00b263fcc968af30afaf58a",
"token0": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "Wrapped Ether"
},
"token1": {
"id": "0xf0c5831ec3da15f3696b4dad8b21c7ce2f007f28",
"name": "AXIS Token"
}
},
{
"id": "0x419e02bf3009411f598b09b89acc0a0417683849",
"token0": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
},
"token1": {
"id": "0xcdeeeaaf2e96c25c679155e3854169c2f336b931",
"name": "Metaverse<PASSWORD>"
}
},
{
"id": "0x41aa55d2dcd42760ef0b5da303730e7ac734a475",
"token0": {
"id": "<KEY>",
"name": "Synapse Finance "
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
{
"id": "0x41b536722c014a577f06a4bb0dfa08bf0b8f5e87",
"token0": {
"id": "0x4fe83213d56308330ec302a8bd641f1d0113a4cc",
"name": "Nu<PASSWORD>"
},
"token1": {
"id": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"name": "<PASSWORD>"
}
},
| |
in self.extensions:
subtags.append(ext)
if self.private:
subtags.append(self.private)
self._str_tag = '-'.join(subtags)
return self._str_tag
def simplify_script(self) -> 'Language':
"""
Remove the script from some parsed language data, if the script is
redundant with the language.
>>> Language.make(language='en', script='Latn').simplify_script()
Language.make(language='en')
>>> Language.make(language='yi', script='Latn').simplify_script()
Language.make(language='yi', script='Latn')
>>> Language.make(language='yi', script='Hebr').simplify_script()
Language.make(language='yi')
"""
if self._simplified is not None:
return self._simplified
if self.language and self.script:
if DEFAULT_SCRIPTS.get(self.language) == self.script:
result = self.update_dict({'script': None})
self._simplified = result
return self._simplified
self._simplified = self
return self._simplified
def assume_script(self) -> 'Language':
"""
Fill in the script if it's missing, and if it can be assumed from the
language subtag. This is the opposite of `simplify_script`.
>>> Language.make(language='en').assume_script()
Language.make(language='en', script='Latn')
>>> Language.make(language='yi').assume_script()
Language.make(language='yi', script='Hebr')
>>> Language.make(language='yi', script='Latn').assume_script()
Language.make(language='yi', script='Latn')
This fills in nothing when the script cannot be assumed -- such as when
the language has multiple scripts, or it has no standard orthography:
>>> Language.make(language='sr').assume_script()
Language.make(language='sr')
>>> Language.make(language='eee').assume_script()
Language.make(language='eee')
It also doesn't fill anything in when the language is unspecified.
>>> Language.make(region='US').assume_script()
Language.make(region='US')
"""
if self._assumed is not None:
return self._assumed
if self.language and not self.script:
try:
self._assumed = self.update_dict({'script': DEFAULT_SCRIPTS[self.language]})
except KeyError:
self._assumed = self
else:
self._assumed = self
return self._assumed
def prefer_macrolanguage(self) -> 'Language':
"""
BCP 47 doesn't specify what to do with macrolanguages and the languages
they contain. The Unicode CLDR, on the other hand, says that when a
macrolanguage has a dominant standardized language, the macrolanguage
code should be used for that language. For example, Mandarin Chinese
is 'zh', not 'cmn', according to Unicode, and Malay is 'ms', not 'zsm'.
This isn't a rule you'd want to follow in all cases -- for example, you may
want to be able to specifically say that 'ms' (the Malay macrolanguage)
contains both 'zsm' (Standard Malay) and 'id' (Indonesian). But applying
this rule helps when interoperating with the Unicode CLDR.
So, applying `prefer_macrolanguage` to a Language object will
return a new object, replacing the language with the macrolanguage if
it is the dominant language within that macrolanguage. It will leave
non-dominant languages that have macrolanguages alone.
>>> Language.get('arb').prefer_macrolanguage()
Language.make(language='ar')
>>> Language.get('cmn-Hant').prefer_macrolanguage()
Language.make(language='zh', script='Hant')
>>> Language.get('yue-Hant').prefer_macrolanguage()
Language.make(language='yue', script='Hant')
"""
if self._macrolanguage is not None:
return self._macrolanguage
language = self.language or 'und'
if language in NORMALIZED_MACROLANGUAGES:
self._macrolanguage = self.update_dict({
'language': NORMALIZED_MACROLANGUAGES[language]
})
else:
self._macrolanguage = self
return self._macrolanguage
def broaden(self) -> 'List[Language]':
"""
Iterate through increasingly general versions of this parsed language tag.
This isn't actually that useful for matching two arbitrary language tags
against each other, but it is useful for matching them against a known
standardized form, such as in the CLDR data.
The list of broader versions to try appears in UTR 35, section 4.3,
"Likely Subtags".
>>> for langdata in Language.get('nn-Latn-NO-x-thingy').broaden():
... print(langdata)
nn-Latn-NO-x-thingy
nn-Latn-NO
nn-NO
nn-Latn
nn
und-Latn
und
"""
if self._broader is not None:
return self._broader
self._broader = [self]
seen = {self.to_tag()}
for keyset in self.BROADER_KEYSETS:
filtered = self._filter_attributes(keyset)
tag = filtered.to_tag()
if tag not in seen:
self._broader.append(filtered)
seen.add(tag)
return self._broader
def matchable_tags(self) -> 'List[Language]':
if self._matchable_tags is not None:
return self._matchable_tags
self._matchable_tags = []
for keyset in self.MATCHABLE_KEYSETS:
filtered_tag = self._filter_attributes(keyset).to_tag()
self._matchable_tags.append(filtered_tag)
return self._matchable_tags
def maximize(self) -> 'Language':
"""
The Unicode CLDR contains a "likelySubtags" data file, which can guess
reasonable values for fields that are missing from a language tag.
This is particularly useful for comparing, for example, "zh-Hant" and
"zh-TW", two common language tags that say approximately the same thing
via rather different information. (Using traditional Han characters is
not the same as being in Taiwan, but each implies that the other is
likely.)
These implications are provided in the CLDR supplemental data, and are
based on the likelihood of people using the language to transmit
information on the Internet. (This is why the overall default is English,
not Chinese.)
>>> str(Language.get('zh-Hant').maximize())
'zh-Hant-TW'
>>> str(Language.get('zh-TW').maximize())
'zh-Hant-TW'
>>> str(Language.get('ja').maximize())
'ja-Jpan-JP'
>>> str(Language.get('pt').maximize())
'pt-Latn-BR'
>>> str(Language.get('und-Arab').maximize())
'ar-Arab-EG'
>>> str(Language.get('und-CH').maximize())
'de-Latn-CH'
>>> str(Language.make().maximize()) # 'MURICA.
'en-Latn-US'
>>> str(Language.get('und-ibe').maximize())
'en-ibe-Latn-US'
"""
if self._filled is not None:
return self._filled
for broader in self.broaden():
tag = broader.to_tag()
if tag in LIKELY_SUBTAGS:
result = Language.get(LIKELY_SUBTAGS[tag], normalize=False)
result = result.update(self)
self._filled = result
return result
raise RuntimeError(
"Couldn't fill in likely values. This represents a problem with "
"the LIKELY_SUBTAGS data."
)
# Support an old, wordier name for the method
fill_likely_values = maximize
def match_score(self, supported: 'Language') -> int:
"""
Suppose that `self` is the language that the user desires, and
`supported` is a language that is actually supported. This method
returns a number from 0 to 100 indicating how similar the supported
language is (higher numbers are better). This is not a symmetric
relation.
The algorithm here is described (badly) in a Unicode technical report
at http://unicode.org/reports/tr35/#LanguageMatching. If you find these
results bothersome, take it up with Unicode, unless it's particular
tweaks we implemented such as macrolanguage matching.
See :func:`tag_match_score` for a function that works on strings,
instead of requiring you to instantiate Language objects first.
Further documentation and examples appear with that function.
"""
if supported == self:
return 100
desired_complete = self.prefer_macrolanguage().maximize()
supported_complete = supported.prefer_macrolanguage().maximize()
desired_triple = (desired_complete.language, desired_complete.script, desired_complete.region)
supported_triple = (supported_complete.language, supported_complete.script, supported_complete.region)
return 100 - raw_distance(desired_triple, supported_triple)
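# Illustration: a request for 'en-GB' treats 'en-US' as a close but imperfect match
# (same language and script, only the region differs), while unrelated pairs such as
# 'en' vs. 'ja' score far lower.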
# These methods help to show what the language tag means in natural
# language. They actually apply the language-matching algorithm to find
# the right language to name things in.
def _get_name(self, attribute: str, language, min_score: int):
assert attribute in self.ATTRIBUTES
if isinstance(language, Language):
language = language.to_tag()
attr_value = getattr(self, attribute)
if attr_value is None:
return None
names = code_to_names(attribute, attr_value)
names['und'] = getattr(self, attribute)
return self._best_name(names, language, min_score)
def _best_name(self, names: dict, language: str, min_score: int):
possible_languages = sorted(names.keys())
target_language, score = best_match(language, possible_languages, min_score)
return names[target_language]
def language_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
"""
Give the name of the language (not the entire tag, just the language part)
in a natural language. The target language can be given as a string or
another Language object.
By default, things are named in English:
>>> Language.get('fr').language_name()
'French'
>>> Language.get('el').language_name()
'Greek'
But you can ask for language names in numerous other languages:
>>> Language.get('fr').language_name('fr')
'français'
>>> Language.get('el').language_name('fr')
'grec'
Why does everyone get Slovak and Slovenian confused? Let's ask them.
>>> Language.get('sl').language_name('sl')
'slovenščina'
>>> Language.get('sk').language_name('sk')
'slovenčina'
>>> Language.get('sl').language_name('sk')
'slovinčina'
>>> Language.get('sk').language_name('sl')
'slovaščina'
"""
return self._get_name('language', language, min_score)
def autonym(self, min_score: int=95) -> str:
"""
Give the name of this language *in* this language.
>>> Language.get('fr').autonym()
'français'
>>> Language.get('es').autonym()
'español'
>>> Language.get('ja').autonym()
'日本語'
This doesn't give the name of the region or script, but in some cases
the language can name itself in multiple scripts:
>>> Language.get('sr-Latn').autonym()
'srpski'
>>> Language.get('sr-Cyrl').autonym()
'српски'
>>> Language.get('pa').autonym()
'ਪੰਜਾਬੀ'
>>> Language.get('pa-Arab').autonym()
'پنجابی'
This only works for language codes that CLDR has locale data for. You
can't ask for the autonym of 'ja-Latn' and get 'nihongo'.
"""
return self.language_name(language=self, min_score=min_score)
def script_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
"""
Describe the script part of the language tag in a natural language.
"""
return self._get_name('script', language, min_score)
def region_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
"""
Describe the region part of the language tag in a natural language.
"""
return self._get_name('region', language, min_score)
def variant_names(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> list:
"""
Describe each of the variant parts of the language tag in a natural
language.
"""
names = []
for variant in self.variants:
var_names = code_to_names('variant', variant)
names.append(self._best_name(var_names, language, min_score))
return names
def describe(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> dict:
"""
Return a dictionary that describes a given language tag in a specified
natural language.
See `language_name` and related methods for more specific versions of this.
The desired `language` will in fact be matched against the available
options using the matching technique that this module provides. We can
illustrate many aspects of this by asking for a description of Shavian
script (a script devised by author George Bernard Shaw), and where you
| |
# Authors:
# <NAME>
#
# See the LICENSE file for legal information regarding use of this file.
import unittest
from tack.compat import a2b_hex
from tack.crypto.python.Python_AES import Python_AES
from tack.crypto.python.Python_ECGenerator import Python_ECGenerator
from tack.crypto.python.numbertheory import *
from tack.crypto.python.ellipticcurve import *
from tack.crypto.python.ecdsa import *
class PythonCryptoTest(unittest.TestCase):
def test_PythonAES(self):
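# The key/IV/plaintext/ciphertext below appear to be the first-block AES-256-CBC
# known-answer test vector from NIST SP 800-38A (Appendix F.2.5), so the cipher is
# checked against a published result rather than only round-tripped.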
key = a2b_hex("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4")
IV = a2b_hex("000102030405060708090A0B0C0D0E0F")
plaintext = a2b_hex("6bc1bee22e409f96e93d7e117393172a")
ciphertext = a2b_hex("f58c4c04d6e5f1ba779eabfb5f7bfbd6")
assert(Python_AES(key, IV).encrypt(plaintext) == ciphertext)
assert(Python_AES(key, IV).decrypt(ciphertext) == plaintext)
def test_PythonECDSA(self):
publicKey, privateKey = Python_ECGenerator().generateECKeyPair()
data = bytearray([0,1,2,3])
badData = bytearray([0,1,2,4])
signature = privateKey.sign(data)
assert(publicKey.verify(data, signature))
assert(not publicKey.verify(badData, signature))
def test_NumberTheory(self):
miller_rabin_test_count = 0
# Making sure locally defined exceptions work:
# p = modular_exp( 2, -2, 3 )
# p = square_root_mod_prime( 2, 3 )
#print "Testing gcd..."
assert gcd( 3*5*7, 3*5*11, 3*5*13 ) == 3*5
assert gcd( [ 3*5*7, 3*5*11, 3*5*13 ] ) == 3*5
assert gcd( 3 ) == 3
#print "Testing lcm..."
assert lcm( 3, 5*3, 7*3 ) == 3*5*7
assert lcm( [ 3, 5*3, 7*3 ] ) == 3*5*7
assert lcm( 3 ) == 3
#print "Testing next_prime..."
bigprimes = ( 999671,
999683,
999721,
999727,
999749,
999763,
999769,
999773,
999809,
999853,
999863,
999883,
999907,
999917,
999931,
999953,
999959,
999961,
999979,
999983 )
for i in range( len( bigprimes ) - 1 ):
assert next_prime( bigprimes[i] ) == bigprimes[ i+1 ]
error_tally = 0
# Test the square_root_mod_prime function:
for p in smallprimes[:50]:
#print "Testing square_root_mod_prime for modulus p = %d." % p
squares = []
for root in range( 0, 1+p//2 ):
sq = ( root * root ) % p
squares.append( sq )
calculated = square_root_mod_prime( sq, p )
if ( calculated * calculated ) % p != sq:
error_tally = error_tally + 1
print("Failed to find %d as sqrt( %d ) mod %d. Said %d." %
( root, sq, p, calculated ))
for nonsquare in range( 0, p ):
if nonsquare not in squares:
try:
calculated = square_root_mod_prime( nonsquare, p )
except SquareRootError:
pass
else:
error_tally = error_tally + 1
print("Failed to report no root for sqrt( %d ) mod %d." %
( nonsquare, p ))
# Test the jacobi function:
for m in range( 3, 100, 2 ):
#print "Testing jacobi for modulus m = %d." % m
if is_prime( m ):
squares = []
for root in range( 1, m ):
if jacobi( root * root, m ) != 1:
error_tally = error_tally + 1
print("jacobi( %d * %d, %d ) != 1" % ( root, root, m ))
squares.append( root * root % m )
for i in range( 1, m ):
if not i in squares:
if jacobi( i, m ) != -1:
error_tally = error_tally + 1
print("jacobi( %d, %d ) != -1" % ( i, m ))
else: # m is not prime.
f = factorization( m )
for a in range( 1, m ):
c = 1
for i in f:
c = c * jacobi( a, i[0] ) ** i[1]
if c != jacobi( a, m ):
error_tally = error_tally + 1
print("%d != jacobi( %d, %d )" % ( c, a, m ))
# Test the inverse_mod function:
#print "Testing inverse_mod . . ."
import random
n_tests = 0
for i in range( 100 ):
m = random.randint( 20, 10000 )
for j in range( 100 ):
a = random.randint( 1, m-1 )
if gcd( a, m ) == 1:
n_tests = n_tests + 1
inv = inverse_mod( a, m )
if inv <= 0 or inv >= m or ( a * inv ) % m != 1:
error_tally = error_tally + 1
print("%d = inverse_mod( %d, %d ) is wrong." % ( inv, a, m ))
assert(False)
#assert n_tests > 1000
#print n_tests, " tests of inverse_mod completed."
#print error_tally, "errors detected."
assert(error_tally == 0)
return 1
def test_EllipticCurve(self):
def test_add( c, x1, y1, x2, y2, x3, y3 ):
"""We expect that on curve c, (x1,y1) + (x2, y2 ) = (x3, y3)."""
p1 = Point( c, x1, y1 )
p2 = Point( c, x2, y2 )
p3 = p1 + p2
#print "%s + %s = %s" % ( p1, p2, p3 ),
if p3.x() != x3 or p3.y() != y3:
print(" Failure: should give (%d,%d)." % ( x3, y3 ))
assert()
def test_double( c, x1, y1, x3, y3 ):
"""We expect that on curve c, 2*(x1,y1) = (x3, y3)."""
p1 = Point( c, x1, y1 )
p3 = p1.double()
#print "%s doubled = %s" % ( p1, p3 ),
if p3.x() != x3 or p3.y() != y3:
print(" Failure: should give (%d,%d)." % ( x3, y3 ))
assert()
def test_multiply( c, x1, y1, m, x3, y3 ):
"""We expect that on curve c, m*(x1,y1) = (x3,y3)."""
p1 = Point( c, x1, y1 )
p3 = p1 * m
#print "%s * %d = %s" % ( p1, m, p3 ),
if p3.x() != x3 or p3.y() != y3:
print(" Failure: should give (%d,%d)." % ( x3, y3 ))
assert()
# A few tests from X9.62 B.3:
c = CurveFp( 23, 1, 1 )
test_add( c, 3, 10, 9, 7, 17, 20 )
test_double( c, 3, 10, 7, 12 )
test_add( c, 3, 10, 3, 10, 7, 12 ) # (Should just invoke double.)
test_multiply( c, 3, 10, 2, 7, 12 )
# From X9.62 I.1 (p. 96):
g = Point( c, 13, 7, 7 )
check = INFINITY
for i in range( 7 + 1 ):
p = ( i % 7 ) * g
#print("%s * %d = %s, expected %s . . ." % ( g, i, p, check ))
if p == check:
#print(" Good.")
pass
else:
print(p.x(), p.y())
#print(check.x(), check.y())
#print(" Bad. %s %s %s %s" % (p, check, type(p), type(check)))
assert()
check = check + g
# NIST Curve P-192:
p = 6277101735386680763835789423207666416083908700390324961279
r = 6277101735386680763835789423176059013767194773182842284081
s = 0x3045ae6fc8422f64ed579528d38120eae12196d5
c = 0x3099d2bbbfcb2538542dcd5fb078b6ef5f3d6fe2c745de65
b = 0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1
Gx = 0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012
Gy = 0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811
c192 = CurveFp( p, -3, b )
p192 = Point( c192, Gx, Gy, r )
# Checking against some sample computations presented
# in X9.62:
d = 651056770906015076056810763456358567190100156695615665659
Q = d * p192
if Q.x() != 0x62B12D60690CDCF330BABAB6E69763B471F994DD702D16A5:
print("p192 * d came out wrong.")
assert()
k = 6140507067065001063065065565667405560006161556565665656654
R = k * p192
if R.x() != 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD \
or R.y() != 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835:
print("k * p192 came out wrong.")
assert()
u1 = 2563697409189434185194736134579731015366492496392189760599
u2 = 6266643813348617967186477710235785849136406323338782220568
temp = u1 * p192 + u2 * Q
if temp.x() != 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD \
or temp.y() != 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835:
print("u1 * p192 + u2 * Q came out wrong.")
assert()
return 1
def test_ECDSA(self):
import random
def test_point_validity( generator, x, y, expected ):
"""generator defines the curve; is (x,y) a point on
this curve? "expected" is True if the right answer is Yes."""
if point_is_valid( generator, x, y ) == expected:
#print("Point validity tested as expected.")
pass
else:
print("*** Point validity test gave wrong result.")
assert()
def test_signature_validity( Msg, Qx, Qy, R, S, expected ):
"""Msg = message, Qx and Qy represent the base point on
elliptic curve c192, R and S are the signature, and
"expected" is True iff the signature is expected to be valid."""
pubk = Public_key( generator_192,
Point( curve_192, Qx, Qy ) )
got = pubk.verifies( digest_integer( Msg ), Signature( R, S ) )
if got == expected:
#print("Signature tested as expected: got %s, expected %s." % \
# ( got, expected ))
pass
else:
print("*** Signature test failed: got %s, expected %s." %
( got, expected ))
assert()
#print("NIST Curve P-192:")
p192 = generator_192
# From X9.62:
d = 651056770906015076056810763456358567190100156695615665659
Q = d * p192
if Q.x() != 0x62B12D60690CDCF330BABAB6E69763B471F994DD702D16A5:
#print("*** p192 * d came out wrong.")
assert()
k = 6140507067065001063065065565667405560006161556565665656654
R = k * p192
if R.x() != 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD \
or R.y() != 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835:
#print("*** k * p192 came out wrong.")
assert()
u1 = 2563697409189434185194736134579731015366492496392189760599
u2 | |
def file_based_convert_examples_to_features(
examples, label_map, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_map,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["start_labels_ids"] = create_int_feature(feature.start_labels_ids)
features["end_labels_ids"] = create_int_feature(feature.end_labels_ids)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"start_labels_ids": tf.FixedLenFeature([seq_length], tf.int64),
"end_labels_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
# d = d.repeat(1)
d = d.shuffle(buffer_size=500)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def focal_loss(logits,labels,mask,num_labels,one_hot=True,lambda_param=1.5):
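# Binary focal loss in the style of Lin et al., "Focal Loss for Dense Object Detection":
# positives contribute -(1 - p)^lambda * log(p) and negatives contribute -p^lambda * log(1 - p),
# which down-weights well-classified tokens so training focuses on hard examples.
# `mask` zeroes out padded positions. Note that this helper appears unused in this
# snippet; create_model below uses plain softmax cross-entropy instead.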
probs = tf.nn.softmax(logits,axis=-1)
pos_probs = probs[:,:,1]
prob_label_pos = tf.where(tf.equal(labels,1),pos_probs,tf.ones_like(pos_probs))
prob_label_neg = tf.where(tf.equal(labels,0),pos_probs,tf.zeros_like(pos_probs))
loss = tf.pow(1. - prob_label_pos,lambda_param)*tf.log(prob_label_pos + 1e-7) + \
tf.pow(prob_label_neg,lambda_param)*tf.log(1. - prob_label_neg + 1e-7)
loss = -loss * tf.cast(mask,tf.float32)
loss = tf.reduce_sum(loss,axis=-1,keepdims=True)
# loss = loss/tf.cast(tf.reduce_sum(mask,axis=-1),tf.float32)
loss = tf.reduce_mean(loss)
return loss
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
start_labels_ids, end_labels_ids, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
output_layer = model.get_sequence_output()
hidden_size = output_layer.shape[-1].value
max_seq_length = output_layer.shape[1].value
## span-extraction head: dropout + two per-token linear classifiers (start / end); no CRF or BiLSTM layer is added here
if is_training:
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
hidden = tf.reshape(output_layer, shape=[-1, hidden_size])
with tf.variable_scope("start_logits"):
start_W = tf.get_variable("start_W", shape=[hidden_size, num_labels],
dtype=tf.float32, initializer=initializers.xavier_initializer())
start_b = tf.get_variable("start_b", shape=[num_labels], dtype=tf.float32,
initializer=tf.zeros_initializer())
start_pred = tf.nn.xw_plus_b(hidden, start_W, start_b)
with tf.variable_scope("end_logits"):
end_W = tf.get_variable("end_W", shape=[hidden_size, num_labels],
dtype=tf.float32, initializer=initializers.xavier_initializer())
end_b = tf.get_variable("end_b", shape=[num_labels], dtype=tf.float32,
initializer=tf.zeros_initializer())
end_pred = tf.nn.xw_plus_b(hidden, end_W, end_b)
with tf.variable_scope("start_loss"):
logits = tf.reshape(start_pred, [-1, max_seq_length, num_labels])
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(start_labels_ids, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
start_loss = tf.reduce_mean(per_example_loss)
probabilities = tf.nn.softmax(logits, axis=-1)
start_pred_ids = tf.argmax(probabilities,axis=-1)
with tf.variable_scope("end_start_loss"):
logits = tf.reshape(end_pred, [-1, max_seq_length, num_labels])
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(end_labels_ids, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
end_loss = tf.reduce_mean(per_example_loss)
probabilities = tf.nn.softmax(logits, axis=-1)
end_pred_ids = tf.argmax(probabilities,axis=-1)
total_loss = start_loss + end_loss
return (total_loss, logits, start_pred_ids, end_pred_ids)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
start_labels_ids = features["start_labels_ids"]
end_labels_ids = features["end_labels_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
# Build the model from these features: input_ids is the token-id representation of the input example, and start/end_labels_ids are the id representations of the labels.
(total_loss, logits, start_pred_ids, end_pred_ids) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, start_labels_ids,
end_labels_ids, num_labels, use_one_hot_embeddings)
pred_ids = tf.stack([start_pred_ids,end_pred_ids],axis=1)
print('-*'*30)
print(pred_ids)
tvars = tf.trainable_variables()
scaffold_fn = None
# Load the pretrained BERT model
if init_checkpoint:
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,
init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
'''
tf.logging.info("**** Trainable Variables ****")
# Print the variables loaded from the checkpoint
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
'''
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
scaffold_fn=scaffold_fn) #
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions=pred_ids,
scaffold_fn=scaffold_fn
)
return output_spec
return model_fn
def labeltoid(label_list):
label_map = {}
# map each label name to an integer index
for (i, label) in enumerate(label_list):
label_map[label] = i
# save the label -> index map
with codecs.open(os.path.join(FLAGS.output_dir, 'label2id.pkl'), 'wb') as w:
pickle.dump(label_map, w)
return label_map
def save_best_model(cur_ckpt_path,best_model_path):
cmd1 = 'cp '+cur_ckpt_path+'.index '+best_model_path+'.index'
cmd2 = 'cp '+cur_ckpt_path+'.meta '+best_model_path+'.meta'
cmd3 = 'cp '+cur_ckpt_path+'.data-00000-of-00001 '+best_model_path+'.data-00000-of-00001'
os.system(cmd1)
os.system(cmd2)
os.system(cmd3)
def get_pred_metric(result, eval_input_ids, tokenizer):
all_pred_ent = []
# print(len(result))
# print(len(eval_input_ids))
# print(result)
for i in range(len(result)):
# print(i)
tmp_input_ids = eval_input_ids[i]
start_preds = result[i][0]
end_preds = result[i][1]
start_inds = []
end_inds = []
# print(start_preds)
# print(end_preds)
for ind in range(len(start_preds)):
if(start_preds[ind]==1):
start_inds.append(ind)
for ind in range(len(end_preds)):
if(end_preds[ind]==1):
end_inds.append(ind)
if(len(start_inds)==0):
all_pred_ent.append('')
else:
ans = []
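# back() greedily pairs predicted start/end indices: end indices that precede the
# current start are dropped, the closest preceding start is preferred when several
# candidates precede the same end, and the remaining indices are paired recursively.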
def back(start_inds, end_inds):
# global ans
if(len(start_inds)==0 or len(end_inds)==0):
return
while(len(end_inds)>0 and end_inds[0]<start_inds[0]):
end_inds = end_inds[1:]
if(len(end_inds)>0):
while(len(start_inds)>1 and (end_inds[0]-start_inds[1])>0 and ((end_inds[0]-start_inds[0])>(end_inds[0]-start_inds[1]))):
start_inds = start_inds[1:]
ans.append((start_inds[0],end_inds[0]))
back(start_inds[1:],end_inds[1:])
back(start_inds, end_inds)
if(len(ans)==0):
all_pred_ent.append('')
else:
all_tmp_ent = []
for item in ans:
s_ind = item[0]
e_ind = item[1]
# print(s_ind, e_ind)
tmp_ent = ' '.join(tokenizer.convert_ids_to_tokens(tmp_input_ids[s_ind:e_ind+1])).replace(' ##','')
end_str = ''
e_ind += 1
while((e_ind<len(tmp_input_ids)-1) and ('##' in tokenizer.convert_ids_to_tokens([tmp_input_ids[e_ind]])[0])):
end_str += tokenizer.convert_ids_to_tokens([tmp_input_ids[e_ind]])[0].replace('##','')
e_ind += 1
tmp_ent += end_str
all_tmp_ent.append(tmp_ent)
# print(all_tmp_ent)
all_pred_ent.append(all_tmp_ent)
# print(' '.join(tokenizer.convert_ids_to_tokens(tmp_input_ids)))
# print(all_tmp_ent)
# print(all_pred_ent)
# print(len(all_pred_ent))
## save result in file
with open(os.path.join(FLAGS.output_dir, 'dev_pred_answer.txt'), 'w') as f:
for entities in all_pred_ent:
if len(entities) == 0:
f.write('\n')
else:
f.write('\t'.join(entities) + '\n')
with open(os.path.join(FLAGS.data_dir, 'dev_answer.txt'), 'r') as f:
gold = f.readlines()
all_pred = 0
for item in all_pred_ent:
if(item==''):
continue
else:
for i in item:
all_pred += 1
tp = 0
all_ann = 0
for i in range(len(gold)):
if(len(gold[i].strip())!=0):
# print(gold[i])
for k in gold[i].strip().split('\t'):
all_ann += 1
for i in range(len(gold)):
if(all_pred_ent[i]!=''):
for j in all_pred_ent[i]:
for e in gold[i].strip().split('\t'):
if j.lower() == e.lower():
tp += 1
break
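# Entity-level scoring with case-insensitive exact string match:
#   precision = TP / number of predicted entities
#   recall    = TP / number of gold entities
#   F1        = 2 * precision * recall / (precision + recall)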
p = tp/all_pred
r = tp/all_ann
f = (2*p*r)/(p+r)
f1 = f
print(tp,all_pred,all_ann)
print(p,r,f)
# print(all_pred_ent)
return f1
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"ner": NerProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
## del last training file
if(FLAGS.do_train and FLAGS.clean):
if os.path.exists(FLAGS.output_dir):
def del_file(path):
ls = os.listdir(path)
for i in ls:
c_path = os.path.join(path, i)
if os.path.isdir(c_path):
del_file(c_path)
else:
os.remove(c_path)
try:
del_file(FLAGS.output_dir)
except Exception as e:
print(e)
print('please remove the files in the output dir and data.conf')
exit(-1)
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
label_map = labeltoid(label_list)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
print(tokenizer.convert_ids_to_tokens([101, 2424, 1996, 15316, 4668, 1997, 5423, 15660, 102 ]))
# sys.exit(0)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=None,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
model_dir=FLAGS.output_dir,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_map, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_input_ids = []
for (ex_index, example) in enumerate(eval_examples):
feature = convert_single_example(ex_index, example, label_map,
FLAGS.max_seq_length, tokenizer)
eval_input_ids.append(feature.input_ids)
num_actual_eval_examples = len(eval_examples)
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_map, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False)
## | |
"bien",
"honk",
"doughnut",
"tags",
"wrapping",
"tar",
"om",
"dinozzo",
"ducky",
"unfaithful",
"delia",
"rae",
"jojo",
"stance",
"barker",
"financially",
"clam",
"clanking",
"sarcastic",
"blur",
"organizing",
"nosy",
"boyle",
"pasha",
"my-",
"attacker",
"horribly",
"inject",
"descend",
"wi",
"'re-",
"liberated",
"burner",
"fluffy",
"melancholy",
"puppets",
"anal",
"grieve",
"awe",
"gallons",
"earring",
"unprecedented",
"yank",
"jamaica",
"brutally",
"extortion",
"guardians",
"disciples",
"simultaneously",
"elves",
"malibu",
"lupin",
"postman",
"neville",
"senseless",
"chapman",
"sittin",
"hostel",
"marcia",
"melvin",
"cockroach",
"clamp",
"poets",
"chemist",
"professionally",
"faded",
"vargas",
"gunpowder",
"inc",
"gala",
"surroundings",
"wisconsin",
"rivals",
"cackling",
"withstand",
"contemporary",
"harlem",
"half-hour",
"marlon",
"endured",
"arsenal",
"underway",
"donuts",
"daryl",
"frickin",
"injected",
"digs",
"siegfried",
"elisabeth",
"billionaire",
"cardboard",
"hugs",
"'an",
"bandages",
"everlasting",
"liverpool",
"cooperative",
"diploma",
"neural",
"fortunes",
"rural",
"abbott",
"receptionist",
"antiques",
"labels",
"sizes",
"millennium",
"layout",
"immoral",
"manages",
"hisses",
"usher",
"impulsive",
"pollution",
"admirer",
"'r",
"rejoice",
"iowa",
"marlene",
"hindi",
"holder",
"squeezed",
"merge",
"journalism",
"nominated",
"sundown",
"assistants",
"miraculous",
"stared",
"prescribed",
"lotta",
"swearing",
"guarantees",
"massachusetts",
"junction",
"consumption",
"astrid",
"metropolis",
"iceland",
"bridal",
"virgins",
"victorious",
"squirt",
"pads",
"modified",
"hanson",
"casket",
"blaze",
"kimble",
"cliffs",
"destroyer",
"offshore",
"tendency",
"nip",
"scanning",
"first-class",
"viva",
"intensive",
"gustav",
"endangered",
"va",
"circling",
"ave",
"stashed",
"das",
"backward",
"hai",
"chamberlain",
"sari",
"thermal",
"opponents",
"fiber",
"last-minute",
"toto",
"circumstance",
"geography",
"stargate",
"interpret",
"sicily",
"pisses",
"preaching",
"transporter",
"bosom",
"salsa",
"eighty",
"shred",
"dread",
"duration",
"beggars",
"mein",
"josephine",
"networks",
"formality",
"heater",
"par",
"smug",
"ordeal",
"rhonda",
"agriculture",
"aigoo",
"katya",
"fowler",
"descended",
"lasting",
"abyss",
"marries",
"aurora",
"sleigh",
"frenchman",
"rouge",
"survives",
"margo",
"redhead",
"terrence",
"creativity",
"dominant",
"isis",
"kip",
"corny",
"aubrey",
"graphic",
"irrational",
"mathematical",
"sacked",
"paws",
"chariot",
"havoc",
"andreas",
"midget",
"celestial",
"cetera",
"nino",
"upgrade",
"blasting",
"skulls",
"it-it",
"mean-",
"videotape",
"wedded",
"babysit",
"ancestor",
"transit",
"rained",
"thingy",
"wellington",
"abide",
"reserves",
"'they",
"rim",
"chico",
"settles",
"decisive",
"motors",
"cheesy",
"symbolic",
"roxy",
"clink",
"handicapped",
"mating",
"cranky",
"brightest",
"molecules",
"meyer",
"sheridan",
"sorry.",
"animation",
"grail",
"voluntarily",
"creeping",
"furnace",
"thine",
"titus",
"pierced",
"bu",
"chinatown",
"slade",
"sai",
"toothpaste",
"cuddle",
"langley",
"egyptians",
"bankruptcy",
"lever",
"shaman",
"nipple",
"weeds",
"becker",
"reacted",
"mashed",
"sloane",
"regent",
"bentley",
"tiles",
"decoy",
"crusade",
"vivid",
"slides",
"jennings",
"catering",
"dishonest",
"subs",
"lurking",
"airplanes",
"shaky",
"barren",
"alligator",
"moth",
"uncover",
"lorna",
"axel",
"expressing",
"clacking",
"doggie",
"airlines",
"dum",
"unicorn",
"jackets",
"goofy",
"marcy",
"businessmen",
"bums",
"folded",
"defender",
"alarmed",
"goody",
"noose",
"chic",
"forgiving",
"thatcher",
"index",
"blush",
"fertile",
"amateurs",
"deliveries",
"armour",
"werner",
"shits",
"twinkle",
"parted",
"syndicate",
"alexandra",
"righty",
"regions",
"technician",
"waffles",
"skirts",
"baroness",
"castro",
"malfunction",
"memorable",
"middle-aged",
"thea",
"calories",
"cuisine",
"motorbike",
"snoop",
"acknowledged",
"spelled",
"evolve",
"cox",
"sedative",
"array",
"tenderness",
"klingon",
"garland",
"smuggled",
"nicola",
"raju",
"reversed",
"royce",
"beforehand",
"continuous",
"yoko",
"cecil",
"seniors",
"mri",
"synthetic",
"forgave",
"discoveries",
"kimmy",
"agitated",
"attendance",
"maple",
"pools",
"wanker",
"casa",
"firearms",
"offenders",
"obligations",
"moan",
"signor",
"graffiti",
"supplied",
"linen",
"defect",
"patriotic",
"ethical",
"pest",
"lengths",
"vin",
"minnie",
"spectrum",
"distinction",
"decorate",
"bounds",
"hums",
"mandarin",
"reluctant",
"rand",
"retain",
"justine",
"educate",
"minnesota",
"mp",
"cruiser",
"permits",
"screwdriver",
"exploration",
"mace",
"newborn",
"shivering",
"slime",
"oddly",
"badger",
"bruised",
"luigi",
"attaboy",
"eyeballs",
"institutions",
"panama",
"boundary",
"hypothesis",
"ffff00",
"drought",
"rembrandt",
"absorbed",
"clinking",
"executioner",
"darts",
"catalina",
"lured",
"lingerie",
"gon",
"flaws",
"thinner",
"lucien",
"tug",
"cornered",
"bertie",
"uptown",
"clash",
"aerial",
"hansen",
"relentless",
"viable",
"raul",
"wen",
"scholars",
"marley",
"limitations",
"shelves",
"funerals",
"criticize",
"jacqueline",
"forrest",
"shaun",
"continental",
"bookstore",
"balanced",
"workplace",
"theta",
"ramsey",
"dixie",
"bankers",
"dumbass",
"superstitious",
"clause",
"fudge",
"bennet",
"assignments",
"councilman",
"slogan",
"trinity",
"quadrant",
"boarded",
"ivory",
"velocity",
"composer",
"mccarthy",
"initiated",
"covert",
"builder",
"insignificant",
"dire",
"gears",
"comply",
"credentials",
"organizations",
"applaud",
"trend",
"providence",
"sparkle",
"chump",
"derrick",
"donations",
"flooding",
"maintained",
"snowing",
"shaolin",
"ladyship",
"artifacts",
"allegedly",
"howl",
"deploy",
"showtime",
"thornton",
"deposition",
"fractures",
"handshake",
"neglect",
"adapted",
"patrols",
"bess",
"cyber",
"hetty",
"protocols",
"overdue",
"slayer",
"headphones",
"identities",
"appoint",
"rotation",
"solely",
"tasha",
"aging",
"pitching",
"examining",
"viewing",
"memorize",
"freshen",
"mango",
"valued",
"sonar",
"crichton",
"decay",
"eu",
"nightfall",
"olives",
"miniature",
"omen",
"kristin",
"bai",
"what-what",
"chum",
"posse",
"mutiny",
"toaster",
"span",
"jumbo",
"blames",
"mystical",
"researchers",
"exposing",
"dung",
"welsh",
"frat",
"waterfall",
"lifts",
"disgusted",
"yikes",
"windshield",
"akira",
"maintaining",
"receives",
"plantation",
"antenna",
"bodyguards",
"havana",
"anya",
"edit",
"superficial",
"allergies",
"invites",
"comparing",
"dirk",
"cartoons",
"mexicans",
"timber",
"supervision",
"siberia",
"winding",
"rust",
"intensity",
"hiccup",
"francois",
"johan",
"jogging",
"confrontation",
"flyers",
"trustworthy",
"jarvis",
"coaster",
"paragraph",
"mosquitoes",
"adios",
"lovin",
"unexpectedly",
"russo",
"amuse",
"splashing",
"soviets",
"lorry",
"dominate",
"bundy",
"howie",
"jp",
"unreal",
"screens",
"mouthing",
"ziggy",
"failures",
"commanded",
"recon",
"representation",
"bending",
"gallant",
"satisfying",
"cheater",
"pornography",
"urn",
"assign",
"uncles",
"vine",
"auspicious",
"tripping",
"seafood",
"mellow",
"announcing",
"resumes",
"hawaiian",
"ac",
"centers",
"adultery",
"scatter",
"lotion",
"opposing",
"dominated",
"unpack",
"violate",
"transmit",
"testicles",
"zhao",
"ames",
"t.c.",
"kayla",
"hungary",
"albums",
"well-being",
"porridge",
"gavel",
"prem",
"rattles",
"eliot",
"titties",
"strengthen",
"sensitivity",
"fitzgerald",
"tamara",
"removal",
"sacks",
"downloaded",
"morocco",
"sniffling",
"crowns",
"saddam",
"nerds",
"accepts",
"hive",
"radha",
"outdoor",
"raquel",
"dew",
"ing",
"mt",
"currents",
"explanations",
"soaking",
"tanaka",
"vitals",
"navigation",
"joins",
"intrigued",
"harp",
"führer",
"margin",
"everett",
"anthem",
"aa",
"kylie",
"'c",
"distracting",
"fertility",
"amazingly",
"horrific",
"indictment",
"superstition",
"ultrasound",
"shifted",
"sincerity",
"hamburg",
"davy",
"trashed",
"casper",
"dumplings",
"digest",
"whacked",
"jacks",
"kiddin",
"casualty",
"bethany",
"thump",
"proportion",
"quantity",
"janine",
"deceiving",
"weave",
"hikaru",
"sdh",
"corrupted",
"gallows",
"budge",
"fraction",
"afar",
"hiroshi",
"leela",
"measurements",
"landlady",
"certified",
"dusk",
"luisa",
"insisting",
"hilton",
"indoors",
"hawkeye",
"saliva",
"homey",
"whispered",
"maura",
"cyanide",
"marched",
"accounted",
"diver",
"eligible",
"robbins",
"ch00ff00",
"messes",
"abandoning",
"blossoms",
"wiggle",
"awaken",
"cid",
"stained",
"ballard",
"elevated",
"ryo",
"transparent",
"unclear",
"strokes",
"aria",
"hwa",
"solidarity",
"sidekick",
"starter",
"regarded",
"corinne",
"vulcan",
"bradford",
"godzilla",
"belgian",
"seemingly",
"cling",
"components",
"vibrates",
"missouri",
"constance",
"batting",
"chanel",
"sap",
"marbles",
"shifting",
"wiping",
"refill",
"famine",
"sneeze",
"puzzles",
"provisions",
"shite",
"walkin",
"syringe",
"platinum",
"preferably",
"oyster",
"exhaust",
"electromagnetic",
"consumer",
"lasagna",
"barnaby",
"catastrophic",
"winchester",
"woah",
"be-",
"evacuated",
"serge",
"relic",
"describes",
"merger",
"muck",
"buds",
"socially",
"enlisted",
"deposits",
"philippines",
"archbishop",
"smuggle",
"climax",
"progressive",
"wheezing",
"singles",
"pillar",
"fearful",
"spade",
"saucer",
"ratio",
"craving",
"addicts",
"clifford",
"orson",
"nashville",
"beauties",
"obscure",
"signora",
"troublesome",
"rubble",
"largely",
"fong",
"irina",
"ahn",
"admiration",
"lizzy",
"flap",
"brutality",
"mariana",
"prevail",
"tattooed",
"brats",
"anguish",
"whim",
"nada",
"leaks",
"cum",
"cupid",
"hana",
"colonial",
"frau",
"speeds",
"'uld",
"perpetrator",
"watermelon",
"emptied",
"sponsored",
"rooftop",
"athena",
"managers",
"extraction",
"stat",
"rhino",
"diarrhea",
"disregard",
"warmed",
"goggles",
"colombia",
"struggles",
"fitness",
"scalp",
"veterans",
"suitcases",
"kensi",
"watchman",
"caress",
"devlin",
"exclusively",
"disciple",
"misty",
"aura",
"helm",
"crockett",
"inhale",
"buckets",
"adored",
"shillings",
"kari",
"tents",
"revoir",
"hedge",
"e.r.",
"snorting",
"viper",
"digger",
"breaker",
"meatballs",
"suppress",
"witty",
"wonderfully",
"shogun",
"encouragement",
"intercepted",
"goddamned",
"posting",
"cleanse",
"f.b.i.",
"no-",
"competitor",
"abbot",
"trenches",
"sizzling",
"lyon",
"mustang",
"stewie",
"kg",
"rested",
"reflected",
"stand-up",
"habitat",
"molecular",
"obnoxious",
"24-hour",
"wraith",
"sarcasm",
"tung",
"renowned",
"tofu",
"wildly",
"blondie",
"refund",
"goon",
"pulp",
"exaggerated",
"shad0",
"embedded",
"chateau",
"harlan",
"provoked",
"antony",
"guido",
"nile",
"alfie",
"cages",
"skates",
"resemble",
"sticker",
"lennox",
"unstoppable",
"veal",
"fe",
"isle",
"characteristics",
"earthquakes",
"cold-blooded",
"ambushed",
"zap",
"reaper",
"deployed",
"biblical",
"puck",
"mystic",
"aya",
"gaby",
"intends",
"muffins",
"speedy",
"'course",
"republicans",
"bolts",
"enrique",
"hoax",
"raided",
"intersection",
"presently",
"supplier",
"shuffle",
"edited",
"rubbed",
"vegan",
"outdoors",
"distinctive",
"upcoming",
"brighton",
"wagons",
"granger",
"peeing",
"oracle",
"glitter",
"amulet",
"suspend",
"guild",
"convertible",
"squared",
"battling",
"steward",
"weston",
"gateway",
"rites",
"distributed",
"pavement",
"projector",
"rugby",
"jt",
"fragment",
"baldwin",
"obscene",
"zebra",
"raping",
"analyst",
"eta",
"elbows",
"detonator",
"disneyland",
"rails",
"earthly",
"modesty",
"assess",
"vinci",
"librarian",
"do-",
"moran",
"suffice",
"gravitational",
"crowley",
"slumber",
"homie",
"bribes",
"aces",
"sodium",
"hubert",
"trajectory",
"credible",
"declined",
"canary",
"eyesight",
"jug",
"duplicate",
"developments",
"capitalism",
"polished",
"defenses",
"xi",
"crates",
"fences",
"differ",
"hodgins",
"enzo",
"chaotic",
"gardening",
"gram",
"tinker",
"forecast",
"ti",
"pinched",
"tangled",
"macy",
"plunge",
"bulletproof",
"module",
"firewood",
"pizzas",
"overly",
"retail",
"novak",
"grover",
"arturo",
"appalling",
"rows",
"felicia",
"concentrating",
"transferring",
"mash",
"growl",
"sliced",
"rewards",
"braces",
"livestock",
"users",
"recruiting",
"gunman",
"dictate",
"bathrooms",
"nobles",
"picky",
"linus",
"platter",
"vultures",
"organism",
"sweeter",
"scarecrow",
"manufacturing",
"pup",
"mitzvah",
"crackles",
"proudly",
"interpreter",
"leigh",
"genetically",
"frying",
"redeem",
"cara",
"ledge",
"qing",
"tyranny",
"charleston",
"cutest",
"skins",
"rosy",
"nomination",
"textbook",
"descendants",
"sabine",
"saunders",
"hast",
"dubois",
"finland",
"loretta",
"unsolved",
"pioneer",
"or-",
"ramirez",
"suzie",
"hugged",
"angelica",
"cesare",
"throttle",
"powered",
"carving",
"thrive",
"d.j.",
"allergy",
"icu",
"suzy",
"pong",
"squares",
"alphabet",
"threesome",
"accordingly",
"sect",
"maternity",
"scarce",
"internship",
"morty",
"tones",
"sirs",
"unwell",
"nellie",
"import",
"temporal",
"mohammed",
"anticipate",
"democrats",
"pfft",
"terminated",
"spree",
"ami",
"interviewer",
"respectfully",
"futile",
"dictator",
"stuffy",
"wai",
"mammals",
"fasten",
"neptune",
"giggle",
"grandchild",
"pharmaceutical",
"persian",
"tactic",
"itching",
"observer",
"tesla",
"wallpaper",
"thorne",
"jiang",
"austrian",
"output",
"'malley",
"goliath",
"trophies",
"joyful",
"spouse",
"giorgio",
"rightly",
"hobbies",
"'there",
"vito",
"naming",
"yates",
"stale",
"teamwork",
"obedient",
"switches",
"rating",
"regulation",
"raving",
"dioxide",
"boner",
"bc",
"implant",
"mari",
"satisfactory",
"espresso",
"bach",
"sleeper",
"construct",
"rations",
"munch",
"anjali",
"coordinate",
"stag",
"dominique",
"backseat",
"chrissy",
"analyzed",
"reschedule",
"uphold",
"sorority",
"bono",
"princeton",
"inconvenient",
"crumbs",
"reconstruction",
"pipeline",
"reunited",
"confidentiality",
"embraced",
"overruled",
"enlightenment",
"puddle",
"mortals",
"comfortably",
"vlad",
"misha",
"ukraine",
"swam",
"annette",
"demonic",
"sora",
"demise",
"donatello",
"fungus",
"barefoot",
"tacos",
"skeletons",
"beak",
"fanfare",
"maryland",
"felipe",
"picard",
"mascot",
"lemonis",
"drastic",
"proceeding",
"presidents",
"intrude",
"stiles",
"sums",
"deprived",
"advertisement",
"farting",
"straightened",
"foremost",
"adjustment",
"charade",
| |
"""Download and install structured genome data and aligner index files.
Downloads prepared FASTA files, aligner indexes (for example BWA, Bowtie, and Novoalign),
and other genome data for use in automated pipelines. Specify the genomes and aligners
to use in an input biodata.yaml configuration file.
The main targets are fabric functions:
- install_data -- Install biological data from scratch, including indexing genomes.
- install_data_s3 -- Install biological data, downloading pre-computed indexes from S3.
- upload_s3 -- Upload created indexes to biodata S3 bucket.
"""
import os
import operator
import socket
import subprocess
from contextlib import contextmanager
from fabric.api import *
from fabric.contrib.files import *
from fabric.context_managers import path
try:
import yaml
except ImportError:
yaml = None
try:
import boto
except ImportError:
boto = None
from cloudbio.biodata import galaxy
from cloudbio.biodata.dbsnp import download_dbsnp
from cloudbio.biodata.rnaseq import download_transcripts
# -- Configuration for genomes to download and prepare
class _DownloadHelper:
def __init__(self):
self.config = {}
def ucsc_name(self):
return None
def _exists(self, fname, seq_dir):
"""Check if a file exists in either download or final destination.
"""
return exists(fname) or exists(os.path.join(seq_dir, fname))
class UCSCGenome(_DownloadHelper):
def __init__(self, genome_name, dl_name=None):
_DownloadHelper.__init__(self)
self.data_source = "UCSC"
self._name = genome_name
self.dl_name = dl_name if dl_name is not None else genome_name
self._url = "ftp://hgdownload.cse.ucsc.edu/goldenPath/%s/bigZips" % \
genome_name
def ucsc_name(self):
return self._name
def download(self, seq_dir):
zipped_file = None
genome_file = "%s.fa" % self._name
if not self._exists(genome_file, seq_dir):
zipped_file = self._download_zip(seq_dir)
if zipped_file.endswith(".tar.gz"):
run("tar -xzpf %s" % zipped_file)
elif zipped_file.endswith(".zip"):
run("unzip %s" % zipped_file)
elif zipped_file.endswith(".gz"):
run("gunzip -c %s > out.fa" % zipped_file)
else:
raise ValueError("Do not know how to handle: %s" % zipped_file)
tmp_file = genome_file.replace(".fa", ".txt")
with settings(warn_only=True):
result = run("ls *.fa")
# some UCSC downloads have the files in multiple directories
# mv them to the parent directory and delete the child directories
#ignore_random = " -a \! -name '*_random.fa' -a \! -name 'chrUn*'" \
# "-a \! -name '*hap*.fa'"
ignore_random = ""
if result.failed:
run("find . -name '*.fa'%s -exec mv {} . \;" % ignore_random)
run("find . -type d -a \! -name '\.' | xargs rm -rf")
result = run("find . -name '*.fa'%s" % ignore_random)
result = [x.strip() for x in result.split("\n")]
result.sort()
run("cat %s > %s" % (" ".join(result), tmp_file))
run("rm -f *.fa")
run("mv %s %s" % (tmp_file, genome_file))
return genome_file, [zipped_file]
def _download_zip(self, seq_dir):
for zipped_file in ["chromFa.tar.gz", "%s.fa.gz" % self._name,
"chromFa.zip"]:
if not self._exists(zipped_file, seq_dir):
with settings(warn_only=True):
result = run("wget %s/%s" % (self._url, zipped_file))
if not result.failed:
break
else:
break
return zipped_file
class NCBIRest(_DownloadHelper):
"""Retrieve files using the TogoWS REST server pointed at NCBI.
"""
def __init__(self, name, refs, dl_name=None):
_DownloadHelper.__init__(self)
self.data_source = "NCBI"
self._name = name
self._refs = refs
self.dl_name = dl_name if dl_name is not None else name
self._base_url = "http://togows.dbcls.jp/entry/ncbi-nucleotide/%s.fasta"
def download(self, seq_dir):
genome_file = "%s.fa" % self._name
if not self._exists(genome_file, seq_dir):
for ref in self._refs:
run("wget %s" % (self._base_url % ref))
run("ls -l")
sed('%s.fasta' % ref, '^>.*$', '>%s' % ref, '1')
tmp_file = genome_file.replace(".fa", ".txt")
run("cat *.fasta > %s" % tmp_file)
run("rm -f *.fasta")
run("rm -f *.bak")
run("mv %s %s" % (tmp_file, genome_file))
return genome_file, []
class EnsemblGenome(_DownloadHelper):
"""Retrieve genome FASTA files from Ensembl.
ftp://ftp.ensemblgenomes.org/pub/plants/release-3/fasta/
arabidopsis_thaliana/dna/Arabidopsis_thaliana.TAIR9.55.dna.toplevel.fa.gz
ftp://ftp.ensembl.org/pub/release-56/fasta/
caenorhabditis_elegans/dna/Caenorhabditis_elegans.WS200.56.dna.toplevel.fa.gz
"""
def __init__(self, ensembl_section, release_number, release2, organism,
name, convert_to_ucsc=False, dl_name = None):
_DownloadHelper.__init__(self)
self.data_source = "Ensembl"
if ensembl_section == "standard":
url = "ftp://ftp.ensembl.org/pub/"
else:
url = "ftp://ftp.ensemblgenomes.org/pub/%s/" % ensembl_section
url += "release-%s/fasta/%s/dna/" % (release_number, organism.lower())
self._url = url
release2 = ".%s" % release2 if release2 else ""
self._get_file = "%s.%s%s.dna.toplevel.fa.gz" % (organism, name,
release2)
self._name = name
self.dl_name = dl_name if dl_name is not None else name
self._convert_to_ucsc = convert_to_ucsc
def download(self, seq_dir):
genome_file = "%s.fa" % self._name
if not self._exists(self._get_file, seq_dir):
run("wget %s%s" % (self._url, self._get_file))
if not self._exists(genome_file, seq_dir):
run("gunzip -c %s > %s" % (self._get_file, genome_file))
if self._convert_to_ucsc:
#run("sed s/ / /g %s" % genome_file)
raise NotImplementedError("Replace with chr")
return genome_file, [self._get_file]
class BroadGenome(_DownloadHelper):
"""Retrieve genomes organized and sorted by Broad for use with GATK.
Uses the UCSC-name compatible versions of the GATK bundles.
"""
def __init__(self, name, bundle_version, target_fasta, dl_name=None):
_DownloadHelper.__init__(self)
self.data_source = "UCSC"
self._name = name
self.dl_name = dl_name if dl_name is not None else name
self._target = target_fasta
self._ftp_url = "ftp://gsapubftp-anonymous:@ftp.broadinstitute.org/bundle/" + \
"{ver}/{org}/".format(ver=bundle_version, org=self.dl_name)
def download(self, seq_dir):
org_file = "%s.fa" % self._name
if not self._exists(org_file, seq_dir):
run("wget %s%s.gz" % (self._ftp_url, self._target))
run("gunzip %s.gz" % self._target)
run("mv %s %s" % (self._target, org_file))
return org_file, []
BROAD_BUNDLE_VERSION = "2.3"
DBSNP_VERSION = "137"
GENOMES_SUPPORTED = [
("phiX174", "phix", NCBIRest("phix", ["NC_001422.1"])),
("Scerevisiae", "sacCer2", UCSCGenome("sacCer2")),
("Mmusculus", "mm10", UCSCGenome("mm10")),
("Mmusculus", "mm9", UCSCGenome("mm9")),
("Mmusculus", "mm8", UCSCGenome("mm8")),
("Hsapiens", "hg18", BroadGenome("hg18", BROAD_BUNDLE_VERSION,
"Homo_sapiens_assembly18.fasta")),
("Hsapiens", "hg19", BroadGenome("hg19", BROAD_BUNDLE_VERSION,
"ucsc.hg19.fasta")),
("Hsapiens", "GRCh37", BroadGenome("GRCh37", BROAD_BUNDLE_VERSION,
"human_g1k_v37.fasta", "b37")),
("Rnorvegicus", "rn5", UCSCGenome("rn5")),
("Rnorvegicus", "rn4", UCSCGenome("rn4")),
("Xtropicalis", "xenTro2", UCSCGenome("xenTro2")),
("Athaliana", "araTha_tair9", EnsemblGenome("plants", "6", "",
"Arabidopsis_thaliana", "TAIR9")),
("Dmelanogaster", "dm3", UCSCGenome("dm3")),
("Celegans", "WS210", EnsemblGenome("standard", "60", "60",
"Caenorhabditis_elegans", "WS210")),
("Mtuberculosis_H37Rv", "mycoTube_H37RV", NCBIRest("mycoTube_H37RV",
["NC_000962"])),
("Msmegmatis", "92", NCBIRest("92", ["NC_008596.1"])),
("Paeruginosa_UCBPP-PA14", "386", NCBIRest("386", ["CP000438.1"])),
("Ecoli", "eschColi_K12", NCBIRest("eschColi_K12", ["U00096.2"])),
("Amellifera_Honeybee", "apiMel3", UCSCGenome("apiMel3")),
("Cfamiliaris_Dog", "canFam3", UCSCGenome("canFam3")),
("Cfamiliaris_Dog", "canFam2", UCSCGenome("canFam2")),
("Drerio_Zebrafish", "danRer6", UCSCGenome("danRer6")),
("Ecaballus_Horse", "equCab2", UCSCGenome("equCab2")),
("Fcatus_Cat", "felCat3", UCSCGenome("felCat3")),
("Ggallus_Chicken", "galGal3", UCSCGenome("galGal3")),
("Tguttata_Zebra_finch", "taeGut1", UCSCGenome("taeGut1")),
]
GENOME_INDEXES_SUPPORTED = ["bowtie", "bowtie2", "bwa", "maq", "novoalign", "novoalign-cs",
"ucsc", "mosaik", "eland", "bfast", "arachne"]
DEFAULT_GENOME_INDEXES = ["ucsc", "seq"]
# -- Fabric instructions
def _check_version():
version = env.version
if int(version.split(".")[0]) < 1:
raise NotImplementedError("Please install fabric version 1 or better")
def install_data(config_source):
"""Main entry point for installing useful biological data.
"""
_check_version()
# Append a potentially custom system install path to PATH so tools are found
with path(os.path.join(env.system_install, 'bin')):
genomes, genome_indexes, config = _get_genomes(config_source)
genome_indexes += [x for x in DEFAULT_GENOME_INDEXES if x not in genome_indexes]
_data_ngs_genomes(genomes, genome_indexes)
_install_additional_data(genomes, genome_indexes, config)
def install_data_s3(config_source):
"""Install data using pre-existing genomes present on Amazon s3.
"""
_check_version()
genomes, genome_indexes, config = _get_genomes(config_source)
genome_indexes += [x for x in DEFAULT_GENOME_INDEXES if x not in genome_indexes]
_download_genomes(genomes, genome_indexes)
_install_additional_data(genomes, genome_indexes, config)
def install_data_rsync(config_source):
"""Install data using pre-existing genomes from Galaxy rsync servers.
"""
_check_version()
genomes, genome_indexes, config = _get_genomes(config_source)
genome_indexes += [x for x in DEFAULT_GENOME_INDEXES if x not in genome_indexes]
genome_dir = _make_genome_dir()
galaxy.rsync_genomes(genome_dir, genomes, genome_indexes)
def upload_s3(config_source):
"""Upload prepared genome files by identifier to Amazon s3 buckets.
"""
if boto is None:
raise ImportError("install boto to upload to Amazon s3")
if env.host != "localhost" and not env.host.startswith(socket.gethostname()):
raise ValueError("Need to run S3 upload on a local machine")
_check_version()
genomes, genome_indexes, config = _get_genomes(config_source)
genome_indexes += [x for x in DEFAULT_GENOME_INDEXES if x not in genome_indexes]
_data_ngs_genomes(genomes, genome_indexes)
_upload_genomes(genomes, genome_indexes)
def _install_additional_data(genomes, genome_indexes, config):
download_dbsnp(genomes, BROAD_BUNDLE_VERSION, DBSNP_VERSION)
download_transcripts(genomes, env)
for custom in config.get("custom", []):
_prep_custom_genome(custom, genomes, genome_indexes, env)
if config.get("install_liftover", False):
lift_over_genomes = [g.ucsc_name() for (_, _, g) in genomes if g.ucsc_name()]
_data_liftover(lift_over_genomes)
if config.get("install_uniref", False):
_data_uniref()
def _get_genomes(config_source):
if isinstance(config_source, dict):
config = config_source
else:
if yaml is None:
raise ImportError("install yaml to read configuration from %s" % config_source)
with open(config_source) as in_handle:
config = yaml.load(in_handle)
genomes = []
genomes_config = config["genomes"] or []
env.logger.info("List of genomes to get (from the config file at '{0}'): {1}"\
.format(config_source, ', '.join(g.get('name', g["dbkey"]) for g in genomes_config)))
for g in genomes_config:
ginfo = None
for info in GENOMES_SUPPORTED:
if info[1] == g["dbkey"]:
ginfo = info
break
assert ginfo is not None, "Did not find download info for %s" % g["dbkey"]
name, gid, manager = ginfo
manager.config = g
genomes.append((name, gid, manager))
indexes = config["genome_indexes"] or []
return genomes, indexes, config
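# Illustrative note (not part of the original module): because _get_genomes() accepts
# either a path to a YAML file or an already-parsed dict, the install targets can also
# be driven directly from Python, e.g. (hypothetical values):
#   install_data({"genomes": [{"name": "Hsapiens", "dbkey": "hg19"}],
#                 "genome_indexes": ["bwa", "bowtie2"]})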
# == Decorators and context managers
def _if_installed(pname):
"""Run if the given program name is installed.
"""
def argcatcher(func):
def decorator(*args, **kwargs):
with settings(
hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True):
result = run(pname)
if result.return_code not in [127]:
return func(*args, **kwargs)
return decorator
return argcatcher
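# Hedged usage sketch for the decorator above (the decorated function and program
# name are hypothetical): the wrapped body only runs when the named program is found
# on the remote PATH, i.e. when running it returns an exit code other than 127.
#   @_if_installed("bowtie-build")
#   def _index_bowtie(ref_file):
#       ...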
@contextmanager
def _make_tmp_dir():
work_dir = os.path.join(env.data_files, "tmp")
if not exists(work_dir):
run("mkdir %s" % work_dir)
yield work_dir
if exists(work_dir):
run("rm -rf %s" % work_dir)
# ## Genomes index for next-gen sequencing tools
def _make_genome_dir():
genome_dir = os.path.join(env.data_files, "genomes")
with settings(warn_only=True):
result = run("mkdir -p %s" % genome_dir)
if result.failed:
sudo("mkdir -p %s" % genome_dir)
sudo("chown -R %s %s" % (env.user, genome_dir))
return genome_dir
def _data_ngs_genomes(genomes, genome_indexes):
"""Download and create index files for next generation genomes.
"""
genome_dir = _make_genome_dir()
for organism, genome, manager in genomes:
cur_dir = os.path.join(genome_dir, organism, genome)
| |
<gh_stars>0
# Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from collections import defaultdict
import os
import time
import pickle
from Queue import Queue
import logging
import sys
import mesos.interface
import mesos.native
from mesos.interface import mesos_pb2
from toil.batchSystems.abstractBatchSystem import AbstractBatchSystem
from toil.batchSystems.mesos import ToilJob, ResourceRequirement, TaskData
log = logging.getLogger(__name__)
class MesosBatchSystem(AbstractBatchSystem, mesos.interface.Scheduler):
"""
A toil batch system implementation that uses Apache Mesos to distribute toil jobs as Mesos tasks over a
cluster of slave nodes. A Mesos framework consists of a scheduler and an executor. This class acts as the
scheduler and is typically run on the master node that also runs the Mesos master process with which the
scheduler communicates via a driver component. The executor is implemented in a separate class. It is run on each
slave node and communicates with the Mesos slave process via another driver object. The scheduler may also be run
on a separate node from the master, which we then call somewhat ambiguously the driver node.
"""
@staticmethod
def supportsHotDeployment():
return True
def __init__(self, config, maxCores, maxMemory, maxDisk, masterIP,
userScript=None, toilDistribution=None):
AbstractBatchSystem.__init__(self, config, maxCores, maxMemory, maxDisk)
# The hot-deployed resources representing the user script and the toil distribution
# respectively. Will be passed along in every Mesos task. See
# toil.common.HotDeployedResource for details.
self.userScript = userScript
self.toilDistribution = toilDistribution
# Written to when mesos kills tasks, as directed by toil
self.killedSet = set()
# Dictionary of queues, which toil assigns jobs to. Each queue represents a job type,
# defined by resource usage
self.jobQueueList = defaultdict(list)
# IP of the Mesos master. Specified in MesosBatchSystem; currently loopback
self.masterIP = masterIP
# queue of jobs to kill, by jobID.
self.killSet = set()
# Dict of launched jobIDs to TaskData named tuple. Contains start time, executorID, and slaveID.
self.runningJobMap = {}
# Queue of jobs whose status has been updated, according to mesos. Req'd by toil
self.updatedJobsQueue = Queue()
# Whether to use implicit/explicit acknowledgements
self.implicitAcknowledgements = self.getImplicit()
# Reference to the Mesos driver used by this scheduler, to be instantiated in run()
self.driver = None
# FIXME: This comment makes no sense to me
# Returns Mesos executor object, which is merged into Mesos tasks as they are built
self.executor = self.buildExecutor()
self.nextJobID = 0
self.lastReconciliation = time.time()
self.reconciliationPeriod = 120
# Start the driver
self._startDriver()
def issueBatchJob(self, command, memory, cores, disk):
"""
Issues the given command, returning a unique jobID. command is the string to run, memory is an int giving
the number of bytes the job needs to run in, cores is the number of CPUs needed for the job, and disk is
the number of bytes of disk space the job requires.
"""
# puts job into job_type_queue to be run by Mesos, AND puts jobID in current_job[]
self.checkResourceRequest(memory, cores, disk)
jobID = self.nextJobID
self.nextJobID += 1
job = ToilJob(jobID=jobID,
resources=ResourceRequirement(memory=memory, cores=cores, disk=disk),
command=command,
userScript=self.userScript,
toilDistribution=self.toilDistribution)
job_type = job.resources
log.debug("Queueing the job command: %s with job id: %s ..." % (command, str(jobID)))
self.jobQueueList[job_type].append(job)
log.debug("... queued")
return jobID
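# Hedged example of issuing a job to this batch system (the command string and the
# resource sizes are illustrative only):
#   jobID = batchSystem.issueBatchJob("sleep 60", memory=2 * 2 ** 30, cores=1,
#                                     disk=10 * 2 ** 30)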
def killBatchJobs(self, jobIDs):
"""
Kills the given job IDs.
"""
localSet = set()
if self.driver is None:
raise RuntimeError("There is no scheduler driver")
for jobID in jobIDs:
log.debug("passing tasks to kill to Mesos driver")
self.killSet.add(jobID)
localSet.add(jobID)
if jobID not in self.getIssuedBatchJobIDs():
self.killSet.remove(jobID)
localSet.remove(jobID)
log.debug("Job %s already finished", jobID)
else:
taskId = mesos_pb2.TaskID()
taskId.value = str(jobID)
self.driver.killTask(taskId)
while localSet:
log.debug("in while loop")
intersection = localSet.intersection(self.killedSet)
localSet -= intersection
self.killedSet -= intersection
if not intersection:
log.debug("sleeping in the while")
time.sleep(1)
def getIssuedBatchJobIDs(self):
"""
A list of jobs (as jobIDs) currently issued (may be running, or maybe just waiting).
"""
# TODO: Ensure jobList holds jobs that have been "launched" from Mesos
jobList = []
for k, queue in self.jobQueueList.iteritems():
for item in queue:
jobList.append(item.jobID)
for k, v in self.runningJobMap.iteritems():
jobList.append(k)
return jobList
def getRunningBatchJobIDs(self):
"""
Gets a map of jobs (as jobIDs) currently running (not just waiting) and how long they have been running
(in seconds).
"""
currentTime = dict()
for jobID, data in self.runningJobMap.iteritems():
currentTime[jobID] = time.time() - data.startTime
return currentTime
def getUpdatedBatchJob(self, maxWait):
"""
Gets a job that has updated its status, according to the job manager. Max wait gives the number of seconds to
pause while waiting for a result. If a result is available, it returns (jobID, exitValue); otherwise it returns None.
"""
i = self.getFromQueueSafely(self.updatedJobsQueue, maxWait)
if i is None:
return None
jobID, retcode = i
self.updatedJobsQueue.task_done()
log.debug("Job updated with code {}".format(retcode))
return i
def getWaitDuration(self):
"""
Gets the period of time to wait (floating point, in seconds) between checking for
missing/overlong jobs.
"""
return self.reconciliationPeriod
@classmethod
def getRescueBatchJobFrequency(cls):
"""
Parasol leaks jobs, but rescuing jobs involves calls to parasol list jobs and pstat2,
making it expensive, so we only allow this every half hour.
"""
return 1800 # Half an hour
def buildExecutor(self):
"""
Creates and returns an ExecutorInfo instance representing our executor implementation.
"""
def scriptPath(executorClass):
path = sys.modules[executorClass.__module__].__file__
if path.endswith('.pyc'):
path = path[:-1]
return path
executorInfo = mesos_pb2.ExecutorInfo()
# The executor program is installed as a setuptools entry point by setup.py
executorInfo.command.value = "toil-mesos-executor"
executorInfo.executor_id.value = "toilExecutor"
executorInfo.name = "Test Executor (Python)"
executorInfo.source = "python_test"
return executorInfo
def getImplicit(self):
"""
Determine whether to run with implicit or explicit acknowledgements.
"""
implicitAcknowledgements = 1
if os.getenv("MESOS_EXPLICIT_ACKNOWLEDGEMENTS"):
log.debug("Enabling explicit status update acknowledgements")
implicitAcknowledgements = 0
return implicitAcknowledgements
def _startDriver(self):
"""
The Mesos driver thread which handles the scheduler's communication with the Mesos master
"""
framework = mesos_pb2.FrameworkInfo()
framework.user = "" # Have Mesos fill in the current user.
framework.name = "toil"
if os.getenv("MESOS_CHECKPOINT"):
log.debug("Enabling checkpoint for the framework")
framework.checkpoint = True
if os.getenv("MESOS_AUTHENTICATE"):
raise NotImplementedError("Authentication is currently not supported")
else:
framework.principal = framework.name
self.driver = mesos.native.MesosSchedulerDriver(self, framework, self.masterIP,
self.implicitAcknowledgements)
assert self.driver.start() == mesos_pb2.DRIVER_RUNNING
def shutdown(self):
log.info("Stopping Mesos driver")
self.driver.stop()
log.info("Joining Mesos driver")
driver_result = self.driver.join()
log.info("Joined Mesos driver")
if driver_result != mesos_pb2.DRIVER_STOPPED:
raise RuntimeError("Mesos driver failed with %i", driver_result)
def registered(self, driver, frameworkId, masterInfo):
"""
Invoked when the scheduler successfully registers with a Mesos master
"""
log.debug("Registered with framework ID %s" % frameworkId.value)
def _sortJobsByResourceReq(self):
job_types = list(self.jobQueueList.keys())
# sorts from largest to smallest core usage
# TODO: add a size() method to ResourceSummary and use it as the key. Ask me why.
job_types.sort(key=lambda resourceRequirement: resourceRequirement.cores)
job_types.reverse()
return job_types
def _declineAllOffers(self, driver, offers):
for offer in offers:
log.debug("No jobs to assign. Rejecting offer".format(offer.id.value))
driver.declineOffer(offer.id)
def _determineOfferResources(self, offer):
offerCores = 0
offerMem = 0
offerStor = 0
for resource in offer.resources:
if resource.name == "cpus":
offerCores += resource.scalar.value
elif resource.name == "mem":
offerMem += resource.scalar.value
elif resource.name == "disk":
offerStor += resource.scalar.value
return offerCores, offerMem, offerStor
def _prepareToRun(self, job_type, offer, index):
jt_job = self.jobQueueList[job_type][index] # get the first element to ensure FIFO
task = self._createTask(jt_job, offer)
return task
def _deleteByJobID(self, jobID):
# FIXME: not efficient, I'm sure.
for key, jobType in self.jobQueueList.iteritems():
for job in jobType:
if jobID == job.jobID:
jobType.remove(job)
def _updateStateToRunning(self, offer, task):
self.runningJobMap[int(task.task_id.value)] = TaskData(startTime=time.time(),
slaveID=offer.slave_id,
executorID=task.executor.executor_id)
self._deleteByJobID(int(task.task_id.value))
def resourceOffers(self, driver, offers):
"""
Invoked when resources have been offered to this framework.
"""
job_types = self._sortJobsByResourceReq()
if len(job_types) == 0 or (len(self.getIssuedBatchJobIDs()) - len(self.getRunningBatchJobIDs()) == 0):
log.debug("Declining offers")
# If there are no jobs, we can get stuck with no jobs and no new offers until we decline them.
self._declineAllOffers(driver, offers)
return
# Right now, gives priority to largest jobs
for offer in offers:
tasks = []
# TODO: In an offer, can there | |
data
x = test_set_x[index * batch_size: (index + 1) * batch_size]
y_kpt_norm = test_set_y['kpt_norm'][index * batch_size: (index + 1) * batch_size]
y_kpt_ocular_dist = test_set_y['ocular_dist'][index * batch_size: (index + 1) * batch_size]
y_bound_mask = test_set_y['bound_mask'][index * batch_size: (index + 1) * batch_size]
y_mask_border = test_set_y['mask_border'][index * batch_size: (index + 1) * batch_size]
y_border_pixel = test_set_y['border_pixel'][index * batch_size: (index + 1) * batch_size]
# at test time we consider the one-hot output of
# the RCN and pass it to the denoising model
if self.tcdcn_cfNet:
one_hot_maps_4D = get_one_hot_predictions(self.tcdcn_cfNet, x, self.dim)
# in this case, we are evaluating on the cfNet model's output, so consider all points.
mask_kpts = np.ones_like(y_kpt_norm)
else:
source_points = get_source_points(self.tcdcn_cfNet, x, y_kpt_norm, self.num_model_kpts)
# getting the one-hot matrices of the kpt_locations + adding noise to it and
# getting mask for the noised locations
# one_hot_maps_4D is of shape (#batch, #kpts, #dim, #dim)
# mask is of shape (#batch, #kpts)
one_hot_maps_4D, y_mask_jittered = get_and_noise_one_hot_maps(source_points, self.dim,
self.nMaps_shuffled,
self.rng, self.dropout_kpts)
# in this case we are dealing with the true keypoints, so only consider the jittered ones
# It gets the masks where each one value indicates
# a jittered kpt that is not also on the border.
mask_kpts = y_mask_border * y_mask_jittered
# Note: the above term gives error on the true keypoint distribution.
# If you want to get the error on the distance between predicted and the (sometimes cropped) true values, then use
#mask_kpts = y_mask_jittered
#y_mask_kpts is of shape (#batch * #kpts)
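# Illustrative shape check (the sizes are hypothetical): with a mini-batch of 64 images,
# 68 keypoints and self.dim == 80, one_hot_maps_4D has shape (64, 68, 80, 80),
# mask_kpts has shape (64, 68) and y_mask_kpts flattens to shape (64 * 68,).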
y_mask_kpts = np.ndarray.flatten(mask_kpts)
test_num_samples += np.sum(y_mask_kpts)
cost_kpt, L2_cost, error_kpt = self.tcdcn.test_300W(self.L2_coef, one_hot_maps_4D,
y_kpt_ocular_dist, y_kpt_norm,
y_bound_mask, y_mask_kpts,
y_border_pixel, dropout=0)
# accumulating the values of the mini-batches
epoch_cost_kpt.append(cost_kpt)
epoch_error_kpt.append(error_kpt)
# getting the average of the whole epoch
avg_epoch_cost_kpt = np.mean(epoch_cost_kpt)
# getting the average of each keypoint over all of the samples
#epoch_error_kpt = np.sum(np.array(epoch_error_kpt), axis=0)
avg_epoch_error_kpt_avg = np.sum(epoch_error_kpt)/test_num_samples
# appending epoch results
error_dict['cost_kpt'].append(avg_epoch_cost_kpt)
error_dict['error_kpt_avg'].append(avg_epoch_error_kpt_avg)
if (epoch+1) >= sw_lenght:
mean_cost_kpt = np.mean(error_dict['cost_kpt'][-sw_lenght:])
error_dict['cost_kpt_sliding'].append(mean_cost_kpt)
mean_error_kpt_avg = np.mean(error_dict['error_kpt_avg'][-sw_lenght:])
error_dict['error_kpt_avg_sliding'].append(mean_error_kpt_avg)
if mean_cost_kpt < error_dict['min_cost_kpt_sliding']:
error_dict['min_cost_kpt_sliding'] = mean_cost_kpt
if mean_error_kpt_avg < error_dict['min_error_kpt_avg_sliding']:
error_dict['min_error_kpt_avg_sliding'] = mean_error_kpt_avg
return avg_epoch_error_kpt_avg
def train(self):
# setting the mask for the tasks
params_pickle_base = 'shared_conv_params'
self.num_kpts_MTFL = 5
self.num_kpts_300W = 68
tcdcn = self.tcdcn
num_epochs = self.num_epochs
batch_size = self.batch_size
L2_coef = self.L2_coef
file_suffix = self.file_suffix
mask_MTFL = self.mask_MTFL
mask_300W = self.mask_300W
Train, Valid, Test = self.sets
if mask_MTFL:
self.train_set_x_MTFL, self.train_set_y_MTFL = Train['MTFL']
valid_set_x_MTFL, valid_set_y_MTFL = Valid['MTFL']
elif mask_300W:
self.train_set_x_300W, self.train_set_y_300W = Train['300W']
valid_set_x_300W, valid_set_y_300W = Valid['300W']
else:
raise ValueError('Neither mask_MTFL=%s nor mask_300W=%s is True' %(mask_MTFL, mask_300W))
########################
# training the convnet #
########################
sys.stderr.write("training starts ...\n")
start_time = time.time() # start time for training
period_start_time = time.time() # start time for the saving model
save_tresh_mins = 360. # setting the time threshold for saving the model params to six hours
epoch_100_start_time = time.time() # start time for training
# since the datasets are trained simultaneously, the results are kept in one OrderedDict
Train_error = OrderedDict()
Train_error['cost'] = [] # total cost for training (keypoint cost + L2 coef + other if applicable)
Train_error['cost_kpt'] = [] # keypoint cost for training
Train_error['cost_kpt_sliding'] = [] # keypoint cost for training, measured by taking a sliding window of train_total_cost_kpt
Train_error['cost_gl'] = []
Train_error['cost_gen'] = []
Train_error['cost_sm'] = []
Train_error['cost_pose'] = []
Train_error['error_test'] = [] # total error for the auxiliary tasks
Train_error['error_kpt'] = [] # individual error for each keypoint location (the test time evaluation metric)
Train_error['error_kpt_avg'] = [] # average of the error for all keypoints
Train_error['error_kpt_avg_sliding'] = [] # average of the error for all keypoints, measured by taking a sliding window of train_total_error_kpt_avg
Train_error['L2_norm'] = []
Train_error['min_cost_kpt_sliding'] = np.inf
Train_error['min_error_kpt_avg_sliding'] = np.inf
# batch data
Train_error['cost_batch'] = []
Train_error['cost_kpt_batch'] = []
Train_error['L2_norm_batch'] = []
Train_error['cost_gl_batch'] = []
Train_error['cost_gen_batch'] = []
Train_error['cost_sm_batch'] = []
Train_error['cost_pose_batch'] = []
# the results are kept separately for each valid set
Valid_error = OrderedDict()
for subset in Valid.keys():
setx, sety = Valid[subset]
subset_dict = OrderedDict()
subset_dict['num_batches'] = int(np.ceil(setx.shape[0]/float(batch_size)))
subset_dict['num_samples'] = setx.shape[0]
subset_dict['cost'] = [] # total cost for training (keypoint cost + L2 coef + other if applicable)
subset_dict['cost_kpt'] = [] # keypoint cost for training
subset_dict['cost_kpt_sliding'] = [] # keypoint cost for training, measured by taking a sliding window of train_total_cost_kpt
subset_dict['cost_gl'] = []
subset_dict['cost_gen'] = []
subset_dict['cost_sm'] = []
subset_dict['cost_pose'] = []
subset_dict['error_test'] = [] # total error for the auxiliary tasks
subset_dict['error_kpt'] = [] # individual error for each keypoint location (the test time evaluation metric)
subset_dict['error_kpt_avg'] = [] # average of the error for all keypoints
subset_dict['error_kpt_avg_sliding'] = [] # average of the error for all keypoints, measured by taking a sliding window of train_total_error_kpt_avg
subset_dict['L2_norm'] = []
subset_dict['min_cost_kpt_sliding'] = np.inf
subset_dict['min_error_kpt_avg_sliding'] = np.inf
subset_dict['min_cost_kpt'] = np.inf
subset_dict['min_cost_kpt_epoch'] = -1
subset_dict['best_params'] = [] # the parameters of the best model for this validation set
Valid_error[subset] = subset_dict
###########################
# getting the mask_border #
###########################
kpt_norm = sety['kpt_norm']
bound_mask = sety['bound_mask']
mask_border = get_bound_mask(bound_mask)
mask_border = mask_padded_kpts(kpt_norm, mask_border)
sety['mask_border'] = mask_border
#########################
# getting border_pixels #
#########################
pad_ratio = sety['pad_ratio']
border_pixel = padRatio_to_pixels(pad_ratio, setx.shape[1])
sety['border_pixel'] = border_pixel
##############################
# making the values discrete #
##############################
# getting kpts in the range of [0, dim**2]
kpt_discret = discretise_y(kpt_norm, self.dim)
sety['kpt_norm'] = kpt_discret
Valid[subset] = (setx, sety)
Test_error = OrderedDict()
for dset in Test.keys():
dset_dict = OrderedDict()
for subset in Test[dset].keys():
setx, sety = Test[dset][subset]
subset_dict = OrderedDict()
subset_dict['num_batches'] = int(np.ceil(setx.shape[0]/float(batch_size)))
subset_dict['num_samples'] = setx.shape[0]
subset_dict['cost_kpt'] = []
subset_dict['cost_kpt_sliding'] = []
subset_dict['error_kpt'] = []
subset_dict['error_kpt_avg'] = []
subset_dict['error_kpt_avg_sliding'] = []
subset_dict['min_cost_kpt_sliding'] = np.inf
subset_dict['min_error_kpt_avg_sliding'] = np.inf
dset_dict[subset] = subset_dict
###########################
# getting the mask_border #
###########################
kpt_norm = sety['kpt_norm']
bound_mask = sety['bound_mask']
mask_border = get_bound_mask(bound_mask)
mask_border = mask_padded_kpts(kpt_norm, mask_border)
sety['mask_border'] = mask_border
#########################
# getting border_pixels #
#########################
pad_ratio = sety['pad_ratio']
border_pixel = padRatio_to_pixels(pad_ratio, setx.shape[1])
sety['border_pixel'] = border_pixel
##############################
# making the values discrete #
##############################
# getting kpts in the range of [0, dim**2]
kpt_discret = discretise_y(kpt_norm, self.dim)
sety['kpt_norm'] = kpt_discret
Test[dset][subset] = (setx, sety)
Test_error[dset] = dset_dict
'''
# dumping the params before start of the model
params_pickle_name = params_pickle_base + '_epoch_0' + file_suffix + '.pickle'
tcdcn.dump_params(params_pickle_name)
# testing the dumped values by previously trained model"
params_pickle_name = params_pickle_base + '_epoch_' + str(num_epochs) + '.pickle'
tcdcn.load_params(params_pickle_name)
print ' printing the values'
params = tcdcn.get_params()
for param in params:
print "params %s" %(param,)
'''
########################
# dataset based values #
########################
# the index for minibatches while training
if Train.has_key('MTFL'):
setx, sety = Train['MTFL']
self.train_nbatches_MTFL = int(np.ceil(setx.shape[0]/float(batch_size)))
if Train.has_key('300W'):
setx, sety = Train['300W']
self.train_nbatches_300W = int(np.ceil(setx.shape[0]/float(batch_size)))
self.index_MTFL = 0
self.index_300W = 0
# initialing seed value
self.SEED_MTFL = self.num_queue_elem
self.SEED_300W = self.num_queue_elem
# specifying the number of updates in an epoch
if mask_MTFL and mask_300W:
per_epoch_updates = 80 #number of updates in an epoch
elif mask_MTFL:
per_epoch_updates = self.train_nbatches_MTFL
elif mask_300W:
per_epoch_updates = self.train_nbatches_300W
else:
raise Exception('none of the masks is True')
self.total_updates = per_epoch_updates * num_epochs # total number of updates in training
if mask_MTFL:
self.max_epochs_MTFL = np.ceil(self.total_updates / float(self.train_nbatches_MTFL))
if mask_300W:
self.max_epochs_300W = np.ceil(self.total_updates / float(self.train_nbatches_300W))
#################################
# going through training epochs #
#################################
# running the thread for training the model
# each iteration of this for loop is one training epoch
sys.stderr.write("Starting the first epoch.\n")
for epoch in xrange(num_epochs):
# checking whether child processes are still alive
if self.producers:
for pr in self.producers:
if pr.exitcode > 0:
sys.stderr.write("An error encountered in one of the child processes. exiting ...%i\n")
exit()
#sys.stderr.write("training epoch %i\n" %(epoch+1))
epoch_start_time = time.time()
epoch_cost = []
epoch_error_test = []
epoch_cost_kpt = []
epoch_error_kpt = []
epoch_cost_gl = []
epoch_cost_gen = []
epoch_cost_sm = []
epoch_cost_pose = []
epoch_lambda_gl = []
epoch_lambda_gen = []
epoch_lambda_sm = []
epoch_lambda_pose = []
epoch_l2_cost = []
# the number of seen examples in this epoch
self.samples_seen = 0
for upd in xrange(per_epoch_updates):
one_hot_maps_4D, y_kpt_norm, y_kpt_ocular_dist, y_mask_kpts = self.get_mini_batch_train_300W()
# getting | |
<filename>lib/matplotlib/table.py
"""
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning, the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : <NAME> <<EMAIL>>
Copyright : 2004 <NAME> and <NAME>
License : matplotlib license
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
from . import artist
from .artist import Artist, allow_rasterization
from .patches import Rectangle
from .cbook import is_string_like
from matplotlib import docstring
from .text import Text
from .transforms import Bbox
from matplotlib.path import Path
class Cell(Rectangle):
"""
A cell is a Rectangle with some associated text.
"""
PAD = 0.1 # padding between text and rectangle
def __init__(self, xy, width, height,
edgecolor='k', facecolor='w',
fill=True,
text='',
loc=None,
fontproperties=None
):
# Call base
Rectangle.__init__(self, xy, width=width, height=height,
edgecolor=edgecolor, facecolor=facecolor)
self.set_clip_on(False)
# Create text object
if loc is None:
loc = 'right'
self._loc = loc
self._text = Text(x=xy[0], y=xy[1], text=text,
fontproperties=fontproperties)
self._text.set_clip_on(False)
def set_transform(self, trans):
Rectangle.set_transform(self, trans)
# the text does not get the transform!
def set_figure(self, fig):
Rectangle.set_figure(self, fig)
self._text.set_figure(fig)
def get_text(self):
'Return the cell Text instance'
return self._text
def set_fontsize(self, size):
self._text.set_fontsize(size)
def get_fontsize(self):
'Return the cell fontsize'
return self._text.get_fontsize()
def auto_set_font_size(self, renderer):
""" Shrink font size until text fits. """
fontsize = self.get_fontsize()
required = self.get_required_width(renderer)
while fontsize > 1 and required > self.get_width():
fontsize -= 1
self.set_fontsize(fontsize)
required = self.get_required_width(renderer)
return fontsize
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
# draw the rectangle
Rectangle.draw(self, renderer)
# position the text
self._set_text_position(renderer)
self._text.draw(renderer)
def _set_text_position(self, renderer):
""" Set text up so it draws in the right place.
Currently support 'left', 'center' and 'right'
"""
bbox = self.get_window_extent(renderer)
l, b, w, h = bbox.bounds
# draw in center vertically
self._text.set_verticalalignment('center')
y = b + (h / 2.0)
# now position horizontally
if self._loc == 'center':
self._text.set_horizontalalignment('center')
x = l + (w / 2.0)
elif self._loc == 'left':
self._text.set_horizontalalignment('left')
x = l + (w * self.PAD)
else:
self._text.set_horizontalalignment('right')
x = l + (w * (1.0 - self.PAD))
self._text.set_position((x, y))
def get_text_bounds(self, renderer):
""" Get text bounds in axes co-ordinates. """
bbox = self._text.get_window_extent(renderer)
bboxa = bbox.inverse_transformed(self.get_data_transform())
return bboxa.bounds
def get_required_width(self, renderer):
""" Get width required for this cell. """
l, b, w, h = self.get_text_bounds(renderer)
return w * (1.0 + (2.0 * self.PAD))
def set_text_props(self, **kwargs):
'update the text properties with kwargs'
self._text.update(kwargs)
class CustomCell(Cell):
"""
A subclass of Cell where the sides may be visibly toggled.
"""
_edges = 'BRTL'
_edge_aliases = {'open': '',
'closed': _edges, # default
'horizontal': 'BT',
'vertical': 'RL'
}
def __init__(self, *args, **kwargs):
visible_edges = kwargs.pop('visible_edges')
Cell.__init__(self, *args, **kwargs)
self.visible_edges = visible_edges
@property
def visible_edges(self):
return self._visible_edges
@visible_edges.setter
def visible_edges(self, value):
if value is None:
self._visible_edges = self._edges
elif value in self._edge_aliases:
self._visible_edges = self._edge_aliases[value]
else:
for edge in value:
if edge not in self._edges:
msg = ('Invalid edge param {0}, must only be one of'
' {1} or string of {2}.').format(
value,
", ".join(self._edge_aliases.keys()),
", ".join(self._edges),
)
raise ValueError(msg)
self._visible_edges = value
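# Hedged illustration of the aliases above (cell construction details elided):
#   cell.visible_edges = 'horizontal'   # draw only the bottom and top edges ('BT')
#   cell.visible_edges = 'open'         # draw no edges at all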
def get_path(self):
'Return a path where the edges specified by _visible_edges are drawn'
codes = [Path.MOVETO]
for edge in self._edges:
if edge in self._visible_edges:
codes.append(Path.LINETO)
else:
codes.append(Path.MOVETO)
if Path.MOVETO not in codes[1:]: # All sides are visible
codes[-1] = Path.CLOSEPOLY
return Path(
[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]],
codes,
readonly=True
)
class Table(Artist):
"""
Create a table of cells.
Table can have (optional) row and column headers.
Each entry in the table can be either text or patches.
Column widths and row heights for the table can be specified.
Return value is a sequence of text, line and patch instances that make
up the table
"""
codes = {'best': 0,
'upper right': 1, # default
'upper left': 2,
'lower left': 3,
'lower right': 4,
'center left': 5,
'center right': 6,
'lower center': 7,
'upper center': 8,
'center': 9,
'top right': 10,
'top left': 11,
'bottom left': 12,
'bottom right': 13,
'right': 14,
'left': 15,
'top': 16,
'bottom': 17,
}
FONTSIZE = 10
AXESPAD = 0.02 # the border between the axes and table edge
def __init__(self, ax, loc=None, bbox=None, **kwargs):
Artist.__init__(self)
if is_string_like(loc) and loc not in self.codes:
warnings.warn('Unrecognized location %s. Falling back on '
'bottom; valid locations are\n%s\t' %
(loc, '\n\t'.join(six.iterkeys(self.codes))))
loc = 'bottom'
if is_string_like(loc):
loc = self.codes.get(loc, 1)
self.set_figure(ax.figure)
self._axes = ax
self._loc = loc
self._bbox = bbox
# use axes coords
self.set_transform(ax.transAxes)
self._texts = []
self._cells = {}
self._edges = None
self._autoRows = []
self._autoColumns = []
self._autoFontsize = True
self.update(kwargs)
self.set_clip_on(False)
self._cachedRenderer = None
def add_cell(self, row, col, *args, **kwargs):
""" Add a cell to the table. """
xy = (0, 0)
cell = CustomCell(xy, visible_edges=self.edges, *args, **kwargs)
cell.set_figure(self.figure)
cell.set_transform(self.get_transform())
cell.set_clip_on(False)
self._cells[(row, col)] = cell
@property
def edges(self):
return self._edges
@edges.setter
def edges(self, value):
self._edges = value
def _approx_text_height(self):
return (self.FONTSIZE / 72.0 * self.figure.dpi /
self._axes.bbox.height * 1.2)
@allow_rasterization
def draw(self, renderer):
# Need a renderer to do hit tests on mouseevent; assume the last one
# will do
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
self._cachedRenderer = renderer
if not self.get_visible():
return
renderer.open_group('table')
self._update_positions(renderer)
keys = list(six.iterkeys(self._cells))
keys.sort()
for key in keys:
self._cells[key].draw(renderer)
# for c in self._cells.itervalues():
# c.draw(renderer)
renderer.close_group('table')
def _get_grid_bbox(self, renderer):
"""Get a bbox, in axes co-ordinates for the cells.
Only include those in the range (0,0) to (maxRow, maxCol)"""
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in six.iterkeys(self._cells)
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.inverse_transformed(self.get_transform())
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the table.
Returns T/F, {}
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
# TODO: Return index of the cell containing the cursor so that the user
# doesn't have to bind to each one individually.
if self._cachedRenderer is not None:
boxes = [self._cells[pos].get_window_extent(self._cachedRenderer)
for pos in six.iterkeys(self._cells)
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.contains(mouseevent.x, mouseevent.y), {}
else:
return False, {}
def get_children(self):
'Return the Artists contained by the table'
return list(six.itervalues(self._cells))
get_child_artists = get_children # backward compatibility
def get_window_extent(self, renderer):
'Return the bounding box of the table in window coords'
boxes = [cell.get_window_extent(renderer)
for cell in six.itervalues(self._cells)]
return Bbox.union(boxes)
def _do_cell_alignment(self):
""" Calculate row heights and column widths.
Position cells accordingly.
"""
# Calculate row/column widths
widths = {}
heights = {}
for (row, col), cell in six.iteritems(self._cells):
height = heights.setdefault(row, 0.0)
heights[row] = max(height, cell.get_height())
width = widths.setdefault(col, 0.0)
widths[col] = max(width, cell.get_width())
# work out left position for each column
xpos = 0
lefts = {}
cols = list(six.iterkeys(widths))
cols.sort()
for col in cols:
lefts[col] = xpos
xpos += widths[col]
ypos = 0
bottoms = {}
rows = list(six.iterkeys(heights))
rows.sort()
rows.reverse()
for row in rows:
bottoms[row] = ypos
ypos += heights[row]
# set cell positions
for (row, col), cell in six.iteritems(self._cells):
cell.set_x(lefts[col])
cell.set_y(bottoms[row])
def auto_set_column_width(self, col):
self._autoColumns.append(col)
def _auto_set_column_width(self, col, renderer):
""" Automagically set width for column.
"""
cells = [key for key in self._cells if key[1] == col]
# find max width
width = 0
for cell in cells:
c = self._cells[cell]
width = max(c.get_required_width(renderer), width)
# Now set the widths
for cell in cells:
self._cells[cell].set_width(width)
def auto_set_font_size(self, value=True):
""" Automatically set font size. """
self._autoFontsize = value
def _auto_set_font_size(self, renderer):
if len(self._cells) == 0:
return
fontsize = list(six.itervalues(self._cells))[0].get_fontsize()
cells = []
for key, cell in six.iteritems(self._cells):
# ignore auto-sized columns
if key[1] in self._autoColumns:
continue
size = cell.auto_set_font_size(renderer)
fontsize = min(fontsize, size)
cells.append(cell)
# now set all fontsizes equal
for cell in six.itervalues(self._cells):
cell.set_fontsize(fontsize)
def scale(self, xscale, yscale):
""" Scale column widths by xscale and row heights | |
US_origin_destination = oil_attrs["categories"]["US_origin_destination"]
# Get cargo oil type attribution information from oil-type yaml files
yaml_file = transport_data_dir / Path(oil_attrs["files"]["CAD_origin"]).name
with yaml_file.open("rt") as f:
CAD_yaml = yaml.safe_load(f)
yaml_file = transport_data_dir / Path(oil_attrs["files"]["WA_destination"]).name
with yaml_file.open("rt") as f:
WA_in_yaml = yaml.safe_load(f)
WA_in_noinfo = _calc_no_info_facilities(WA_in_yaml)
yaml_file = transport_data_dir / Path(oil_attrs["files"]["WA_origin"]).name
with yaml_file.open("rt") as f:
WA_out_yaml = yaml.safe_load(f)
WA_out_noinfo = _calc_no_info_facilities(WA_out_yaml)
# US_origin is for US as origin
yaml_file = transport_data_dir / Path(oil_attrs["files"]["US_origin"]).name
with yaml_file.open("rt") as f:
US_yaml = yaml.safe_load(f)
# US_combined represents the combined import and export of oil
yaml_file = transport_data_dir / Path(oil_attrs["files"]["US_combined"]).name
with yaml_file.open("rt") as f:
USall_yaml = yaml.safe_load(f)
yaml_file = transport_data_dir / Path(oil_attrs["files"]["Pacific_origin"]).name
with yaml_file.open("rt") as f:
Pacific_yaml = yaml.safe_load(f)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# NOTE: these pairs need to be used together for "get_oil_type_cargo"
# (but don't yet have error-checks in place):
# - "WA_in_yaml" and "destination"
# - "WA_out_yaml" and "origin"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if origin in CAD_origin_destination:
if origin == "Westridge Marine Terminal":
if destination == "U.S. Oil & Refining":
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
elif destination in US_origin_destination:
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
elif destination in CAD_origin_destination:
# assume export within CAD is from Jet fuel storage tanks
# as there is a pipeline to Parkland for crude oil
oil_type = "jet"
else:
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
else:
if destination in US_origin_destination:
# we have better information on WA fuel transfers,
# so I prioritize this information source
oil_type = get_oil_type_cargo(
WA_in_yaml, destination, vessel_type, random_generator
)
elif destination == "ESSO Nanaimo Departure Bay":
oil_type = get_oil_type_cargo(
CAD_yaml, destination, vessel_type, random_generator
)
elif destination == "Suncor Nanaimo":
oil_type = get_oil_type_cargo(
CAD_yaml, destination, vessel_type, random_generator
)
else:
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
elif origin in US_origin_destination and origin not in WA_out_noinfo[vessel_type]:
if destination == "Westridge Marine Terminal":
# Westridge stores jet fuel from US for re-distribution
oil_type = "jet"
else:
oil_type = get_oil_type_cargo(
WA_out_yaml, origin, vessel_type, random_generator
)
elif (
destination in US_origin_destination
and destination not in WA_in_noinfo[vessel_type]
):
oil_type = get_oil_type_cargo(
WA_in_yaml, destination, vessel_type, random_generator
)
elif destination in CAD_origin_destination:
if destination == "Westridge Marine Terminal":
# Westridge doesn't receive crude for storage
oil_type = "jet"
else:
oil_type = get_oil_type_cargo(
CAD_yaml, destination, vessel_type, random_generator
)
elif origin == "Pacific":
oil_type = get_oil_type_cargo(
Pacific_yaml, origin, vessel_type, random_generator
)
elif origin == "US":
oil_type = get_oil_type_cargo(US_yaml, origin, vessel_type, random_generator)
else:
# For all other traffic, use a generic fuel attribution from the combined
# US import and export
oil_type = get_oil_type_cargo(USall_yaml, None, vessel_type, random_generator)
return oil_type
def get_oil_type_barge(
oil_attrs, origin, destination, transport_data_dir, random_generator
):
"""Randomly choose type of cargo oil spilled from abarge based on AIS track
origin & destination, and oil cargo attribution analysis.
See the Google drawing
[Barge_Oil_Attribution](https://docs.google.com/drawings/d/10PM53-UnnILYCAPKU9MxiR-Y4OW0tIMhVzSjaHr-iSc/edit)
for a visual representation of the decision tree used to allocate oil type to barge traffic.
:param dict oil_attrs: Oil attribution information from the output of make_oil_attrs.py.
:param str or None origin: Origin of AIS track from which spill occurs.
:param str or None destination: Destination of AIS track from which spill occurs.
:param transport_data_dir: Directory path to marine_transport_data files repository
cloned from https://github.com/MIDOSS/marine_transport_data.
:type transport_data_dir: :py:class:`pathlib.Path`
:param random_generator: PCG-64 random number generator
:type random_generator: :py:class:`numpy.random.Generator`
:return: 2-tuple composed of:
* Type of oil spilled (str or None)
* Fuel or cargo spill flag (boolean)
:rtype: tuple
"""
vessel_type = "barge"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Set 'fuel_spill'
#
# Fuel_spill is used to flag ship tracks with barge designation
# as non-oil cargo traffic with, hence, fuel-spill risk only
# instead of combined cargo- & fuel-spill risk.
#
# This flag will turn to True (fuel-spill risk only) when:
# 1) Tug is not included in Casey's pre-selected "Voyage" dataset,
# which selected tug traffic that traveled within a 2 km of
# known marine oil terminal at some point in 2018. If not
# included, the origin/destination values are null.
# 2) Tug is included in Casey's pre-selected data but is not
# joined by our origin-destination analysis and, as a result,
# has null values for origin/destination.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
fuel_spill = False
# Assign US and CAD origin/destinations from oil_attrs file
CAD_origin_destination = oil_attrs["categories"]["CAD_origin_destination"]
US_origin_destination = oil_attrs["categories"]["US_origin_destination"]
# Get cargo oil type attribution information from oil-type yaml files
yaml_file = transport_data_dir / Path(oil_attrs["files"]["CAD_origin"]).name
with yaml_file.open("rt") as f:
CAD_yaml = yaml.safe_load(f)
yaml_file = transport_data_dir / Path(oil_attrs["files"]["WA_destination"]).name
with yaml_file.open("rt") as f:
WA_in_yaml = yaml.safe_load(f)
WA_in_noinfo = _calc_no_info_facilities(WA_in_yaml)
yaml_file = transport_data_dir / Path(oil_attrs["files"]["WA_origin"]).name
with yaml_file.open("rt") as f:
WA_out_yaml = yaml.safe_load(f)
WA_out_noinfo = _calc_no_info_facilities(WA_out_yaml)
# US_origin is for US as origin
yaml_file = transport_data_dir / Path(oil_attrs["files"]["US_origin"]).name
with yaml_file.open("rt") as f:
US_yaml = yaml.safe_load(f)
# US_combined represents the combined import and export of oil
yaml_file = transport_data_dir / Path(oil_attrs["files"]["US_combined"]).name
with yaml_file.open("rt") as f:
USall_yaml = yaml.safe_load(f)
yaml_file = transport_data_dir / Path(oil_attrs["files"]["Pacific_origin"]).name
with yaml_file.open("rt") as f:
Pacific_yaml = yaml.safe_load(f)
# get probability of non-allocated track being an oil-barge
probability_oilcargo = oil_attrs["vessel_attributes"]["barge"][
"probability_oilcargo"
]
probability_fuelonly = 1 - probability_oilcargo
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# these pairs need to be used together for "get_oil_type_cargo"
# (but don't yet have error-checks in place):
# - "WA_in_yaml" and "destination"
# - "WA_out_yaml" and "origin"
#
# The ERROR CATCH for the case of no oil transfer for a given selection of
# yaml file, origin, and vessel_type is currently to set the flag to fuel-
# spill potential rather than cargo-spill potential.
#
# Why?
#
# Because there are lots of tugs that are not associated
# with oil tank barges. We do our best to identify oil cargo and
# then need to rely on a probability of oil cargo informed by AIS
# traffic data.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if origin in CAD_origin_destination:
if origin == "Westridge Marine Terminal":
if destination in CAD_origin_destination:
oil_type = "jet"
else:
# allocate oil type based on a 'barge' from Westridge
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
else:
if destination in US_origin_destination:
# we have better information on WA fuel transfers,
# so I'm prioritizing this information source
oil_type = get_oil_type_cargo(
WA_in_yaml, destination, vessel_type, random_generator
)
# There is a possibility that barge traffic has a CAD
# origin but the US destination is matched with no fuel
# transport. It's not likely, but it is possible. This
# is an error catch for the case where no fuel type is
# associated with a barge import to a WA destination.
# sum(probability) == 0 will return empty oil type
# *** ERROR CATCH ***
if not oil_type:
fuel_spill = True
oil_type = None
# *** END ERROR CATCH ***
elif destination == "ESSO Nanaimo Departure Bay":
# These are fixed to have an oil type option for all
# vessel types. No error catch needed.
# See CAD_origin.yaml for verification.
oil_type = get_oil_type_cargo(
CAD_yaml, destination, vessel_type, random_generator
)
elif destination == "Suncor Nanaimo":
# Similar to ESSO. No error catch needed.
oil_type = get_oil_type_cargo(
CAD_yaml, destination, vessel_type, random_generator
)
else:
# if origin is a CAD terminal with no US oil terminal
# destination and no destination to a better known
# CAD terminal then just use the CAD origin allocation
# An option here is to flag a destination of 'Pacific'
# or 'US' and use US fuel allocation. I didn't see a
# compelling case for adding this complexity, so I kept
# it simple. Similar to ESSO, above, no error catch
# needed.
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
elif origin in US_origin_destination and origin not in WA_out_noinfo[vessel_type]:
oil_type = get_oil_type_cargo(
WA_out_yaml, origin, vessel_type, random_generator
)
# *** ERROR CATCH ***
# As a result of using 2 different data sources (AIS and
# Ecology), there is a chance that AIS has origin from a
# marine terminal for which no barge transfers are recorded
# in the DOE database. For this unlikely but possible case,
# I attribute
possaliency
def convertToNegativeSaliency(self, saliency):
negsaliency = F.threshold(-1 * saliency, 0.0, 0.0)
negsaliency = negsaliency / torch.max(negsaliency)
return negsaliency
def convertToAbsoluteSaliency(self, saliency):
return torch.abs(saliency)
def getAbsoluteSaliencyImage(self, state, atariimg, mode='value', action=None, threshold=0.0, lag=-1):
if lag == -1:
saliency = self.convertToAbsoluteSaliency(self.averageSaliencyMap(state, mode=mode, action=action))
else:
saliency = self.convertToAbsoluteSaliency(self.frameSaliencyMap(state, mode=mode, action=action, lag=lag))
ataristate = self.postProcess(state[0])
saliency = saliency.cpu()
if threshold > 0.0:
saliency = F.threshold(saliency, threshold, 0.0)
# saliency+=state[0]
atarisaliency = self.postProcess(saliency.numpy())
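# Stack the saliency map and two copies of the processed game frame as the
# R, G, B channels, normalize to [0, 1], reorder to H x W x C for OpenCV,
# copy the score bar from the raw Atari frame, then convert RGB -> BGR.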
img = torch.cat([torch.tensor(atarisaliency, dtype=torch.float).unsqueeze(0),
torch.tensor(ataristate, dtype=torch.float).unsqueeze(0),
torch.tensor(ataristate, dtype=torch.float).unsqueeze(0)])
img = img / torch.max(img)
img = img.transpose(0, 2).transpose(0, 1).numpy()
# print(np.max(img))
img[0:20, :] = atariimg[0:20, :] / 255.0
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img
def getAbsoluteGuidedBPImage(self, state, atariimg, mode='value', action=None, threshold=0.0, lag=-1):
if lag == -1:
saliency = self.convertToAbsoluteSaliency(
self.averageGuidedBP(state, mode=mode, action=action))
else:
saliency = self.convertToAbsoluteSaliency(
self.frameGuidedBP(state, mode=mode, action=action, lag=lag))
ataristate = self.postProcess(state[0])
saliency = saliency.cpu()
if threshold > 0.0:
saliency = F.threshold(saliency, threshold, 0.0)
# saliency += state[0]
atarisaliency = self.postProcess(saliency.numpy())
img = torch.cat([torch.tensor(atarisaliency, dtype=torch.float).unsqueeze(0),
torch.tensor(ataristate, dtype=torch.float).unsqueeze(0),
torch.tensor(ataristate, dtype=torch.float).unsqueeze(0)])
img = img / torch.max(img)
img = img.transpose(0, 2).transpose(0, 1).numpy()
# print(np.max(img))
img[0:20, :] = atariimg[0:20, :] / 255.0
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img
def getPosNegSaliencyImage(self, state, atariimg, mode='value', action=None, threshold=0.0, lag=-1):
if lag == -1:
possaliency, negsaliency = self.convertToPosNegSaliency(
self.averageSaliencyMap(state, mode=mode, action=action))
else:
possaliency, negsaliency = self.convertToPosNegSaliency(
self.frameSaliencyMap(state, mode=mode, action=action, lag=lag))
ataristate = self.postProcess(state[0])
#cv2.imshow("ataristate",ataristate)
#cv2.waitKey()
possaliency = possaliency.cpu()
negsaliency = negsaliency.cpu()
if threshold > 0.0:
possaliency = F.threshold(possaliency, threshold, 0.0)
negsaliency = F.threshold(negsaliency, threshold, 0.0)
# possaliency += state[0] # Add state to saliency maps in order to get gray game image
# negsaliency += state[0]
ataripossaliency = self.postProcess(possaliency.numpy())
atarinegsaliency = self.postProcess(negsaliency.numpy())
img = torch.cat([torch.zeros(atarinegsaliency.shape, dtype=torch.float).unsqueeze(0),
torch.zeros(ataripossaliency.shape, dtype=torch.float).unsqueeze(0),
torch.tensor(ataristate, dtype=torch.float).unsqueeze(0)])
img = img / torch.max(img)
img = img.transpose(0, 2).transpose(0, 1).numpy()
# print(np.max(img))
# img[0:20, :] = atariimg[0:20, :] / 255.0
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
#img = ataristate/np.max(ataristate)
return img
def getPosNegGuidedBPImage(self, state, atariimg, mode='value', action=None, threshold=0.0, lag=-1):
if lag == -1:
possaliency, negsaliency = self.convertToPosNegSaliency(
self.averageGuidedBP(state, mode=mode, action=action))
else:
possaliency, negsaliency = self.convertToPosNegSaliency(
self.frameGuidedBP(state, mode=mode, action=action, lag=lag))
ataristate = self.postProcess(state[0])
possaliency = possaliency.cpu()
negsaliency = negsaliency.cpu()
if threshold > 0.0:
possaliency = F.threshold(possaliency, threshold, 0.0)
negsaliency = F.threshold(negsaliency, threshold, 0.0)
# possaliency += state[0] # Add state to saliency maps in order to get gray game image
# negsaliency += state[0]
ataripossaliency = self.postProcess(possaliency.numpy())
atarinegsaliency = self.postProcess(negsaliency.numpy())
img = torch.cat([torch.tensor(atarinegsaliency, dtype=torch.float).unsqueeze(0),
torch.tensor(ataripossaliency, dtype=torch.float).unsqueeze(0),
torch.tensor(ataristate, dtype=torch.float).unsqueeze(0)])
img = img / torch.max(img)
img = img.transpose(0, 2).transpose(0, 1).numpy()
# print(np.max(img))
img[0:20, :] = atariimg[0:20, :] / 255.0
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img
def getPositiveSaliencyImage(self, state, atariimg, mode='value', action=None, threshold=0.0, lag=-1):
if lag == -1:
possaliency = self.convertToPositiveSaliency(self.averageSaliencyMap(state, mode=mode, action=action))
else:
possaliency = self.convertToPositiveSaliency(
self.frameSaliencyMap(state, mode=mode, action=action, lag=lag))
ataristate = self.postProcess(state[0])
possaliency = possaliency.cpu()
if threshold > 0.0:
possaliency = F.threshold(possaliency, threshold, 0.0)
# possaliency += state[0] # Add state to saliency maps in order to get gray game image
ataripossaliency = self.postProcess(possaliency.numpy())
img = torch.cat([torch.tensor(ataristate, dtype=torch.float).unsqueeze(0),
torch.tensor(ataripossaliency, dtype=torch.float).unsqueeze(0),
torch.tensor(ataristate, dtype=torch.float).unsqueeze(0)])
img = img / torch.max(img)
img = img.transpose(0, 2).transpose(0, 1).numpy()
# print(np.max(img))
img[0:20, :] = atariimg[0:20, :] / 255.0
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img
def getPositiveGuidedBPImage(self, state, atariimg, mode='value', action=None, threshold=0.0, lag=-1):
if lag == -1:
possaliency = self.convertToPositiveSaliency(self.averageGuidedBP(state, mode=mode, action=action))
else:
possaliency = self.convertToPositiveSaliency(self.frameGuidedBP(state, mode=mode, action=action, lag=lag))
ataristate = self.postProcess(state[0])
possaliency = possaliency.cpu()
if threshold > 0.0:
possaliency = F.threshold(possaliency, threshold, 0.0)
# possaliency += state[0] # Add state to saliency maps in order to get gray game image
ataripossaliency = self.postProcess(possaliency.numpy())
img = torch.cat([torch.tensor(ataristate, dtype=torch.float).unsqueeze(0),
torch.tensor(ataripossaliency, dtype=torch.float).unsqueeze(0),
torch.tensor(ataristate, dtype=torch.float).unsqueeze(0)])
img = img / torch.max(img)
img = img.transpose(0, 2).transpose(0, 1).numpy()
# print(np.max(img))
img[0:20, :] = atariimg[0:20, :] / 255.0
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img
def getNegativeSaliencyImage(self, state, atariimg, mode='value', action=None, threshold=0.0, lag=-1):
if lag == -1:
negsaliency = self.convertToNegativeSaliency(self.averageSaliencyMap(state, mode=mode, action=action))
else:
negsaliency = self.convertToNegativeSaliency(
self.frameSaliencyMap(state, mode=mode, action=action, lag=lag))
ataristate = self.postProcess(state[0])
negsaliency = negsaliency.cpu()
if threshold > 0.0:
negsaliency = F.threshold(negsaliency, threshold, 0.0)
# Add state to saliency maps in order to get gray game image
# negsaliency += state[0]
atarinegsaliency = self.postProcess(negsaliency.numpy())
img = torch.cat([torch.tensor(atarinegsaliency, dtype=torch.float).unsqueeze(0),
torch.tensor(ataristate, dtype=torch.float).unsqueeze(0),
torch.tensor(ataristate, dtype=torch.float).unsqueeze(0)])
img = img / torch.max(img)
img = img.transpose(0, 2).transpose(0, 1).numpy()
# print(np.max(img))
img[0:20, :] = atariimg[0:20, :] / 255.0
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img
def getNegativeGuidedBPImage(self, state, atariimg, mode='value', action=None, threshold=0.0, lag=-1):
if lag == -1:
negsaliency = self.convertToNegativeSaliency(self.averageGuidedBP(state, mode=mode, action=action))
else:
negsaliency = self.convertToNegativeSaliency(self.frameGuidedBP(state, mode=mode, action=action, lag=lag))
ataristate = self.postProcess(state[0])
negsaliency = negsaliency.cpu()
if threshold > 0.0:
negsaliency = F.threshold(negsaliency, threshold, 0.0)
# Add state to saliency maps in order to get gray game image
# negsaliency += state[0]
atarinegsaliency = self.postProcess(negsaliency.numpy())
img = torch.cat([torch.tensor(atarinegsaliency, dtype=torch.float).unsqueeze(0),
torch.tensor(ataristate, dtype=torch.float).unsqueeze(0),
torch.tensor(ataristate, dtype=torch.float).unsqueeze(0)])
img = img / torch.max(img)
img = img.transpose(0, 2).transpose(0, 1).numpy()
# print(np.max(img))
img[0:20, :] = atariimg[0:20, :] / 255.0
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img
def getSaliencyMapImage(self, state, atariimg, mode='value', action=None, threshold=0.0, lag=-1, type='PosNeg'):
if type != 'PosNeg' and type != 'Positive' and type != 'Negative' and type != 'Absolute':
raise ValueError("type must be 'PosNeg', 'Positive', 'Negative' or 'Absolute'")
elif type == 'PosNeg':
img = self.getPosNegSaliencyImage(state, atariimg, mode=mode, action=action, threshold=threshold, lag=lag)
elif type == 'Positive':
img = self.getPositiveSaliencyImage(state, atariimg, mode=mode, action=action, threshold=threshold, lag=lag)
elif type == 'Negative':
img = self.getNegativeSaliencyImage(state, atariimg, mode=mode, action=action, threshold=threshold, lag=lag)
elif type == 'Absolute':
img = self.getAbsoluteSaliencyImage(state, atariimg, mode=mode, action=action, threshold=threshold, lag=lag)
return img
def getGuidedBPImage(self, state, atariimg, mode='value', action=None, threshold=0.0, lag=-1, type='PosNeg'):
if type != 'PosNeg' and type != 'Positive' and type != 'Negative' and type != 'Absolute':
raise ValueError("type must be 'PosNeg', 'Positive', 'Negative' or 'Absolute'")
elif type == 'PosNeg':
img = self.getPosNegGuidedBPImage(state, atariimg, mode=mode, action=action, threshold=threshold, lag=lag)
elif type == 'Positive':
img = self.getPositiveGuidedBPImage(state, atariimg, mode=mode, action=action, threshold=threshold, lag=lag)
elif type == 'Negative':
img = self.getNegativeGuidedBPImage(state, atariimg, mode=mode, action=action, threshold=threshold, lag=lag)
elif type == 'Absolute':
img = self.getAbsoluteGuidedBPImage(state, atariimg, mode=mode, action=action, threshold=threshold, lag=lag)
return img
def act(self, state):
"""
Get state and do action
Two option can be selectedd if explore select random action
if exploit ask nnet for action
"""
act_protocol = 'Explore' if random.uniform(0, 1) <= self.epsilon else 'Exploit'
if act_protocol == 'Explore':
action = random.randrange(self.action_size)
else:
with torch.no_grad():
state = torch.tensor(state, dtype=torch.float, device=DEVICE).unsqueeze(0)
q_values = self.online_model.forward(state) # (1, action_size)
action = torch.argmax(q_values).item() # Returns the indices of the maximum value of all elements
return action
action_dict = {
'NOOP': 'x',
'FIRE': 'O',
'LEFT': '<-',
'RIGHT': '->',
'LEFTFIRE': '<-O',
'RIGHTFIRE': 'O->'
}
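# How the pieces above fit together (illustrative; env/agent/state come from the
# training loop): the agent picks an epsilon-greedy action index and action_dict
# turns the ALE action meaning into the short symbol used as an edge label below.
#   meanings = env.unwrapped.get_action_meanings()   # e.g. ['NOOP', 'FIRE', 'LEFT', ...]
#   action = agent.act(state)
#   print(action_dict[meanings[action]])              # e.g. '<-O' for 'LEFTFIRE'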
def showActionTree(env, agent, state, episode, step, number_steps_ahead):
global fig
plt.close('all')
SCALE = 4
Q_VALUE_SENSITIVITY = 30
actions = env.unwrapped.get_action_meanings()
snapshot = env.ale.cloneState()
current_snapshot = None
actionTree = nx.Graph()
actionTree.add_node('0', pos=(0, 0))
action = None
for i in range(number_steps_ahead):
prev_action = action
action = agent.act(state)
with torch.no_grad():
_state = torch.tensor(state, dtype=torch.float, device=DEVICE).unsqueeze(0)
q_values = agent.online_model.forward(_state)
q_values_softmax = torch.nn.functional.softmax(Q_VALUE_SENSITIVITY * q_values, dim=1)
current_snapshot = env.ale.cloneState()
for j in range(len(actions)):
next_state, reward, done, info = env.step(j)
actionTree.add_node(
'{}_{}'.format(str(i + 1), str(j)),
pos=(3 * (i + 1), (len(actions) - 1) / 2 - j),
image=env.ale.getScreenRGB()
)
actionTree.add_edge(
'0' if i == 0 else '{}_{}'.format(str(i), str(prev_action)),
'{}_{}'.format(str(i + 1), str(j)),
label='{}\n{}'.format(action_dict[actions[j]], round(q_values[0, j].item(), 2)),
width=max(SCALE * 4 * q_values_softmax[0, j].item(), 1)
)
env.ale.restoreState(current_snapshot)
next_state, reward, done, info = env.step(action)
next_state = agent.preProcess(next_state) # Process image
next_state = np.stack((next_state, state[0], state[1], state[2]))
state = next_state
pos = nx.get_node_attributes(actionTree, 'pos')
fig = plt.figure(episode * MAX_STEP + step, figsize=(SCALE * 12, SCALE * 5))
ax = fig.add_subplot(111)
nx.draw(actionTree, pos=pos, width=list(nx.get_edge_attributes(actionTree, 'width').values()), node_size=0)
nx.draw_networkx_edge_labels(actionTree, pos=pos, font_size=SCALE * 7,
edge_labels=nx.get_edge_attributes(actionTree, 'label'))
for i in range(number_steps_ahead):
for j in range(len(actions)):
coords = ax.transData.transform((3 * (i + 1), (len(actions) - 1) / 2 - j))
fig.figimage(actionTree.nodes['{}_{}'.format(str(i + 1), str(j))]['image'], xo=coords[0] - 50,
yo=coords[1] - 80, zorder=1)
env.ale.restoreState(snapshot)
return fig
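# Typical call site (sketch; episode/step bookkeeping comes from the caller's loop,
# and MAX_STEP is assumed to be defined at module level as used above):
#   fig = showActionTree(env, agent, state, episode, step, number_steps_ahead=3)
#   fig.savefig("action_tree_{}_{}.png".format(episode, step))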
def showActionTreeV2(env, agent, state, episode, step, number_steps_ahead, number_of_best_paths,
restricted_branching=False):
global fig
plt.close('all')
SCALE = 4
Q_VALUE_SENSITIVITY = 30
Q_VALUE_THRESHOLD = 0.5
actions = env.unwrapped.get_action_meanings()
def qValuesAndScreens(env, agent, state, episode, step, number_steps_ahead):
if number_steps_ahead == 0:
return []
action_paths = []
snapshot
m:
group = m.groupdict()
label_withdraw = peer_dict.setdefault('in_label_withdraw_records', {})
label_withdraw.update({'created': int(group['created'])})
label_withdraw.update({'freed': int(group['freed'])})
continue
# Local Address Withdraw Set: 0, Cleared: 0
m = p4.match(line)
if m:
group = m.groupdict()
label_address_request = peer_dict.setdefault('local_address_withdraw', {})
label_address_request.update({'set': int(group['set'])})
label_address_request.update({'cleared': int(group['cleared'])})
continue
# Transmit contexts enqueued: 0, dequeued: 0
m = p5.match(line)
if m:
group = m.groupdict()
transmit_dict = peer_dict.setdefault('transmit_contexts', {})
transmit_dict.update({'enqueued': int(group['enqueued'])})
transmit_dict.update({'dequeued': int(group['dequeued'])})
continue
# Total In label Request Records created: 0, freed: 0
m = p6.match(line)
if m:
group = m.groupdict()
statistic_dict = result_dict.setdefault('statistics', {})
total_label_dict = statistic_dict.setdefault('total_in_label_request_records', {})
total_label_dict.update({'created': int(group['created'])})
total_label_dict.update({'freed': int(group['freed'])})
continue
# Total In label Withdraw Records created: 0, freed: 0
m = p7.match(line)
if m:
group = m.groupdict()
total_withdraw_dict = statistic_dict.setdefault('total_in_label_withdraw_records', {})
total_withdraw_dict.update({'created': int(group['created'])})
total_withdraw_dict.update({'freed': int(group['freed'])})
continue
# Total Local Address Withdraw Records created: 0, freed: 0
m = p8.match(line)
if m:
group = m.groupdict()
total_local_address_dict = statistic_dict.setdefault('total_local_address_withdraw_records', {})
total_local_address_dict.update({'created': int(group['created'])})
total_local_address_dict.update({'freed': int(group['freed'])})
continue
# Label Request Acks:
m = p9.match(line)
if m:
# label_request_acks = True
temp_dict = statistic_dict.setdefault('label_request_acks', {})
continue
# Number of chkpt msg sent: 0
m = p10.match(line)
if m:
group = m.groupdict()
temp_dict.setdefault('number_of_chkpt_messages',{}).update({'sent': int(group['msg_sent'])})
continue
# Number of chkpt msg in queue: 0
m = p11.match(line)
if m:
session_sync_flag = False
group = m.groupdict()
temp_dict.setdefault('number_of_chkpt_messages', {}).update({'in_queue': int(group['queue'])})
continue
# Number of chkpt msg in state none: 0
m = p12.match(line)
if m:
session_sync_flag = False
group = m.groupdict()
temp_dict.setdefault('number_of_chkpt_messages', {}).update({'in_state_none': int(group['state_none'])})
continue
# Number of chkpt msg in state send: 0
m = p13.match(line)
if m:
session_sync_flag = False
group = m.groupdict()
temp_dict.setdefault('number_of_chkpt_messages', {}).update({'in_state_send': int(group['state_send'])})
continue
# Number of chkpt msg in state wait: 0
m = p14.match(line)
if m:
session_sync_flag = False
group = m.groupdict()
temp_dict.setdefault('number_of_chkpt_messages', {}).update({'in_state_wait': int(group['state_wait'])})
continue
# Label Withdraw Acks:
m = p15.match(line)
if m:
temp_dict = statistic_dict.setdefault('label_withdraw_acks', {})
continue
# Address Withdraw Acks:
m = p16.match(line)
if m:
temp_dict = statistic_dict.setdefault('address_withdraw_acks', {})
continue
# Session Sync:
m = p17.match(line)
if m:
session_sync_flag = True
session_sync_dict = statistic_dict.setdefault('session_sync', {})
continue
# Number of session-sync msg sent: 0
m = p18.match(line)
if m:
if session_sync_flag:
group = m.groupdict()
key = group['session_sync_keys'].lower().replace(' ','_')
session_sync_dict.update({key.replace('-','_'): int(group['session_sync_values'])})
continue
return result_dict
class ShowMplsLdpNeighborSchema(MetaParser):
"""Schema for show mpls ldp neighbor"""
schema = {
'vrf': {
Any(): {
'peers': {
Any(): {
'label_space_id':{
Any():{
'local_ldp_ident': str,
'tcp_connection': str,
'state': str,
'msg_sent': int,
'msg_rcvd': int,
'downstream': bool,
Optional('last_tib_rev_sent'): int,
Optional('password'): str,
Optional('uptime'): str,
Optional('peer_holdtime_ms'): int,
Optional('ka_interval_ms'): int,
Optional('peer_state'): str,
Optional('ldp_discovery_sources'): {
'interface':{
Any():{
Optional('ip_address'): {
Any(): {
Optional('holdtime_ms'): int,
Optional('hello_interval_ms'): int,
}
}
}
}
},
Optional('address_bound'): list,
Optional('nsr'): str,
Optional('capabilities'):{
'sent': {
Optional('ICCP'):{
'type': str,
'maj_ver': int,
'min_ver': int,
},
Optional('dynamic_anouncement'): str,
Optional('mldp_point_to_multipoint'): str,
Optional('mldp_multipoint_to_multipoint'): str,
Optional('typed_wildcard'): str,
},
Optional('received'): {
Optional('ICCP'):{
'type': str,
'maj_ver': int,
'min_ver': int,
},
Optional('dynamic_anouncement'): str,
Optional('mldp_point_to_multipoint'): str,
Optional('mldp_multipoint_to_multipoint'): str,
Optional('typed_wildcard'): str,
},
},
},
},
}
}
}
},
}
class ShowMplsLdpNeighbor(ShowMplsLdpNeighborSchema):
"""Parser for show mpls ldp neighbor,
show mpls ldp neighbor vrf <vrf>"""
cli_command = ['show mpls ldp neighbor', 'show mpls ldp neighbor vrf {vrf}']
def cli(self, vrf="", output=None):
if output is None:
if vrf:
cmd = self.cli_command[1].format(vrf=vrf)
else:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
if not vrf:
vrf = 'default'
# initial return dictionary
result_dict = {}
address_bound_flag = False
received_flag = False
sent_flag = False
# Peer LDP Ident: 10.169.197.252:0; Local LDP Ident 10.169.197.254:0
p1 = re.compile(r'^Peer +LDP +Ident: *(?P<peer_ldp>[\d\.]+):(?P<label_space_id>\d+); +Local +LDP +Ident +(?P<local_ldp>\S+)$')
# TCP connection: 10.169.197.252.646 - 10.169.197.254.20170
p2 = re.compile(r'^TCP +connection: *(?P<tcp_connection>[\S\s]+)$')
# State: Oper; Msgs sent/rcvd: 824/825; Downstream
# State: Oper; Msgs sent/rcvd: 824/825; Downstream; Last TIB rev sent 4103
# State: Oper; Msgs sent/rcvd: 5855/6371; Downstream on demand
p3 = re.compile(r'^State: *(?P<state>\w+); +Msgs +sent\/rcvd: *(?P<msg_sent>\d+)\/(?P<msg_rcvd>\d+);'
' +(?P<downstream>[\w\s]+)(; +Last +TIB +rev +sent +(?P<last_tib_rev_sent>\d+))?$')
# Up time: 04:26:14
# Up time: 3d21h; UID: 4; Peer Id 0
p4 = re.compile(r'^Up +time: *(?P<up_time>[\w\:]+)(; +UID: *(?P<uid>\d+); +Peer +Id +(?P<peer_id>\d+))?$')
# LDP discovery sources:
# GigabitEthernet0/0/0, Src IP addr: 10.169.197.93
# ATM3/0.1
p5 = re.compile(r'^(?P<interface>[A-Za-z]+[\d/.]+)((,|;) +Src +IP +addr: *(?P<src_ip_address>[\d\.]+))?$')
# holdtime: 15000 ms, hello interval: 5000 ms
p5_1 = re.compile(r'^holdtime: *(?P<holdtime>\d+) +ms, +hello +interval: *(?P<hello_interval>\d+) +ms$')
# Addresses bound to peer LDP Ident:
p6 = re.compile(r'^Addresses +bound +to +peer +LDP +Ident:$')
# 10.169.197.252 10.120.202.49 10.169.197.101 10.16.190.254
p7 = re.compile(r'^(?P<address_bound_peer_ldp>[\d\.\s]+)$')
# Peer holdtime: 180000 ms; KA interval: 60000 ms; Peer state: estab
p8 = re.compile(r'^Peer +holdtime: *(?P<peer_holdtime>\d+) +ms; +KA +interval: *(?P<ka_interval>\d+) +ms;'
' +Peer +state: +(?P<peer_state>\S+)$')
# Password: not required, none, in use
p9 = re.compile(r'^Password: +(?P<password>[\S\s]+)$')
# NSR: Not Ready
p10 = re.compile(r'^NSR: +(?P<nsr>[\S\s]+)$')
# Capabilities Sent:
p11 = re.compile(r'^Capabilities +Sent:$')
# [ICCP (type 0x0405) MajVer 1 MinVer 0]
p12 = re.compile(r'^\[ICCP \(type +(?P<type>\w+)\) +MajVer +(?P<maj_ver>\d+) +MinVer +(?P<min_ver>\d+)\]$')
# [Dynamic Announcement (0x0506)]
p13 = re.compile(r'^\[Dynamic +Announcement \((?P<dynamic_anouncement>\w+)\)\]$')
# [mLDP Point-to-Multipoint (0x0508)]
p14 = re.compile(r'^\[mLDP +Point\-to\-Multipoint \((?P<mldp_point_to_multipoint>\w+)\)\]$')
# [mLDP Multipoint-to-Multipoint (0x0509)]
p15 = re.compile(r'^\[mLDP +Multipoint\-to\-Multipoint \((?P<mldp_multipoint_to_multipoint>\w+)\)\]$')
# [Typed Wildcard (0x050B)]
p16 = re.compile(r'^\[Typed +Wildcard \((?P<typed_wildcard>\w+)\)\]$')
# Capabilities Received:
p17 = re.compile(r'^Capabilities +Received:$')
# [None]
p18 = re.compile(r'^\[None\]$')
for line in out.splitlines():
line = line.strip()
# Peer LDP Ident: 10.169.197.252:0; Local LDP Ident 10.169.197.254:0
m = p1.match(line)
if m:
group = m.groupdict()
address_bound_flag = False
peer_dict = result_dict.setdefault('vrf', {}).\
setdefault(vrf, {}).\
setdefault('peers', {}).\
setdefault(group['peer_ldp'], {}).\
setdefault('label_space_id', {}).\
setdefault(int(group['label_space_id']), {})
peer_dict.update({'local_ldp_ident':group['local_ldp']})
continue
# TCP connection: 10.169.197.252.646 - 10.169.197.254.20170
m = p2.match(line)
if m:
group = m.groupdict()
tcpconnection = group['tcp_connection']
peer_dict.update({'tcp_connection': tcpconnection})
continue
# State: Oper; Msgs sent/rcvd: 824/825; Downstream
# State: Oper; Msgs sent/rcvd: 824/825; Downstream; Last TIB rev sent 4103
m = p3.match(line)
if m:
group = m.groupdict()
peer_dict.update({'state': group['state'].lower()})
peer_dict.update({'msg_sent': int(group['msg_sent'])})
peer_dict.update({'msg_rcvd': int(group['msg_rcvd'])})
peer_dict.update({'downstream': True if 'downstream' in group['downstream'].lower() else False})
if group['last_tib_rev_sent']:
peer_dict.update({'last_tib_rev_sent': int(group['last_tib_rev_sent'])})
continue
# Up time: 04:26:14
m = p4.match(line)
if m:
group = m.groupdict()
peer_dict.update({'uptime': group['up_time']})
continue
# GigabitEthernet0/0/0, Src IP addr: 10.169.197.93
m = p5.match(line)
if m:
group = m.groupdict()
ldp_source_dict = peer_dict.setdefault('ldp_discovery_sources',{}).\
setdefault('interface',{}).\
setdefault(group['interface'],{})
if group['src_ip_address']:
ldp_source_ip_address_dict = ldp_source_dict.setdefault('ip_address',{}).\
setdefault(group['src_ip_address'],{})
continue
# holdtime: 15000 ms, hello interval: 5000 ms
m = p5_1.match(line)
if m:
group = m.groupdict()
ldp_source_ip_address_dict.update({'holdtime_ms': int(group['holdtime'])})
ldp_source_ip_address_dict.update({'hello_interval_ms': int(group['hello_interval'])})
continue
# Addresses bound to peer LDP Ident:
m = p6.match(line)
if m:
address_bound_flag = True
continue
# 10.169.197.252 10.120.202.49 10.169.197.101 10.16.190.254
m = p7.match(line)
if m:
group = m.groupdict()
address_bound_list = group['address_bound_peer_ldp'].split()
if address_bound_flag:
if 'address_bound' not in peer_dict:
peer_dict.update({'address_bound': address_bound_list})
else:
peer_dict['address_bound'].extend(address_bound_list)
continue
# Peer holdtime: 180000 ms; KA interval: 60000 ms; Peer state: estab
m = p8.match(line)
if m:
group = m.groupdict()
peer_dict.update({'peer_holdtime_ms': int(group['peer_holdtime'])})
peer_dict.update({'ka_interval_ms': int(group['ka_interval'])})
peer_dict.update({'peer_state': group['peer_state']})
continue
# Password: not required, none, in use
m = p9.match(line)
if m:
group = m.groupdict()
peer_dict.update({'password': group['password']})
continue
# NSR: Not Ready
m = p10.match(line)
if m:
group = m.groupdict()
peer_dict.update({'nsr': group['nsr']})
continue
# Capabilities Sent:
m = p11.match(line)
if m:
received_flag = False
sent_flag = True
temp_dict = peer_dict.setdefault('capabilities', {}).setdefault('sent', {})
continue
# [ICCP (type 0x0405) MajVer 1 MinVer 0]
m = p12.match(line)
if m:
group = m.groupdict()
iccp_dict = temp_dict.setdefault('ICCP',{})
iccp_dict.update({'type': group['type']})
iccp_dict.update({'maj_ver': int(group['maj_ver'])})
iccp_dict.update({'min_ver': int(group['min_ver'])})
continue
# [Dynamic Announcement (0x0506)]
m = p13.match(line)
if m:
group = m.groupdict()
temp_dict.update({'dynamic_anouncement': group['dynamic_anouncement']})
continue
# [mLDP Point-to-Multipoint (0x0508)]
m = p14.match(line)
if m:
group = m.groupdict()
temp_dict.update({'mldp_point_to_multipoint': group['mldp_point_to_multipoint']})
continue
# [mLDP Multipoint-to-Multipoint (0x0509)]
m = p15.match(line)
if m:
group = m.groupdict()
temp_dict.update({'mldp_multipoint_to_multipoint': group['mldp_multipoint_to_multipoint']})
continue
# [Typed Wildcard (0x050B)]
m = p16.match(line)
if m:
group = m.groupdict()
temp_dict.update({'typed_wildcard': group['typed_wildcard']})
continue
# Capabilities Received:
m = p17.match(line)
if m:
received_flag = True
sent_flag = False
temp_dict = peer_dict.setdefault('capabilities', {}).setdefault('received', {})
continue
# [None]
m = p18.match(line)
if m:
if received_flag:
peer_dict['capabilities'].pop('received')
if sent_flag:
peer_dict['capabilities'].pop('sent')
continue
return result_dict
class ShowMplsLdpNeighborDetail(ShowMplsLdpNeighbor):
"""Parser for show mpls ldp neighbor detail,
show mpls ldp neighbor vrf <vrf> detail"""
cli_command = ['show mpls ldp neighbor detail', 'show mpls ldp neighbor vrf {vrf} detail']
def cli(self, vrf="", output=None):
if output is None:
if vrf:
cmd = self.cli_command[1].format(vrf=vrf)
else:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
return super().cli(vrf=vrf, output=out)
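# Minimal usage sketch (the device object and raw CLI text are hypothetical; in a
# pyATS/Genie job these parsers are normally invoked via device.parse()):
#   parser = ShowMplsLdpNeighbor(device=device)
#   parsed = parser.cli(output=raw_show_mpls_ldp_neighbor_output)
#   state = parsed['vrf']['default']['peers']['10.169.197.252']['label_space_id'][0]['state']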
class ShowMplsLdpBindingsSchema(MetaParser):
"""
Schema for show mpls
"""
Regex notes:
Get recipe title
(?:^# (.*)\s+)
Get the img tag or whatever is between the title line and the next section
(?:^# .*\s*)(^(?:<.+>\s*?)*?)\s*(?:##[^#])
Get the overview section rows in a single group
(?:^## Overview\s+)(^(?:.*\s*?)*?)\s*(?:##[^#])
Get the entire ingredients sections, including blanks rows, and all sub lists of ingredients
(?:^## Ingredients\s+)(^(?:.*\s)*?)\s*(?:##[^#])
# This capture REQUIRES a '## ' afterwards - otherwise it backtracks... need to fix
Get the entire methods section including blank rows, '---' rows, and sub method rows and titles
# Since there might be nothing after this section, just try to remove them after grabbing the entire methods block
(?:^## Method\s+)(^(?:.*\s*?)*?)\s*(?:##[^#])
Grab everything: (?:^## Method\s+)(^(?:.*\s)+)
so use this to grab EVERYTHING, and the group(0) components to remove the other things
then sub out group(1) of this: (^## Notes\W.*\s*^(?:.*\s)+)
and this of group 1: (^## (?:(?:Ref)|(?:Ack)|(?:Adap)).*\s^(?:.*\s*)+)
and finally this group 1: (^## Tags\W.*\s*^(?:.*\s)+)
Get the Note section rows, including any blank lines
(?:^## Notes\s*)(^(?:.*\s)*?)\s*(?:##[^#])
Get the reference section, if it is named with "References" or with "Acknowledgements"
(?:^## (?:(?:Ref)|(?:Ack)).*\s*)(^(?:.*\s)*?)\s*(?:##[^#])
Get the tags section, including any blank lines. Must be last section
(?:^## Tags\s*)(^(?:.*\s)*)
"""
import re
import sys
from typing import List
items = List[str]
"""
Functions that "perfectly" extract recipe info from md files
"""
"""
A sublist must have at least 1 item, but the title is optional
"""
class Sublist:
def __init__( self, items: items, title: str = None ):
self.title = title
self.items = items
def __len__( self ):
if self.items:
return len(self.items)
else:
return 0
class Section:
    def __init__( self, sublists = None ):
if sublists is not None:
self.sublists = sublists
else:
self.sublists = []
# if not sublists:
# self.sublists = []
# else:
# self.sublists = sublists
def append_sublist( self, section: Sublist ):
if self.sublists is None:
self.sublists = list()
self.sublists.append( section )
return self
def num_sublists( self ):
return len(self.sublists)
    def num_items( self ):
        total = 0
        if self.sublists is not None:
            for sublist in self.sublists:
                total += len(sublist.items)
        return total
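# Quick illustration of the two containers above (hypothetical recipe values):
#   s = Section()
#   s.append_sublist(Sublist(items=["2 eggs", "1 cup flour"], title="Batter"))
#   s.append_sublist(Sublist(items=["pinch of salt"]))
#   s.num_sublists() -> 2, s.num_items() -> 3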
def valid( txt ):
return txt != ""
# Returns the title string, None otherwise
def get_title( txt ):
ret = None
match = re.search( r'(?:^# (.*)\s+)', txt, flags=re.M )
if match and valid( match.group(0) ) and valid( match.group(1) ):
ret = match.group(1)
else:
assert(match)
assert(match.group(0))
assert(match.group(1))
return ret
# Returns a tuple ("Title", "Link")
# If the img reference section exists, the "link" attribute must be present
# The title attribute is optional
def get_img_link( txt ):
match = re.search( r'(?:^# .*\s*)(^(?:<.+>\s*?)*?)\s*(?:##[^#])', txt, flags=re.M )
ret_title = None
ret_link = None
title = None
link = None
if match and valid( match.group(1) ):
# TODO: Read lines and return a tuple
link_section = match.group(1)
link_data_match = re.search( r"<(?:.*title\s*=\s*\")(.+)(?:\"\s*src\s*=\s*\")(.*)(?:\"\s*>)", link_section, flags=re.M )
title_match = re.search( r"(?:<.*title\s*=\s*\")([^\"]*)", link_section, flags=re.M )
link_match = re.search( r"(?:<.*src\s*=\s*\")([^\"]*)", link_section, flags=re.M )
assert( link_match )
assert( valid( link_match.group(1) ) )
if(title_match):
title = title_match.group(1)
if(link_match):
link = link_match.group(1)
if valid( link ):
if valid( title ):
ret_title = title
ret_link = link
else:
assert( valid(link) )
return ret_title, ret_link
# Returns a list of the lines in the Overview ["Servings: 4", "prep time", "cook time", "total time", .. ]
def get_overview( txt ):
ret = None
ov_lines_blk_match = None
filled_lines = list()
final_lines = list()
if re.search( r'(?:^## Overview\s+)', txt, flags=re.M ):
ov_lines_blk_match = re.search( r'(?:^## Overview\s+)(^(?:.*\s*?)*?)\s*(?:##[^#])', txt, flags=re.M )
if ov_lines_blk_match:
lines = ov_lines_blk_match.group(1).splitlines()
filled_lines = [line for line in lines if line != ""]
assert( len(filled_lines) > 0 )
# Strip any bullet points, or number prefixes in the lines
final_lines = [re.sub( r"\W*\d*\.?\W*(.*)", r"\g<1>", line, flags=re.M) for line in filled_lines]
assert( len(final_lines) > 0 )
for line in final_lines:
assert( len(line) > 0 )
assert( line != "" )
ret = final_lines
return ret
#
def get_ingredients( txt ):
ret = None
ing_section = Section()
in_sublist = False
# Get all the text under the ingredients header in group 1
match = re.search( r'(?:^## Ingredients\s+)(^(?:.*\s)*?)\s*(?:##[^#])', txt, flags=re.M )
if match:
# Remove and blank/empty lines
subbed_blanks = re.sub( r"\s*$", r"", match.group(1), flags=re.M )
ing_txt = subbed_blanks
ing_lines = subbed_blanks.split('\n')
# If a recipe starts a sublist, then any ungrouped regular listed items
# Must come before all the sublists. The sublists must be last in the section.
for line in ing_lines:
assert(line != "")
item_match = re.search( r'^\W\s*([\w].+)$', line, flags=re.M)
heading_match = re.search( r'^#+\s*([\w].+)$', line, flags=re.M )
blank_match = re.search( r'^\s*$', line, flags=re.M )
# print(" " + line)
assert(item_match or heading_match or blank_match)
# Grab regular items in a simple list
if item_match and not in_sublist:
stripped_item = re.sub( r'^\s*([- \*]*\s*)?([\w].+)$', r"\g<2>", item_match.group(1), flags=re.M )
ing_section.append_sublist( Sublist( items = [item_match.group(1)] ) )
ret = ing_section
assert(heading_match is None)
assert(blank_match is None)
# Everything after this is in a sublist
elif heading_match:
sublist_matches = re.finditer( r'^#+\s*([\w].+)$\s*((?:[-\d\(].*\s*)+)', match.group(1), flags=re.M )
in_sublist = True
for subsection in sublist_matches:
subsection_items = list()
stripped_subsection_title = re.sub( r'^([\w].*?)(\W*)?$', r"\g<1>", subsection.group(1), flags=re.M )
for ln in re.split( r'\n', subsection.group(2), 50, flags=re.M ):
if ln != "":
stripped_item = re.sub( r'^\s*([- \*]*\s*)?([\w].+)$', r"\g<2>", ln, flags=re.M )
subsection_items.append(stripped_item)
# print(f"{stripped_subsection_title}: {subsection_items}")
ing_section.append_sublist( Sublist( items = subsection_items, title = stripped_subsection_title ) )
ret = ing_section
assert(item_match is None)
assert(blank_match is None)
break
elif blank_match:
assert(item_match is None)
assert(heading_match is None)
else:
assert(False)
return ret
def get_method( txt ):
ret = None
mtd_section = Section()
in_sublist = False
assert( mtd_section.num_items() == 0 )
# Get all the text under the methods header in group 1
match = re.search( r'(?:^## Method.*\s+)(^(?:.*\s)+)', txt, flags=re.M )
assert(match)
if match:
# Remove all the sections under the methods section
# This needs to work even if the recipe is missing each of these sections:
# Notes, refs and acks, tags
subbed_notes = re.sub( r"(^## Notes\W.*\s*^(?:.*\s)+)", r"", match.group(1), flags=re.M|re.I )
subbed_references = re.sub( r"(^## (?:(?:Ref)|(?:Ack)|(?:Adap)).*\s^(?:.*\s*)+)", r"", subbed_notes, flags=re.M|re.I )
subbed_tags = re.sub( r"(^## Tags\W.*\s*^(?:.*\s)+)", r"", subbed_references, flags=re.M|re.I )
# Remove any blank lines, and '---' separator lines
subbed_separators = re.sub( r"^-+$", r"", subbed_tags, flags=re.M )
subbed_blanks = re.sub( r"\s*$", r"", subbed_separators, flags=re.M )
method_txt = subbed_blanks
method_lines = subbed_blanks.split('\n')
# If a recipe starts a sublist, then any ungrouped regular listed items
# Must come before all the sublists. The sublists must be last in the section.
for line in method_lines:
assert(line != "")
item_match = re.search( r'^(?![#\-]+\s+.*)\d*\.* *(.*)$', line, flags=re.M)
heading_match = re.search( r'^#+\s*(\w.+)$', line, flags=re.M )
blank_match = re.search( r'^\s*$', line, flags=re.M )
separator_match = re.search( r'^-+$', line, flags=re.M )
assert(item_match or heading_match or blank_match or separator_match)
# Grab regular items in a simple list
if item_match and item_match.group(1) != "" and in_sublist == False:
mtd_section.append_sublist( Sublist( items = [item_match.group(1)] ) )
ret = mtd_section
assert(heading_match is None)
assert(blank_match is None)
# Everything after this is in a sublist
elif heading_match:
sublist_matches = re.finditer( r'^#+\s*([\w].+)$\s*((?:[-\d\(\w].*\s*)+)', method_txt, flags=re.M )
# Dont revisit the header match section - everything else is part of a sublist
in_sublist = True
for subsection in sublist_matches:
subsection_items = list()
stripped_subsection_title = re.sub( r'^[^\w#]+\d*\s*(.*)$', r"\g<1>", subsection.group(1), flags=re.M )
for ln in re.split( r'\n', subsection.group(2).strip(), 50, flags=re.M ):
if ln != "":
stripped_item = re.sub( r'(?:^[\d\-\.\s]*)(.*)$', r"\g<1>", ln, flags=re.M )
if stripped_item != "":
subsection_items.append(stripped_item)
mtd_section.append_sublist( Sublist( items = subsection_items, title = stripped_subsection_title ) )
ret = mtd_section
assert(item_match is None)
assert(blank_match is None)
assert(separator_match is None)
break
elif blank_match or separator_match:
assert(item_match is None)
assert(heading_match is None)
pass
else:
assert(False)
return mtd_section
def get_notes( txt ):
ret = None
notes_lines_blk_match = None
filled_lines = list()
final_lines = list()
if re.search( r'(?:^## Notes\s+)', txt, flags=re.M ):
notes_lines_blk_match = re.search( r'(?:^## Notes\s*)(^(?:.*\s)*?)\s*(?:##[^#])', txt, flags=re.M )
if notes_lines_blk_match:
lines = notes_lines_blk_match.group(1).splitlines()
filled_lines = [line for line in lines if line != ""]
assert( len(filled_lines) > 0 )
# Strip any bullet points, or number prefixes in the lines
final_lines = [re.sub( r"\W*\d*\.?\W*(.*)", r"\g<1>", line, flags=re.M) for line in filled_lines]
assert( len(final_lines) > 0 )
for line in final_lines:
assert( len(line) > 0 )
assert( line != "" )
ret = final_lines
| |
active dashboards': ['netcool realtime'],
'netcool service': ['netcool'],
'netcool service monitor': ['netcool service', 'service monitor'],
'netcool system': ['netcool'],
'netcool system service monitor': ['netcool system'],
'netcool/service monitor for network usage': ['netcool service monitor'],
'netcool/service monitor reporter': ['netcool service monitor'],
'netezza': ['software'],
'netezza high': ['netezza'],
'netezza high capacity': ['netezza high'],
'netezza high capacity appliance': ['netezza high'],
'netezza high capacity appliance c1000': ['netezza high'],
'netnovo': ['software'],
'netnovo application': ['netnovo'],
'network': ['root'],
'network administrator': ['specialist'],
'network architect': ['architect', 'network'],
'network configuration': ['network'],
'network configuration and change management': ['network configuration'],
'network database': ['navigational database'],
'network design': ['design artifact'],
'network device': ['device'],
'network downtime': ['downtime'],
'network drive': ['network device'],
'network equipment provider': ['company'],
'network gateway': ['network device'],
'network management': ['network', 'specialist', 'support specialist'],
'network methodology': ['methodology'],
'network model': ['navigational database'],
'network outage': ['downtime'],
'network packet': ['data artifact'],
'network performance reporting': ['network'],
'network product': ['product'],
'network protocol': ['protocol'],
'network provisioning': ['service provisioning'],
'network security': ['security'],
'network security policy': ['security policy'],
'network service': ['service'],
'network services product': ['network product'],
'network skill': ['technical skill'],
'network specialist': ['specialist'],
'network standard': ['standard'],
'network support': ['support'],
'network support specialist': [ 'network',
'specialist',
'support specialist'],
'network tool': ['software'],
'networking': ['teamwork'],
'neural language model': ['nueral networks'],
'neural nets': ['neural network'],
'neural network': ['deep learning'],
'neurobiology': ['scientific skill'],
'neurology': ['neuroscience'],
'neuropsychology': ['neuroscience', 'psychology'],
'neuroscience': ['scientific skill'],
'new employee': ['employee'],
'new hire': ['employee'],
'new manager': ['employee'],
'news media': ['media'],
'newtonian physics': ['physics'],
'nfx series network services': ['network services product'],
'nginx': [ 'load balancer',
'open source software',
'reverse proxy',
'web server'],
'nltk': ['open source nlp software', 'software'],
'no career growth': ['no change'],
'no change': ['negative situation'],
'no conversation': ['team culture', 'work culture'],
'no problem': ['positive situation'],
'no promotion': ['no change'],
'no skills growth': ['no change'],
'node red': ['technical framework'],
'nodejs': ['javascript', 'web language'],
'noisy intermediate scale quantum': ['quantum computing'],
'nokia': ['company'],
'nokia nuage sdn': ['software defined network'],
'non functional requirement': ['requirement'],
'non linear dimension reduction model': ['dimension reduction model'],
'non linear programming': ['optimization method'],
'non profit company': ['company'],
'nordea': ['company'],
'nordic': ['company'],
'norwegian wood': ['company'],
'nosql': ['database'],
'notebook interface': ['software library'],
'novartis': ['company'],
'novia scotia bank': ['financial company'],
'ns lookup': ['network tool'],
'nuage networks': ['nokia'],
'nubifer cloud portal': ['cloud computing platform'],
'nuclear physics': ['physics'],
'nueral networks': ['supervised learning'],
'number theory': ['math skill'],
'numecent': ['application virtualization'],
'numerical analysis': ['math skill'],
'numerical method': ['math skill'],
'numerical methods': ['machine learning'],
'numpy': ['python library'],
'nutch': ['apache software'],
'ny state': ['company'],
'object oriented': ['capability'],
'object pascal': ['programming language'],
'oceanography': ['geology'],
'offering': ['activity'],
'offering manager': ['manager'],
'office': ['physical location'],
'offline': ['connectivity'],
'offshore': ['team'],
'offshore team': ['team'],
'ohio state': ['company'],
'olap': ['software'],
'old': ['age'],
'oltp': ['software'],
'omegamon': ['software'],
'omegamon z/os': ['omegamon'],
'omegamon z/os management': ['omegamon z/os'],
'omegamon z/os management console': ['omegamon z/os'],
'omni channel': ['channel'],
'omnifind': ['software'],
'omnifind discovery edition': ['omnifind'],
'omnifind enterprise edition': ['omnifind'],
'omnifind yahoo! edition': ['omnifind'],
'oncology': ['medical skill'],
'one class support vector machine': ['outlier detection'],
'online': ['connectivity'],
'online training': ['training'],
'onsite': ['location'],
'onsite training': ['onsite', 'training'],
'ontology': ['knowledge graph'],
'oozie': ['apache software'],
'open cloud computing interface': ['cloud computing platform'],
'open source': ['software'],
'open source framework': ['framework'],
'open source license': ['license'],
'open source nlp software': ['open source software'],
'open source software': ['software'],
'open telekom cloud': ['cloud computing platform'],
'open_stack': ['software platform'],
'openjs nodejs application developer': ['linux certification'],
'openjs nodejs services developer': ['linux certification'],
'openpages': ['software'],
'openpages grc platform': ['openpages'],
'openshift': ['container software'],
'openshift dedicated': ['openshift'],
'openshift online': ['openshift', 'public cloud'],
'openshift origin': ['openshift'],
'openshift platform': ['infrastructure platform'],
'openshift virtualization': ['openshift'],
'openshift.io': ['openshift', 'software as a service'],
'openstack': ['software platform'],
'operating model': ['business model'],
'operating system': ['software'],
'operating system environment': ['operating system'],
'operating system environment manager for z/os': ['operating system'],
'operating system virtualization': ['virtualization'],
'operation analyst': ['analyst', 'operational role'],
'operation research': ['research skill'],
'operational': ['technical role'],
'operational analytics': ['analytics'],
'operational decision management': ['software'],
'operational decision manager for z/os': [ 'operational decision '
'management'],
'operational decision manager pattern v8.0': [ 'operational decision '
'management'],
'operational decision manager pattern v8.0 on red hat enterprise linux server': [ 'operational '
'decision '
'management'],
'operational model': ['model'],
'operational role': ['technical role'],
'operational skill': ['skill'],
'operations': ['software'],
'operations analyst': ['analyst', 'operational role'],
'operations architect': ['architect'],
'operations management': ['manager'],
'operations manager': ['manager'],
'operations manager for z/vm': ['operations'],
'operations production analyst': ['operations analyst'],
'operations research': ['research skill'],
'operations role': ['individual role'],
'opportunity': ['state'],
'opportunity owner': ['sales role'],
'optical network': ['network'],
'optics': ['density based clustering'],
'optim': ['software'],
'optim database': ['optim'],
'optim database administrator': ['optim database'],
'optim database administrator for db2': ['optim database'],
'optim database administrator for db2 for linux and unix and windows': [ 'optim '
'database'],
'optim development studio': ['optim'],
'optim high': ['optim'],
'optim high performance': ['optim high'],
'optim high performance unload': ['optim high'],
'optim high performance unload for db2': ['optim high'],
'optim high performance unload for db2 for linux and unix and windows': [ 'optim '
'high'],
'optim move for db2': ['optim'],
'optim performance': ['optim'],
'optim performance manager': ['optim performance'],
'optim performance manager for db2 for linux and unix and windows': [ 'optim '
'performance'],
'optim purequery': ['optim'],
'optim purequery runtime': ['optim purequery'],
'optim purequery runtime for linux and unix and windows': [ 'optim '
'purequery'],
'optim purequery runtime for z/os': ['optim purequery'],
'optim z/os': ['optim'],
'optimization': ['activity'],
'optimization algorithm': ['algorithm'],
'optimization method': ['machine learning'],
'optimization skill': ['technical skill'],
'oracle': ['company', 'relational database'],
'oracle access management suite plus 11g essentials': [ 'oracle '
'certification'],
'oracle advanced controls applications 2014 essentials': [ 'oracle '
'certification'],
'oracle application certification': ['oracle certification'],
'oracle application development framework 12c essentials': [ 'oracle '
'application '
'certification'],
'oracle application grid 11g essentials': [ 'oracle application '
'certification'],
'oracle application integration architecture 11g essentials': [ 'oracle '
'application '
'certification'],
'oracle architect certification': ['oracle certification'],
'oracle big data appliance': ['oracle'],
'oracle bone script': ['oracle'],
'oracle business activity monitoring': ['oracle'],
'oracle business intelligence': ['oracle', 'oracle certification'],
'oracle business intelligence (obi) foundation suite 11g essentials': [ 'oracle',
'oracle '
'certification'],
'oracle business intelligence beans': ['oracle'],
'oracle business intelligence suite enterprise edition': ['oracle'],
'oracle business process management suite 12c essentials': [ 'oracle '
'certification'],
'oracle business rules': ['oracle'],
'oracle call interface': ['oracle'],
'oracle cash management': ['oracle'],
'oracle ccb': ['oracle'],
'oracle certification': ['certification'],
'oracle clinical': ['oracle'],
'oracle cloud': ['cloud computing platform'],
'oracle cloud application foundation essentials': [ 'oracle cloud '
'certification'],
'oracle cloud certification': ['oracle certification'],
'oracle cloud fn': ['oracle cloud'],
'oracle cloud platform': ['oracle cloud'],
'oracle cluster file system': ['oracle'],
'oracle cluster file system release 2': ['oracle cluster file system'],
'oracle cluster registry': ['oracle'],
'oracle clusterware': ['oracle'],
'oracle coherence': ['oracle'],
'oracle commerce': ['oracle'],
'oracle commerce 11 guided search implementation essentials': [ 'oracle '
'commerce '
'certification'],
'oracle commerce 11 platform development implementation essentials': [ 'oracle '
'commerce '
'certification'],
'oracle commerce certification': ['oracle certification'],
'oracle communications calendar server': ['oracle'],
'oracle communications messaging server': ['oracle'],
'oracle content management': ['oracle'],
'oracle crm': ['oracle'],
'oracle crm on demand essentials': ['oracle certification'],
'oracle customer care and billing': ['oracle'],
'oracle customer hub and oracle data quality essentials': [ 'oracle '
'certification'],
'oracle data cartridge': ['oracle'],
'oracle data guard': ['oracle'],
'oracle data integrator': ['oracle'],
'oracle data integrator 12c essentials': ['oracle certification'],
'oracle data mining': ['oracle'],
'oracle database': ['oracle', 'relational database'],
'oracle database appliance': ['oracle database'],
'oracle designer': ['oracle'],
'oracle developer studio': ['oracle'],
'oracle developer suite': ['oracle', 'software'],
'oracle directory server enterprise edition': ['oracle'],
'oracle discoverer': ['oracle'],
'oracle e-business suite (ebs) r12 human capital management essentials': [ 'oracle '
'ebusiness '
'certification'],
'oracle e-business suite (ebs) r12 project essentials': [ 'oracle '
'ebusiness '
'certification'],
'oracle e-business suite r12.1 general ledger essentials': [ 'oracle '
'ebusiness '
'certification'],
'oracle e-business suite r12.1 inventory essentials': [ 'oracle '
'ebusiness '
'certification'],
'oracle e-business suite r12.1 order management essentials': [ 'oracle '
'ebusiness '
'certification'],
'oracle e-business suite r12.1 payables essentials': [ 'oracle ebusiness '
'certification'],
'oracle e-business suite r12.1 purchasing essentials': [ 'oracle '
'ebusiness '
'certification'],
'oracle e-business suite r12.1 receivables essentials': [ 'oracle '
'ebusiness | |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for V1 API."""
from collections import OrderedDict
import logging
import time
import six
from ironic_inspector_client.common import http
from ironic_inspector_client.common.i18n import _
DEFAULT_API_VERSION = (1, 0)
"""Server API version used by default."""
MAX_API_VERSION = (1, 8)
"""Maximum API version this client was designed to work with.
This does not mean that other versions won't work at all - the server might
still support them.
"""
# using huge timeout by default, as precise timeout should be set in
# ironic-inspector settings
DEFAULT_RETRY_INTERVAL = 10
"""Default interval (in seconds) between retries when waiting for introspection
to finish."""
DEFAULT_MAX_RETRIES = 3600
"""Default number of retries when waiting for introspection to finish."""
LOG = logging.getLogger(__name__)
class WaitTimeoutError(Exception):
"""Timeout while waiting for nodes to finish introspection."""
class ClientV1(http.BaseClient):
"""Client for API v1.
Create this object to use Python API, for example::
import ironic_inspector_client
client = ironic_inspector_client.ClientV1(session=keystone_session)
This code creates a client with API version *1.0* and a given Keystone
`session
<http://docs.openstack.org/developer/keystoneauth/using-sessions.html>`_.
The service URL is fetched from the service catalog in this case. Optional
arguments ``service_type``, ``interface`` and ``region_name`` can be
provided to modify how the URL is looked up.
If the catalog lookup fails, the local host with port 5050 is tried.
However, this behaviour is deprecated and should not be relied on.
Also an explicit ``inspector_url`` can be passed to bypass service catalog.
Optional ``api_version`` argument is a minimum API version that a server
must support. It can be a tuple (MAJ, MIN), string "MAJ.MIN" or integer
(only major, minimum supported minor version is assumed).
:ivar rules: Reference to the introspection rules API.
Instance of :py:class:`ironic_inspector_client.v1.RulesAPI`.
"""
def __init__(self, **kwargs):
"""Create a client.
See :py:class:`ironic_inspector_client.common.http.HttpClient` for the
list of acceptable arguments.
:param kwargs: arguments to pass to the BaseClient constructor.
api_version is set to DEFAULT_API_VERSION by default.
"""
kwargs.setdefault('api_version', DEFAULT_API_VERSION)
super(ClientV1, self).__init__(**kwargs)
self.rules = RulesAPI(self.request)
def introspect(self, uuid, new_ipmi_password=None, new_ipmi_username=None):
"""Start introspection for a node.
:param uuid: node UUID or name
:param new_ipmi_password: if set, *Ironic Inspector* will update IPMI
password to this value. DEPRECATED.
:param new_ipmi_username: if new_ipmi_password is set, this value sets
                          the new IPMI user name. Defaults to the one in
                          driver_info. DEPRECATED.
:raises: :py:class:`.ClientError` on error reported from a server
:raises: :py:class:`.VersionNotSupported` if requested api_version
is not supported
:raises: *requests* library exception on connection problems.
"""
if not isinstance(uuid, six.string_types):
raise TypeError(
_("Expected string for uuid argument, got %r") % uuid)
if new_ipmi_username and not new_ipmi_password:
raise ValueError(
_("Setting IPMI user name requires a new password"))
if new_ipmi_password:
LOG.warning('Setting IPMI credentials via ironic-inspector '
'is deprecated, this feature will be removed '
'in the Pike release')
params = {'new_ipmi_username': new_ipmi_username,
'new_ipmi_password': new_ipmi_password}
self.request('post', '/introspection/%s' % uuid, params=params)
def reprocess(self, uuid):
"""Reprocess stored introspection data.
:param uuid: node UUID or name.
:raises: :py:class:`.ClientError` on error reported from a server
:raises: :py:class:`.VersionNotSupported` if requested api_version
is not supported
:raises: *requests* library exception on connection problems.
:raises: TypeError if uuid is not a string.
"""
if not isinstance(uuid, six.string_types):
raise TypeError(_("Expected string for uuid argument, got"
" %r instead") % uuid)
return self.request('post',
'/introspection/%s/data/unprocessed' %
uuid)
def list_statuses(self, marker=None, limit=None):
"""List introspection statuses.
Supports pagination via the marker and limit params. The items are
sorted by the server according to the `started_at` attribute, newer
items first.
:param marker: pagination marker, UUID or None
:param limit: pagination limit, int or None
:raises: :py:class:`.ClientError` on error reported from a server
:raises: :py:class:`.VersionNotSupported` if requested api_version
is not supported
:raises: *requests* library exception on connection problems.
:return: a list of status dictionaries with the keys:
`error` an error string or None,
`finished` True/False,
`finished_at` an ISO8601 timestamp or None,
`links` with a self-link URL,
`started_at` an ISO8601 timestamp,
`uuid` the node UUID
"""
if not (marker is None or isinstance(marker, six.string_types)):
raise TypeError(_('Expected a string value of the marker, got '
'%s instead') % marker)
if not (limit is None or isinstance(limit, int)):
raise TypeError(_('Expected an integer value of the limit, got '
'%s instead') % limit)
params = {
'marker': marker,
'limit': limit,
}
response = self.request('get', '/introspection', params=params)
return response.json()['introspection']
def get_status(self, uuid):
"""Get introspection status for a node.
:param uuid: node UUID or name.
:raises: :py:class:`.ClientError` on error reported from a server
:raises: :py:class:`.VersionNotSupported` if requested api_version
is not supported
:raises: *requests* library exception on connection problems.
:return: dictionary with the keys:
`error` an error string or None,
`finished` True/False,
`finished_at` an ISO8601 timestamp or None,
`links` with a self-link URL,
`started_at` an ISO8601 timestamp,
`uuid` the node UUID
"""
if not isinstance(uuid, six.string_types):
raise TypeError(
_("Expected string for uuid argument, got %r") % uuid)
return self.request('get', '/introspection/%s' % uuid).json()
def wait_for_finish(self, uuids, retry_interval=DEFAULT_RETRY_INTERVAL,
max_retries=DEFAULT_MAX_RETRIES,
sleep_function=time.sleep):
"""Wait for introspection finishing for given nodes.
:param uuids: collection of node UUIDs or names.
:param retry_interval: sleep interval between retries.
:param max_retries: maximum number of retries.
:param sleep_function: function used for sleeping between retries.
:raises: :py:class:`.WaitTimeoutError` on timeout
:raises: :py:class:`.ClientError` on error reported from a server
:raises: :py:class:`.VersionNotSupported` if requested api_version
is not supported
:raises: *requests* library exception on connection problems.
:return: dictionary UUID -> status (the same as in get_status).
"""
result = {}
# Number of attempts = number of retries + first attempt
for attempt in range(max_retries + 1):
new_active_uuids = []
for uuid in uuids:
status = self.get_status(uuid)
if status.get('finished'):
result[uuid] = status
else:
new_active_uuids.append(uuid)
if new_active_uuids:
if attempt != max_retries:
uuids = new_active_uuids
LOG.debug('Still waiting for introspection results for '
'%(count)d nodes, attempt %(attempt)d of '
'%(total)d',
{'count': len(new_active_uuids),
'attempt': attempt + 1,
'total': max_retries + 1})
sleep_function(retry_interval)
else:
return result
raise WaitTimeoutError(_("Timeout while waiting for introspection "
"of nodes %s") % new_active_uuids)
def get_data(self, uuid, raw=False):
"""Get introspection data from the last introspection of a node.
:param uuid: node UUID or name.
:param raw: whether to return raw binary data or parsed JSON data
:returns: bytes or a dict depending on the 'raw' argument
:raises: :py:class:`.ClientError` on error reported from a server
:raises: :py:class:`.VersionNotSupported` if requested api_version
is not supported
:raises: *requests* library exception on connection problems.
:raises: TypeError if uuid is not a string
"""
if not isinstance(uuid, six.string_types):
raise TypeError(
_("Expected string for uuid argument, got %r") % uuid)
resp = self.request('get', '/introspection/%s/data' % uuid)
if raw:
return resp.content
else:
return resp.json()
def abort(self, uuid):
"""Abort running introspection for a node.
:param uuid: node UUID or name.
:raises: :py:class:`.ClientError` on error reported from a server
:raises: :py:class:`.VersionNotSupported` if requested api_version
is not supported
:raises: *requests* library exception on connection problems.
:raises: TypeError if uuid is not a string.
"""
if not isinstance(uuid, six.string_types):
raise TypeError(_("Expected string for uuid argument, got"
" %r") % uuid)
return self.request('post', '/introspection/%s/abort' % uuid)
def get_interface_data(self, node_ident, interface, field_sel):
"""Get interface data for the input node and interface
To get LLDP data, collection must be enabled by the kernel parameter
ipa-collect-lldp=1, and the inspector plugin ``basic_lldp`` must
be enabled.
:param node_ident: node UUID or name
:param interface: interface name
:param field_sel: list of all fields for which to get data
:returns: interface data in an OrderedDict
"""
# Use OrderedDict to maintain order of user-entered fields
iface_data = OrderedDict()
data = self.get_data(node_ident)
all_interfaces = data.get('all_interfaces', [])
# Make sure interface name is valid
if interface not in all_interfaces:
return iface_data
# If lldp data not available this will still return interface,
# mac, node_ident etc.
lldp_proc = all_interfaces[interface].get('lldp_processed', {})
for f in field_sel:
if f == 'node_ident':
iface_data[f] = node_ident
elif f == 'interface':
iface_data[f] = interface
            elif f == 'mac':
                iface_data[f] = all_interfaces[interface].get('mac')
            else:
                # any remaining field is looked up in the processed LLDP data
                iface_data[f] = lldp_proc.get(f)
        return iface_data
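# Illustrative usage sketch (not part of the original module): drive a full
# introspection cycle with the client defined above. The keystone session and
# node UUID are placeholders supplied by the caller.
def _example_introspect_and_wait(keystone_session, node_uuid):
    """Introspect one node, wait for completion and return the stored data."""
    client = ClientV1(session=keystone_session)
    client.introspect(node_uuid)
    statuses = client.wait_for_finish([node_uuid],
                                      retry_interval=5,
                                      max_retries=120)
    status = statuses[node_uuid]
    if status.get('error'):
        raise RuntimeError(status['error'])
    return client.get_data(node_uuid)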
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle an argument for a parameter specified in the constructor of a class"""
from collections import defaultdict
from sys import version_info
from typing import Any
from typing import Dict
from typing import Generic
from typing import Optional
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
if (version_info.major > 3) or (version_info.major == 3 and version_info.minor >= 8):
from typing import Literal
else:
from typing_extensions import Literal # noqa: F401 # pragma: no cover
if TYPE_CHECKING: # pragma: no cover
from adorn.orchestrator.orchestrator import Orchestrator
from adorn.data.parameter import Parameter
from adorn.data.constructor import Constructor
from adorn.exception.type_check_error import (
ExtraLiteralError,
KeyValueError,
MalformedDependencyError,
MalformedLiteralError,
MissingDependencyError,
MissingLiteralError,
TooDeepLiteralError,
TypeCheckError,
UnaryLiteralError,
)
from adorn.params import Params
from adorn.unit.complex import _T, Complex
from adorn.unit.simple import Simple
DictStrStr = TypeVar("DictStrStr", bound=Literal[dict()])
class ParameterValue(Simple):
""":class:`~adorn.unit.unit.Unit` for handling an argument for a parameter specified in a constructor
:class:`~adorn.data.parameter.Parameter` contains the type and state from the constructor,
which ``ParameterValue`` allows you to utilize when type checking or instantiating the
argument associated with the parameter
""" # noqa: B950
_registry = defaultdict(dict)
def contains(self, obj: Any, orchestrator: "Orchestrator") -> bool:
"""Check if the ``obj`` is an instance of :class:`~adorn.data.parameter.Parameter`
Args:
obj (Any): potentially an instance of
:class:`~adorn.data.parameter.Parameter`
orchestrator (Orchestrator): container of all types, typically
used to recurse nested types
Returns:
bool: if ``True``, ``obj`` is an instance of
:class:`~adorn.data.parameter.Parameter`
""" # noqa: B950
return isinstance(obj, Parameter) and (self.get(obj, orchestrator) is not None)
def get(self, key: Any, orchestrator: "Orchestrator") -> Optional["ParameterValue"]:
"""Get the relevant class to handle the given :class:`~adorn.data.parameter.Parameter` instance.
Args:
key (Any): potentially an instance of
:class:`~adorn.data.parameter.Parameter`
orchestrator (Orchestrator): container of all types, typically
used to recurse nested types
Returns:
Optional[ParameterValue]: if ``key`` is an instance of
:class:`~adorn.data.parameter.Parameter` then a subclass of
``ParameterValue`` will be returned, otherwise ``None``
""" # noqa: B950
if not isinstance(key, Parameter):
return None
# we default all Parameter's to use Identity
return self._get(key=key, orchestrator=orchestrator) or (
self._registry[ParameterValue]["identity"]
if orchestrator.contains(key.cls)
else None
)
@ParameterValue.register("identity")
class Identity(ParameterValue):
"""Ignore the additional state in the :class:`~adorn.data.parameter.Parameter`
We extract the type information from :class:`~adorn.data.parameter.Parameter`
and pass the object back to the :class:`~adorn.orchestrator.Orchestrator`
"""
@classmethod
def _from_obj(
cls, target_cls: Parameter, orchestrator: "Orchestrator", obj: Dict[Any, Any]
) -> Any:
"""Delegate to the :class:`~adorn.orchestrator.Orchestrator` to instantiate the argument, ``obj``
Args:
target_cls (Parameter): instance of
:class:`~adorn.data.parameter.Parameter`
orchestrator (Orchestrator): container of all types, used to instantiate
the argument
obj (Any): an instance, containing the information to instantiate
an instance of :attr:`~adorn.data.parameter.Parameter.cls`
Returns:
Any: An instance of :attr:`~adorn.data.parameter.Parameter.cls`
""" # noqa: B950, RST304
return orchestrator.from_obj(target_cls.cls, obj)
@classmethod
def _type_check(
cls, target_cls: Parameter, orchestrator: "Orchestrator", obj: Dict[Any, Any]
) -> Optional[TypeCheckError]:
"""Delegate to the :class:`~adorn.orchestrator.Orchestrator` to type check the argument, ``obj``
Args:
target_cls (Parameter): instance of
:class:`~adorn.data.parameter.Parameter`
orchestrator (Orchestrator): container of all types, used to type
check the argument
obj (Any): an instance, containing the information to instantiate
an instance of :attr:`~adorn.data.parameter.Parameter.cls`
Returns:
Optional[TypeCheckError]: If ``TypeCheckError``, there is an
issue that would prevent ``obj`` from being converted to an
instance of :attr:`~adorn.data.parameter.Parameter.cls`,
otherwise ``None``
""" # noqa: B950, RST304
if (target_cls.cls == Any) or (
getattr(target_cls.cls, "__origin__", None) is None
and isinstance(obj, target_cls.cls)
):
# object is already instantiated, so we assume it is ok
return None
return orchestrator.type_check(target_cls.cls, obj)
class Dependent(ParameterValue):
"""``Dependent`` allows for a parameter to leverage information from other parameters.""" # noqa: B950
@staticmethod
def get_constructor(target_cls: Parameter, obj: Dict[Any, Any]) -> Constructor:
"""Generate :class:`~adorn.data.constructor.Constructor` for relevant sub-type
Args:
target_cls (Parameter): container of the parent class
obj (Dict[Any, Any]): arguments to instantiate
:attr:`~adorn.data.parameter.Parameter.cls`
Returns:
Constructor: The constructor for the sub class specified
in ``obj``
""" # noqa: RST304
subcls = target_cls.args[0].resolve_class_name(obj["type"])
return Constructor(subcls)
@staticmethod
def get_args(
target_cls: Parameter,
literal_dict: Dict[str, str],
dependent_from_obj: bool = False,
max_depth: Optional[int] = None,
) -> Dict[str, Any]:
"""Map the key in literal_dict to its requested dependent state
Args:
target_cls (Parameter): container of the local state
literal_dict (Dict[str, str]): the request for dependent state
dependent_from_obj (bool): if ``True``, the request is grabbing
instantiated values, otherwise configured values
max_depth (Optional[int]): the level at which local state will
be inspected
Returns:
Dict[str, Any]: relevant local state needed by the parameter
"""
# TODO: currently default args are not supported, this could
# be accomplished by using the constructor of the object that
# contains the needed state
acc = dict()
for k, v in literal_dict.items():
local_state = target_cls.local_state
keys = v.split(".")
key_len = len(keys)
upper_bound = (
max_depth if max_depth is not None and dependent_from_obj else key_len
)
not_bad = True
idx = 0
while not_bad and idx < upper_bound:
key = keys[idx]
if idx == 0 or not dependent_from_obj:
# initial key or a DependentTypeCheck._type_check
if key not in local_state:
not_bad = False
else:
local_state = local_state[key]
else:
if hasattr(local_state, key):
local_state = getattr(local_state, key)
else:
not_bad = False
idx += 1
if not_bad:
acc[k] = local_state
return acc
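    # Illustrative sketch (not part of the original class): with
    #   literal_dict = {"hidden_dim": "encoder.hidden_size"}
    # and a Parameter whose ``local_state`` is {"encoder": {"hidden_size": 128}},
    # ``get_args`` walks the dotted path and returns {"hidden_dim": 128}.
    # When ``dependent_from_obj`` is True, keys after the first are resolved as
    # attributes of the already instantiated value via ``getattr``, and
    # ``max_depth`` caps how far down the dotted path is walked
    # (e.g. ``max_depth=1`` stops after the first key).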
@classmethod
def check_args(
cls, target_cls: Parameter, orchestrator: "Orchestrator"
) -> Optional[TypeCheckError]:
"""Ensure type level args are correct
Exceptions:
- :class:`~adorn.exception.type_check_error.MalformedDependencyError`:
the ``Dependent`` type didn't specify the appropriate number of
arguments
- :class:`~adorn.exception.type_check_error.MissingLiteralError`:
the first argument, zero based counting, wasn't a ``typing.Literal``
- :class:`~adorn.exception.type_check_error.UnaryLiteralError`:
``typing.Literal`` specifies more than one argument
- :class:`~adorn.exception.type_check_error.MalformedLiteralError`:
``typing.Literal`` wrapped a value that wasn't of type
``Dict[str, str]``
Args:
target_cls (Parameter): parameter that is dependent on
state in the constructor
orchestrator (Orchestrator): container of types, used
to check the argument specified in the ``typing.Literal``
Returns:
Optional[TypeCheckError]: ``TypeCheckError`` specifying an
error with the type level dependent argument, otherwise
``None``
"""
if target_cls.args is None or len(target_cls.args) != 2:
# need to specify a type and a mapping
return MalformedDependencyError(target_cls=target_cls)
potential_literal = target_cls.args[1]
literal_origin = getattr(potential_literal, "__origin__", None)
if literal_origin != Literal:
# first arg must be wrapped in a literal
return MissingLiteralError(target_cls=target_cls)
literal_arg = getattr(potential_literal, "__args__", None)
if literal_arg is None or len(literal_arg) != 1:
# may only provide a single literal arg
return UnaryLiteralError(target_cls=target_cls)
literal_arg = literal_arg[0]
literal_arg_check = orchestrator.type_check(Dict[str, str], literal_arg)
if literal_arg_check is not None:
# arg to a literal must be Dict[str, str]
return MalformedLiteralError(
target_cls=target_cls,
literal_type=Dict[str, str],
child=literal_arg_check,
)
@classmethod
def check_literal_dict(
cls,
target_cls: Parameter,
obj: Dict[Any, Any],
dependent_from_obj: bool = False,
) -> Optional[TypeCheckError]:
"""Ensure literal dict is logical given the class and local state
Exceptions:
- :class:`~adorn.exception.type_check_error.ExtraLiteralError`:
``typing.Literal`` requested state that wasn't part of the
constructor of the parameter
- :class:`~adorn.exception.type_check_error.TooDeepLiteralError`:
``typing.Literal`` requested state that was more than one layer deep.
Args:
target_cls (Parameter): container of the local state and dependency request
obj (Dict[Any, Any]): arguments for the parameter
dependent_from_obj (bool): if ``True``, the request is grabbing
instantiated values, otherwise configured values
Returns:
Optional[TypeCheckError]: ``TypeCheckError`` if the request for state
didn't work with the state that existed, otherwise ``None``
"""
constructor = cls.get_constructor(target_cls=target_cls, obj=obj)
literal_dict = target_cls.args[1].__args__[0]
# check key is actually a parameter to the given class's constructor
additional_keys = [
i for i in literal_dict.keys() if i not in constructor.parameters
]
if additional_keys:
return ExtraLiteralError(target_cls, additional_keys)
# check that values go at most one layer deep
split_values = [(i.split("."), i) for i in literal_dict.values()]
too_deep = [j for i, j in split_values if len(i) > 2]
if too_deep:
return TooDeepLiteralError(target_cls, too_deep)
# check that value exists in the local state
args = cls.get_args(
target_cls=target_cls,
literal_dict=literal_dict,
dependent_from_obj=dependent_from_obj,
max_depth=1 if dependent_from_obj else None,
)
literal_dict_key_check = cls.check_literal_dict_keys(
target_cls=target_cls, literal_dict=literal_dict, args=args
)
if literal_dict_key_check is not None:
return literal_dict_key_check
@classmethod
def check_literal_dict_keys(
cls, target_cls: Parameter, literal_dict: Dict[str, str], args: Dict[str, Any]
) -> Optional[TypeCheckError]:
"""Ensure all requested args exist in local state
Exceptions:
        return _snap.TChA_SearchChBack(self, *args)
def SearchStr(self, *args):
"""
SearchStr(TChA self, TChA Str, int const & BChN=0) -> int
Parameters:
Str: TChA const &
BChN: int const &
SearchStr(TChA self, TChA Str) -> int
Parameters:
Str: TChA const &
SearchStr(TChA self, TStr Str, int const & BChN=0) -> int
Parameters:
Str: TStr const &
BChN: int const &
SearchStr(TChA self, TStr Str) -> int
Parameters:
Str: TStr const &
SearchStr(TChA self, char const * CStr, int const & BChN=0) -> int
Parameters:
CStr: char const *
BChN: int const &
SearchStr(TChA self, char const * CStr) -> int
Parameters:
CStr: char const *
"""
return _snap.TChA_SearchStr(self, *args)
def IsStrIn(self, *args):
"""
IsStrIn(TChA self, TStr Str) -> bool
Parameters:
Str: TStr const &
"""
return _snap.TChA_IsStrIn(self, *args)
def IsPrefix(self, *args):
"""
IsPrefix(TChA self, char const * CStr, int const & BChN=0) -> bool
Parameters:
CStr: char const *
BChN: int const &
IsPrefix(TChA self, char const * CStr) -> bool
Parameters:
CStr: char const *
IsPrefix(TChA self, TStr Str) -> bool
Parameters:
Str: TStr const &
IsPrefix(TChA self, TChA Str) -> bool
Parameters:
Str: TChA const &
"""
return _snap.TChA_IsPrefix(self, *args)
def IsSuffix(self, *args):
"""
IsSuffix(TChA self, char const * CStr) -> bool
Parameters:
CStr: char const *
IsSuffix(TChA self, TStr Str) -> bool
Parameters:
Str: TStr const &
IsSuffix(TChA self, TChA Str) -> bool
Parameters:
Str: TChA const &
"""
return _snap.TChA_IsSuffix(self, *args)
def IsChIn(self, *args):
"""
IsChIn(TChA self, char const & Ch) -> bool
Parameters:
Ch: char const &
"""
return _snap.TChA_IsChIn(self, *args)
def ChangeCh(self, *args):
"""
ChangeCh(TChA self, char const & SrcCh, char const & DstCh)
Parameters:
SrcCh: char const &
DstCh: char const &
"""
return _snap.TChA_ChangeCh(self, *args)
def ToUc(self):
"""
ToUc(TChA self) -> TChA
Parameters:
self: TChA *
"""
return _snap.TChA_ToUc(self)
def ToLc(self):
"""
ToLc(TChA self) -> TChA
Parameters:
self: TChA *
"""
return _snap.TChA_ToLc(self)
def ToTrunc(self):
"""
ToTrunc(TChA self) -> TChA
Parameters:
self: TChA *
"""
return _snap.TChA_ToTrunc(self)
def CompressWs(self):
"""
CompressWs(TChA self)
Parameters:
self: TChA *
"""
return _snap.TChA_CompressWs(self)
def Swap(self, *args):
"""
Swap(TChA self, int const & ChN1, int const & ChN2)
Parameters:
ChN1: int const &
ChN2: int const &
Swap(TChA self, TChA ChA)
Parameters:
ChA: TChA &
"""
return _snap.TChA_Swap(self, *args)
def GetPrimHashCd(self):
"""
GetPrimHashCd(TChA self) -> int
Parameters:
self: TChA const *
"""
return _snap.TChA_GetPrimHashCd(self)
def GetSecHashCd(self):
"""
GetSecHashCd(TChA self) -> int
Parameters:
self: TChA const *
"""
return _snap.TChA_GetSecHashCd(self)
def LoadTxt(*args):
"""
LoadTxt(PSIn const & SIn, TChA ChA)
Parameters:
SIn: PSIn const &
ChA: TChA &
"""
return _snap.TChA_LoadTxt(*args)
LoadTxt = staticmethod(LoadTxt)
def SaveTxt(self, *args):
"""
SaveTxt(TChA self, PSOut const & SOut)
Parameters:
SOut: PSOut const &
"""
return _snap.TChA_SaveTxt(self, *args)
TChA.Load = new_instancemethod(_snap.TChA_Load,None,TChA)
TChA.Save = new_instancemethod(_snap.TChA_Save,None,TChA)
TChA.SaveXml = new_instancemethod(_snap.TChA_SaveXml,None,TChA)
TChA.__eq__ = new_instancemethod(_snap.TChA___eq__,None,TChA)
TChA.__ne__ = new_instancemethod(_snap.TChA___ne__,None,TChA)
TChA.__lt__ = new_instancemethod(_snap.TChA___lt__,None,TChA)
TChA.__iadd__ = new_instancemethod(_snap.TChA___iadd__,None,TChA)
TChA.GetMemUsed = new_instancemethod(_snap.TChA_GetMemUsed,None,TChA)
TChA.__call__ = new_instancemethod(_snap.TChA___call__,None,TChA)
TChA.CStr = new_instancemethod(_snap.TChA_CStr,None,TChA)
TChA.Clr = new_instancemethod(_snap.TChA_Clr,None,TChA)
TChA.Len = new_instancemethod(_snap.TChA_Len,None,TChA)
TChA.Empty = new_instancemethod(_snap.TChA_Empty,None,TChA)
TChA.Ins = new_instancemethod(_snap.TChA_Ins,None,TChA)
TChA.Del = new_instancemethod(_snap.TChA_Del,None,TChA)
TChA.DelLastCh = new_instancemethod(_snap.TChA_DelLastCh,None,TChA)
TChA.Push = new_instancemethod(_snap.TChA_Push,None,TChA)
TChA.Pop = new_instancemethod(_snap.TChA_Pop,None,TChA)
TChA.Trunc = new_instancemethod(_snap.TChA_Trunc,None,TChA)
TChA.Reverse = new_instancemethod(_snap.TChA_Reverse,None,TChA)
TChA.AddCh = new_instancemethod(_snap.TChA_AddCh,None,TChA)
TChA.AddChTo = new_instancemethod(_snap.TChA_AddChTo,None,TChA)
TChA.AddBf = new_instancemethod(_snap.TChA_AddBf,None,TChA)
TChA.PutCh = new_instancemethod(_snap.TChA_PutCh,None,TChA)
TChA.GetCh = new_instancemethod(_snap.TChA_GetCh,None,TChA)
TChA.LastCh = new_instancemethod(_snap.TChA_LastCh,None,TChA)
TChA.LastLastCh = new_instancemethod(_snap.TChA_LastLastCh,None,TChA)
TChA.GetSubStr = new_instancemethod(_snap.TChA_GetSubStr,None,TChA)
TChA.CountCh = new_instancemethod(_snap.TChA_CountCh,None,TChA)
TChA.SearchCh = new_instancemethod(_snap.TChA_SearchCh,None,TChA)
TChA.SearchChBack = new_instancemethod(_snap.TChA_SearchChBack,None,TChA)
TChA.SearchStr = new_instancemethod(_snap.TChA_SearchStr,None,TChA)
TChA.IsStrIn = new_instancemethod(_snap.TChA_IsStrIn,None,TChA)
TChA.IsPrefix = new_instancemethod(_snap.TChA_IsPrefix,None,TChA)
TChA.IsSuffix = new_instancemethod(_snap.TChA_IsSuffix,None,TChA)
TChA.IsChIn = new_instancemethod(_snap.TChA_IsChIn,None,TChA)
TChA.ChangeCh = new_instancemethod(_snap.TChA_ChangeCh,None,TChA)
TChA.ToUc = new_instancemethod(_snap.TChA_ToUc,None,TChA)
TChA.ToLc = new_instancemethod(_snap.TChA_ToLc,None,TChA)
TChA.ToTrunc = new_instancemethod(_snap.TChA_ToTrunc,None,TChA)
TChA.CompressWs = new_instancemethod(_snap.TChA_CompressWs,None,TChA)
TChA.Swap = new_instancemethod(_snap.TChA_Swap,None,TChA)
TChA.GetPrimHashCd = new_instancemethod(_snap.TChA_GetPrimHashCd,None,TChA)
TChA.GetSecHashCd = new_instancemethod(_snap.TChA_GetSecHashCd,None,TChA)
TChA.SaveTxt = new_instancemethod(_snap.TChA_SaveTxt,None,TChA)
TChA_swigregister = _snap.TChA_swigregister
TChA_swigregister(TChA)
def TChA_LoadTxt(*args):
"""
TChA_LoadTxt(PSIn const & SIn, TChA ChA)
Parameters:
SIn: PSIn const &
ChA: TChA &
"""
return _snap.TChA_LoadTxt(*args)
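# Illustrative usage sketch (not part of the generated bindings). It assumes the
# TChA constructor accepts a Python string and that the wrappers behave like
# their SNAP C++ counterparts (in-place case conversion, substring search
# returning an index or -1):
#
#   ca = TChA("Hello World")
#   ca.ToLc()
#   pos = ca.SearchStr("world")
#   starts_with = ca.IsPrefix("hello")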
class TChAIn(TSIn):
"""Proxy of C++ TChAIn class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(TChAIn self, TChA ChA, int const & _BfC=0) -> TChAIn
Parameters:
ChA: TChA const &
_BfC: int const &
__init__(TChAIn self, TChA ChA) -> TChAIn
Parameters:
ChA: TChA const &
"""
_snap.TChAIn_swiginit(self,_snap.new_TChAIn(*args))
def New(*args):
"""
New(TChA ChA) -> PSIn
Parameters:
ChA: TChA const &
"""
return _snap.TChAIn_New(*args)
New = staticmethod(New)
__swig_destroy__ = _snap.delete_TChAIn
TChAIn_swigregister = _snap.TChAIn_swigregister
TChAIn_swigregister(TChAIn)
def TChAIn_New(*args):
"""
TChAIn_New(TChA ChA) -> PSIn
Parameters:
ChA: TChA const &
"""
return _snap.TChAIn_New(*args)
class TRStr(object):
"""Proxy of C++ TRStr class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
Bf = _swig_property(_snap.TRStr_Bf_get, _snap.TRStr_Bf_set)
Refs = _swig_property(_snap.TRStr_Refs_get, _snap.TRStr_Refs_set)
__swig_destroy__ = _snap.delete_TRStr
def __init__(self, *args):
"""
__init__(TRStr self) -> TRStr
__init__(TRStr self, int const & Len) -> TRStr
Parameters:
Len: int const &
__init__(TRStr self, char const * CStr) -> TRStr
Parameters:
CStr: char const *
__init__(TRStr self, char const * CStr, int const & MxLen) -> TRStr
Parameters:
CStr: char const *
MxLen: int const &
__init__(TRStr self, char const * CStr1, char const * CStr2) -> TRStr
Parameters:
CStr1: char const *
CStr2: char const *
__init__(TRStr self, char const & Ch) -> TRStr
Parameters:
Ch: char const &
__init__(TRStr self, char const & Ch1, char const & Ch2) -> TRStr
Parameters:
Ch1: char const &
Ch2: char const &
__init__(TRStr self, TSIn SIn, bool const & IsSmall) -> TRStr
Parameters:
SIn: TSIn &
IsSmall: bool const &
"""
_snap.TRStr_swiginit(self,_snap.new_TRStr(*args))
def Save(self, *args):
"""
Save(TRStr self, TSOut SOut, bool const & IsSmall)
Parameters:
SOut: TSOut &
IsSmall: bool const &
"""
return _snap.TRStr_Save(self, *args)
def GetMemUsed(self):
"""
GetMemUsed(TRStr self) -> int
Parameters:
self: TRStr const *
"""
return _snap.TRStr_GetMemUsed(self)
def MkRef(self):
"""
MkRef(TRStr self)
Parameters:
self: TRStr *
"""
return _snap.TRStr_MkRef(self)
def UnRef(self):
"""
UnRef(TRStr self)
Parameters:
self: TRStr *
"""
return _snap.TRStr_UnRef(self)
def CStr(self, *args):
"""
CStr(TRStr self) -> char const
CStr(TRStr self) -> char *
Parameters:
self: TRStr *
"""
return _snap.TRStr_CStr(self, *args)
def Empty(self):
"""
Empty(TRStr self) -> bool
Parameters:
self: TRStr const *
"""
return _snap.TRStr_Empty(self)
def Len(self):
"""
Len(TRStr self) -> int
Parameters:
self: TRStr const *
"""
return _snap.TRStr_Len(self)
def PutCh(self, *args):
"""
PutCh(TRStr self, int const & ChN, char const & Ch)
Parameters:
ChN: int const &
Ch: char const &
"""
return _snap.TRStr_PutCh(self, *args)
def GetCh(self, *args):
"""
GetCh(TRStr self, int const & ChN) -> char
Parameters:
ChN: int const &
"""
return _snap.TRStr_GetCh(self, *args)
def IsUc(self):
"""
IsUc(TRStr self) -> bool
Parameters:
self: TRStr const *
"""
return _snap.TRStr_IsUc(self)
def ToUc(self):
"""
ToUc(TRStr self)
Parameters:
self: TRStr *
"""
return _snap.TRStr_ToUc(self)
def IsLc(self):
"""
IsLc(TRStr self) -> bool
Parameters:
self: TRStr const *
"""
return _snap.TRStr_IsLc(self)
def ToLc(self):
"""
ToLc(TRStr self)
Parameters:
self: TRStr *
"""
return _snap.TRStr_ToLc(self)
def ToCap(self):
"""
ToCap(TRStr self)
Parameters:
self: TRStr *
"""
return _snap.TRStr_ToCap(self)
def ConvUsFromYuAscii(self):
"""
ConvUsFromYuAscii(TRStr self)
Parameters:
self: TRStr *
"""
return _snap.TRStr_ConvUsFromYuAscii(self)
def CmpI(*args):
"""
CmpI(char const * CStr1, char const * CStr2) -> int
Parameters:
CStr1: char const *
CStr2: char const *
"""
return _snap.TRStr_CmpI(*args)
CmpI = staticmethod(CmpI)
def GetPrimHashCd(self):
"""
GetPrimHashCd(TRStr self) -> int
Parameters:
self: TRStr const *
"""
return _snap.TRStr_GetPrimHashCd(self)
def GetSecHashCd(self):
"""
GetSecHashCd(TRStr self) -> int
Parameters:
self: TRStr const *
"""
return _snap.TRStr_GetSecHashCd(self)
def GetNullRStr():
"""GetNullRStr() -> TRStr"""
return _snap.TRStr_GetNullRStr()
GetNullRStr = staticmethod(GetNullRStr)
TRStr.Save = new_instancemethod(_snap.TRStr_Save,None,TRStr)
TRStr.GetMemUsed = new_instancemethod(_snap.TRStr_GetMemUsed,None,TRStr)
TRStr.MkRef = new_instancemethod(_snap.TRStr_MkRef,None,TRStr)
TRStr.UnRef = new_instancemethod(_snap.TRStr_UnRef,None,TRStr)
TRStr.CStr = new_instancemethod(_snap.TRStr_CStr,None,TRStr)
TRStr.Empty = new_instancemethod(_snap.TRStr_Empty,None,TRStr)
TRStr.Len = new_instancemethod(_snap.TRStr_Len,None,TRStr)
TRStr.PutCh = new_instancemethod(_snap.TRStr_PutCh,None,TRStr)
TRStr.GetCh = new_instancemethod(_snap.TRStr_GetCh,None,TRStr)
TRStr.IsUc = new_instancemethod(_snap.TRStr_IsUc,None,TRStr)
TRStr.ToUc = new_instancemethod(_snap.TRStr_ToUc,None,TRStr)
TRStr.IsLc = new_instancemethod(_snap.TRStr_IsLc,None,TRStr)
TRStr.ToLc = new_instancemethod(_snap.TRStr_ToLc,None,TRStr)
TRStr.ToCap = new_instancemethod(_snap.TRStr_ToCap,None,TRStr)
TRStr.ConvUsFromYuAscii = new_instancemethod(_snap.TRStr_ConvUsFromYuAscii,None,TRStr)
TRStr.GetPrimHashCd = new_instancemethod(_snap.TRStr_GetPrimHashCd,None,TRStr)
TRStr.GetSecHashCd = new_instancemethod(_snap.TRStr_GetSecHashCd,None,TRStr)
TRStr_swigregister = _snap.TRStr_swigregister
TRStr_swigregister(TRStr)
def TRStr_CmpI(*args):
"""
TRStr_CmpI(char const * CStr1, char const * CStr2) -> int
Parameters:
CStr1: char const *
CStr2: char const *
"""
return _snap.TRStr_CmpI(*args)
def TRStr_GetNullRStr():
"""TRStr_GetNullRStr() -> TRStr"""
return _snap.TRStr_GetNullRStr()
class TStr(object):
"""Proxy of C++ TStr class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
__swig_destroy__ = _snap.delete_TStr
def __init__(self, *args):
"""
__init__(TStr self) -> TStr
__init__(TStr self, TStr Str) -> TStr
Parameters:
Str: TStr const &
__init__(TStr self, TChA ChA) -> TStr
Parameters:
ChA: TChA const &
__init__(TStr self, TSStr SStr) -> TStr
Parameters:
SStr: TSStr const &
__init__(TStr self, char const * CStr) -> TStr
Parameters:
CStr: char const *
__init__(TStr self, char const & Ch) -> TStr
Parameters:
Ch: char const &
__init__(TStr self, TMem Mem) -> TStr
# add the default creator if no creator exists at this point
set_default_creator(new_dict, sub_name, sub_email)
new_dict['extras'] = reduced_extras
except KeyError as ex:
log.debug('''Expected key ['%s'] not found, attempting to move common core keys to subdictionary''',
ex.message)
# This can happen when a form fails validation, as all the data will now be key/value pairs, not under extras,
# so we'll move them back to the expected location to fill in the values
# e.g.
# { 'foo':'bar','publisher':'somename'} becomes {'foo':'bar', 'custom_meta':{'publisher':'somename'}}
keys_to_remove = []
log.debug('common core metadata: {0}'.format(common_metadata))
for key, value in new_dict.iteritems():
#TODO remove debug
log.debug('checking key: {0}'.format(key))
if key in common_metadata:
#TODO remove debug
log.debug('adding key: {0}'.format(key))
new_dict['custom_meta'][key] = value
keys_to_remove.append(key)
# grab the submitter name and email to set default first creator name and email
if key == 'sub_name':
sub_name = value
if key == 'sub_email':
sub_email = value
else:
# check if the key matches any of the repeatable metadata elements
if key in repeatable_elements:
for repeat_item in value:
new_dict['custom_meta'][key].append(repeat_item)
for key in keys_to_remove:
del new_dict[key]
# add the default creator if no creator exists at this point
set_default_creator(new_dict, sub_name, sub_email)
# remove any repeatable elements marked as deleted from the dict
for element in repeatable_elements:
valid_repeatables = [c for c in new_dict['custom_meta'][element] if c['delete'] != '1']
new_dict['custom_meta'][element] = valid_repeatables
return new_dict
@classmethod
def _load_repeatable_elements_to_dict(cls, dict_to_load_to, repeatable_element_keys, extra_data):
for element_key in repeatable_element_keys:
data_key_parts = element_key.split(':')
element_dataset_index = int(data_key_parts[1])
if element_dataset_index == len(dict_to_load_to['custom_meta'][data_key_parts[0]]):
element = {data_key_parts[2]: cls._get_extra_value(extra_data, element_key)}
dict_to_load_to['custom_meta'][data_key_parts[0]].append(element)
else:
dict_to_load_to['custom_meta'][data_key_parts[0]][element_dataset_index][data_key_parts[2]] = \
cls._get_extra_value(extra_data, element_key)
return dict_to_load_to
@classmethod
def _get_sorted_repeatable_element_keys(cls, extra_data, element_name):
def get_key(item):
# if the item is 'creators:0:name'
# after the split we will have parts = ['creators', 0, 'name']
parts = item.split(":")
return int(parts[1])
element_key_list = []
for extra in extra_data:
data_key_parts = extra['key'].split(':')
if data_key_parts[0] == element_name and len(data_key_parts) == 3:
element_key_list.append(extra['key'])
return sorted(element_key_list, key=get_key)
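    # Illustrative sketch (not part of the original plugin): given extras such as
    #   [{'key': 'creators:1:name', 'value': 'B'},
    #    {'key': 'creators:0:name', 'value': 'A'},
    #    {'key': 'title', 'value': 'T'}]
    # _get_sorted_repeatable_element_keys(extras, 'creators') keeps only the
    # 'creators:<index>:<field>' keys and returns them ordered by index:
    #   ['creators:0:name', 'creators:1:name']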
@classmethod
def _get_extra_value(cls, extra_dict, key):
for extra in extra_dict:
if extra['key'] == key:
return extra['value']
return None
    @classmethod
    def __create_vocabulary(cls, name, *values):
'''Create vocab and tags, if they don't exist already.
name: the name or unique id of the vocabulary e.g. 'flower_colors'
values: the values that the vocabulary can take on e.g. ('blue', 'orange', 'purple', 'white', 'yellow')
'''
user = p.toolkit.get_action('get_site_user')({'ignore_auth': True}, {})
context = {'user': user['name']}
log.debug("Creating vocab '{0}'".format(name))
data = {'name': name}
vocab = p.toolkit.get_action('vocabulary_create')(context, data)
log.debug('Vocab created: {0}'.format(vocab))
for tag in values:
log.debug(
"Adding tag {0} to vocab {1}'".format(tag, name))
data = {'name': tag, 'vocabulary_id': vocab['id']}
p.toolkit.get_action('tag_create')(context, data)
return vocab
@classmethod
def __update_vocabulary(cls, name, *values):
user = p.toolkit.get_action('get_site_user')({'ignore_auth': True}, {})
context = {'user': user['name']}
log.debug("Updating vocab '{0}'".format(name))
data = {'id': name}
vocab = p.toolkit.get_action('vocabulary_show')(context, data)
data = {'name': name, 'id': vocab['id']}
vocab = p.toolkit.get_action('vocabulary_update')(context, data)
log.debug('Vocab updated: {0}'.format(vocab))
for tag in values:
log.debug(
"Adding tag {0} to vocab {1}'".format(tag, name))
data = {'name': tag, 'vocabulary_id': vocab['id']}
p.toolkit.get_action('tag_create')(context, data)
return vocab
@classmethod
def __add_tag_to_vocabulary(cls, name, *values):
user = p.toolkit.get_action('get_site_user')({'ignore_auth': True}, {})
context = {'user': user['name']}
log.debug("Updating vocab '{0}'".format(name))
data = {'id': name}
vocab = p.toolkit.get_action('vocabulary_show')(context, data)
log.debug('Vocab updated: {0}'.format(vocab))
for tag in values:
log.debug(
"Adding tag {0} to vocab {1}'".format(tag, name))
data = {'name': tag, 'vocabulary_id': vocab['id']}
p.toolkit.get_action('tag_create')(context, data)
data = {'id': name}
vocab = p.toolkit.get_action('vocabulary_show')(context, data)
return vocab
# template helper function
@classmethod
def get_research_focus(cls):
''' log.debug('get_research_focus() called')
Jinja2 template helper function, gets the vocabulary for research focus
'''
# NOTE: any time you want to include a new tag for the vocabulary term 'research_focus', add the tag name
# to the following list (research_focus_tags). Nothing else needs to be changed
research_focus_tags = [u'RFA1', u'RFA2', u'RFA3', u'other', u'CI', u'EOD']
vocab_name = 'research_focus'
research_focus = cls.__get_tags(vocab_name, research_focus_tags)
return research_focus
# template helper function
@classmethod
def get_update_frequency(cls):
'''
log.debug('get_update_frequency() called')
Jinja2 template helper function, gets the vocabulary for update_frequency
'''
# NOTE: any time you want to include a new tag for the vocabulary term 'update_frequency', add the tag name
# to the following list. Nothing else needs to be changed
update_frequency_tags = ['none', 'real time', 'hourly', 'daily', 'weekly', 'monthly', 'yearly', 'other']
vocab_name = 'update_frequency'
update_frequency = cls.__get_tags(vocab_name, update_frequency_tags)
return update_frequency
# template helper function
@classmethod
def get_study_area(cls):
''' log.debug('get_study_area() called')
Jinja2 template helper function, gets the vocabulary for study area
'''
# NOTE: any time you want to include a new tag for the vocabulary term 'study_area', add the tag name
# to the following list (study_area_tags). Nothing else needs to be changed
study_area_tags = [u'other', u'WRMA-Wasatch Range Metropolitan Area', u'Logan River Watershed',
u'Red Butte Creek Watershed', u'Provo River Watershed', u'Multiple Watersheds']
vocab_name = 'study_area'
study_area = cls.__get_tags(vocab_name, study_area_tags)
return study_area
# template helper function
@classmethod
def get_types(cls):
''' log.debug('type() called')
Jinja2 template helper function, gets the vocabulary for type
'''
# NOTE: any time you want to include a new tag for the vocabulary term 'type', add the tag name
# to the following list (type_tags). Nothing else needs to be changed
type_tags = ['collection', 'dataset', 'image', 'interactive resource', 'model', 'service', 'software', 'text']
vocab_name = 'type'
types = cls.__get_tags(vocab_name, type_tags)
return types
# template helper function
@classmethod
def get_status(cls):
''' log.debug('get_study_area() called')
Jinja2 template helper function, gets the vocabulary for status
'''
# NOTE: any time you want to include a new tag for the vocabulary term 'status', add the tag name
# to the following list (status_tags). Nothing else needs to be changed
status_tags = [u'complete', u'ongoing', u'planned', u'unknown']
vocab_name = 'status'
status = cls.__get_tags(vocab_name, status_tags)
return status
@classmethod
def __get_tags(cls, vocab_name, tags):
user = p.toolkit.get_action('get_site_user')({'ignore_auth': True}, {})
context = {'user': user['name']}
try:
data = {'id': vocab_name} # we can use the id or name for id param
vocab = p.toolkit.get_action('vocabulary_show')(context, data)
existing_tags = [tag['display_name'] for tag in vocab['tags']]
# check if we need to create additional tags for this vocabulary term
tags_to_add = [tag_name for tag_name in tags if tag_name not in existing_tags]
if len(tags_to_add) > 0:
vocab = cls.__add_tag_to_vocabulary(vocab_name, *tags_to_add)
except:
log.debug("vocabulary_show failed, meaning the vocabulary for %s doesn't exist", vocab_name)
vocab = cls.__create_vocabulary(vocab_name, *tags)
new_tags = [x['display_name'] for x in vocab['tags']]
log.debug("vocab tags: %s" % new_tags)
return new_tags
#See ckan.plugins.interfaces.IDatasetForm
def is_fallback(self):
# Return False so that we use the CKAN's default for
# /dataset/new and /dataset/edit
return False
#See ckan.plugins.interfaces.IDatasetForm
def package_types(self):
# This plugin doesn't handle any special package types, it just
# registers itself as the default (above).
return ['dataset']
def package_form(self):
return super(MetadataPlugin, self).package_form()
#See ckan.plugins.interfaces.IDatasetForm
def update_config(self, config):
# Instruct CKAN to look in the ```templates``` directory for customized templates and snippets
p.toolkit.add_template_directory(config, 'templates')
# add the extension's public dir path so that
# ckan can find any resources used from this path
# get the current dir path (here) for this plugin
here = os.path.dirname(__file__)
rootdir = os.path.dirname(os.path.dirname(here))
our_public_dir = os.path.join(rootdir, 'ckanext', 'Metadata', 'public')
config['extra_public_paths'] = ','.join([our_public_dir, config.get('extra_public_paths', '')])
#See ckan.plugins.interfaces.IDatasetForm
def _modify_package_schema(self, schema):
#log.debug("_modify_package_schema called")
not_empty = p.toolkit.get_validator('not_empty')
tag_string_convert = p.toolkit.get_validator('tag_string_convert')
for update in schema_updates_for_create:
schema.update(update)
# update CKAN's tag_string element, making it required - which forces the user to enter
# at least one keyword (tag item)
schema.update({'tag_string': [not_empty, tag_string_convert]})
schema['resources']['name'][0] = not_empty
schema.update({'creators': creator_schema()}) # needed for repeatable elements
schema.update({'contributors': contributor_schema()}) # needed for repeatable elements
schema.update({'variables': variable_schema()}) # needed for repeatable elements
return schema
#See ckan.plugins.interfaces.IDatasetForm
def create_package_schema(self):
log.debug('create_package_schema')
schema = super(MetadataPlugin, self).create_package_schema()
schema = self._modify_package_schema(schema)
return schema
#See ckan.plugins.interfaces.IDatasetForm
def update_package_schema(self):
#log.debug('update_package_schema')
schema = super(MetadataPlugin, self).update_package_schema()
schema = self._modify_package_schema(schema)
return schema
#See ckan.plugins.interfaces.IDatasetForm
def show_package_schema(self):
schema = super(MetadataPlugin, self).show_package_schema()
ignore_missing = p.toolkit.get_validator('ignore_missing')
# Don't show vocab tags mixed in with normal 'free' tags
# (e.g. on dataset pages, or on the search page)
schema['tags']['__extras'].append(p.toolkit.get_converter('free_tags_only'))
for update in schema_updates_for_show:
schema.update(update)
schema.update({'creators': [convert_from_extras, ignore_missing]}) # needed for repeatable elements
schema.update({'contributors': [convert_from_extras, ignore_missing]}) # needed for repeatable elements
schema.update({'variables': [convert_from_extras, ignore_missing]}) # needed for repeatable elements
return schema
#Method below allows functions and other methods to be called from the Jinja template using the h variable
def get_helpers(self):
return {'get_research_focus': self.get_research_focus,
'required_metadata': required_metadata,
'load_data_into_dict': self.load_data_into_dict,
'check_if_dataset_using_older_schema': check_if_dataset_using_older_schema,
'study_area': self.get_study_area,
}
return result
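    # Illustrative usage sketch (not part of the original module), assuming a
    # ChangepointDetector instance and an input frame with a timestamp column
    # "ts" and a value column "y":
    #
    #   cd = ChangepointDetector()
    #   cd.find_trend_changepoints(df=df, time_col="ts", value_col="y")
    #   res = cd.find_seasonality_changepoints(df=df, time_col="ts",
    #                                          value_col="y",
    #                                          regularization_strength=0.6)
    #   seasonality_changepoints = res["seasonality_changepoints"]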
@ignore_warnings(category=ConvergenceWarning)
def find_seasonality_changepoints(
self,
df,
time_col,
value_col,
seasonality_components_df=pd.DataFrame({
"name": ["tod", "tow", "conti_year"],
"period": [24.0, 7.0, 1.0],
"order": [3, 3, 5],
"seas_names": ["daily", "weekly", "yearly"]}),
resample_freq="H",
regularization_strength=0.6,
actual_changepoint_min_distance="30D",
potential_changepoint_distance=None,
potential_changepoint_n=50,
no_changepoint_distance_from_end=None,
no_changepoint_proportion_from_end=0.0,
trend_changepoints=None):
"""Finds the seasonality change points (defined as the time points where seasonality
magnitude changes, i.e., the time series becomes "fatter" or "thinner".)
Subtracts the estimated trend from the original time series first,
then uses regression-based regularization methods to select important seasonality
change points. Regressors are built from truncated Fourier series.
If you have run ``find_trend_changepoints`` before running ``find_seasonality_changepoints``
with the same df, the estimated trend will be automatically used for removing trend in
``find_seasonality_changepoints``.
Otherwise, ``find_trend_changepoints`` will be run automatically with the same parameters
as you passed to ``find_seasonality_changepoints``. If you do not want to use the same
parameters, run ``find_trend_changepoints`` with your desired parameter before calling
``find_seasonality_changepoints``.
The algorithm does an aggregation with a user-defined frequency, default hourly.
The regression features consists of ``potential_changepoint_n`` + 1 blocks of
predictors. The first block consists of Fourier series according to
``seasonality_components_df``, and other blocks are a copy of the first block
truncated at the corresponding potential change point.
If ``potential_changepoint_distance`` is not given, ``potential_changepoint_n``
potential change points are evenly distributed over the time period, else
``potential_changepoint_n`` is overridden by::
total_time_length / ``potential_changepoint_distance``
Users can specify either ``no_changepoint_proportion_from_end`` to specify what proportion
from the end of data they do not want changepoints, or ``no_changepoint_distance_from_end``
(overrides ``no_changepoint_proportion_from_end``) to specify how long from the end they
do not want change points.
Then all potential change points will be selected by adaptive lasso, with the initial
estimator specified by ``adaptive_lasso_initial_estimator``. The regularization strength
is specified by ``regularization_strength``, which lies between 0 and 1.
A rule-based guard function is applied at the end to ensure change points are not
too close, as specified by ``actual_changepoint_min_distance``.
Parameters
----------
df: `pandas.DataFrame`
The data df
time_col : `str`
Time column name in ``df``
value_col : `str`
Value column name in ``df``
seasonality_components_df : `pandas.DataFrame`
The df to generate seasonality design matrix, which is compatible with
``seasonality_components_df`` in
`~greykite.algo.changepoint.adalasso.changepoint_detector.ChangepointDetector.find_seasonality_changepoints`
resample_freq : `DateOffset, Timedelta or str`, default "H".
The frequency to aggregate data.
Coarser aggregation leads to fitting longer term trends.
regularization_strength : `float` in [0, 1] or `None`, default 0.6.
The regularization for change points. Greater value implies fewer change points.
0 indicates all change points, and 1 indicates no change point.
If `None`, the tuning parameter will be selected by cross-validation.
If a value is given, it will be used as the tuning parameter.
Here "None" is not recommended, because seasonality change has different levels,
and automatic selection by cross-validation may produce more change points than
desired. Practically, 0.6 is a good choice for most cases. Tuning around
0.6 is recommended.
actual_changepoint_min_distance : `DateOffset`, `Timedelta` or `str`, default "30D"
The minimal distance allowed between detected change points. If consecutive change points
are within this minimal distance, the one with smaller absolute change coefficient will
be dropped.
Note: maximal unit is 'D', i.e., you may use units no more than 'D' such as
'10D', '5H', '100T', '200S'. The reason is that 'W', 'M' or higher has either
cycles or indefinite number of days, thus is not parsable by pandas as timedelta.
potential_changepoint_distance : `DateOffset`, `Timedelta`, `str` or None, default None
The distance between potential change points.
If provided, will override the parameter ``potential_changepoint_n``.
Note: maximal unit is 'D', i.e., you may only use units no more than 'D' such as
'10D', '5H', '100T', '200S'. The reason is that 'W', 'M' or higher has either
cycles or indefinite number of days, thus is not parsable by pandas as timedelta.
potential_changepoint_n : `int`, default 50
Number of change points to be evenly distributed, recommended 1 per month, based
on the training data length.
no_changepoint_distance_from_end : `DateOffset`, `Timedelta`, `str` or None, default None
The length of time from the end of training data, within which no change point will be placed.
If provided, will override the parameter ``no_changepoint_proportion_from_end``.
Note: maximal unit is 'D', i.e., you may only use units no more than 'D' such as
'10D', '5H', '100T', '200S'. The reason is that 'W', 'M' or higher has either
cycles or indefinite number of days, thus is not parsable by pandas as timedelta.
no_changepoint_proportion_from_end : `float` in [0, 1], default 0.0.
``potential_changepoint_n`` change points will be placed evenly over the whole training period,
however, only change points that are not located within the last ``no_changepoint_proportion_from_end``
proportion of training period will be used for change point detection.
trend_changepoints : `list` or None
A list of user specified trend change points, used to estimated the trend to be removed
from the time series before detecting seasonality change points. If provided, the algorithm
will not check existence of detected trend change points or run ``find_trend_changepoints``,
but will use these change points directly for trend estimation.
Return
------
result : `dict`
result dictionary with keys:
``"seasonality_feature_df"`` : `pandas.DataFrame`
The augmented df for seasonality changepoint detection, in other words, the design matrix for
the regression model. Columns:
- "cos1_tod_daily": cosine daily seasonality regressor of first order at change point 0.
- "sin1_tod_daily": sine daily seasonality regressor of first order at change point 0.
- ...
- "cos1_conti_year_yearly": cosine yearly seasonality regressor of first order at
change point 0.
- "sin1_conti_year_yearly": sine yearly seasonality regressor of first order at
change point 0.
- ...
- "cos{daily_seasonality_order}_tod_daily_cp{potential_changepoint_n}" : cosine
daily seasonality regressor of {yearly_seasonality_order}th order at change point
{potential_changepoint_n}.
- "sin{daily_seasonality_order}_tod_daily_cp{potential_changepoint_n}" : sine
daily seasonality regressor of {yearly_seasonality_order}th order at change point
{potential_changepoint_n}.
- ...
- "cos{yearly_seasonality_order}_conti_year_yearly_cp{potential_changepoint_n}" : cosine
yearly seasonality regressor of {yearly_seasonality_order}th order at change point
{potential_changepoint_n}.
- "sin{yearly_seasonality_order}_conti_year_yearly_cp{potential_changepoint_n}" : sine
yearly seasonality regressor of {yearly_seasonality_order}th order at change point
{potential_changepoint_n}.
``"seasonality_changepoints"`` : `dict`[`list`[`datetime`]]
The dictionary of detected seasonality change points for each component.
Keys are component names, and values are list of change points.
``"seasonality_estimation"`` : `pandas.Series`
The estimated seasonality with detected seasonality change points.
The series has the same length as ``original_df``. Index is timestamp, and values
are the estimated seasonality at each timestamp.
The seasonality estimation is the estimated seasonality effect, with the trend estimated
by `~greykite.algo.changepoint.adalasso.changepoints_utils.estimate_trend_with_detected_changepoints`
removed.
``"seasonality_components_df`` : `pandas.DataFrame`
The processed ``seasonality_components_df``. Daily component row is removed if
inferred frequency or aggregation frequency is at least one day.
"""
# Checks parameter rationality.
if potential_changepoint_n < 0:
raise ValueError("potential_changepoint_n can not be negative. "
"A large number such as 50 is recommended")
if df.dropna().shape[0] < 5:
raise ValueError("Change point detector does not work for less than "
"5 observations. Please increase sample size.")
if no_changepoint_proportion_from_end < 0 or no_changepoint_proportion_from_end > 1:
raise ValueError("``no_changepoint_proportion_from_end`` needs to be between 0 and 1.")
if no_changepoint_distance_from_end is not None:
check_freq_unit_at_most_day(no_changepoint_distance_from_end, "no_changepoint_distance_from_end")
data_length = pd.to_datetime(df[time_col].iloc[-1]) - pd.to_datetime(df[time_col].iloc[0])
no_changepoint_proportion_from_end = to_offset(no_changepoint_distance_from_end).delta / data_length
if potential_changepoint_distance is not None:
check_freq_unit_at_most_day(potential_changepoint_distance, "potential_changepoint_distance")
data_length = pd.to_datetime(df[time_col].iloc[-1]) - pd.to_datetime(df[time_col].iloc[0])
potential_changepoint_n = data_length // to_offset(potential_changepoint_distance).delta
if regularization_strength is None:
warnings.warn("regularization_strength is set to None. This will trigger cross-validation to "
"select the tuning parameter which might result in too many change points. "
"Keep the default value or tuning around it is recommended.")
if regularization_strength is not None and (regularization_strength < 0 or regularization_strength > 1):
raise ValueError("regularization_strength must be between 0.0 and 1.0.")
df[time_col] = pd.to_datetime(df[time_col])
# If user provides a list of trend change points, these points will be used to estimate trend.
if trend_changepoints is not None:
trend_estimation = estimate_trend_with_detected_changepoints(
df=df,
time_col=time_col,
value_col=value_col,
changepoints=trend_changepoints
)
self.trend_changepoints = trend_changepoints
self.trend_estimation = trend_estimation
self.original_df = df
self.time_col = time_col
self.value_col = value_col
self.y = df[value_col]
self.y.index = df[time_col]
# If user doesn't provide trend change points, the trend change points will be found automatically.
else:
# Checks if trend change point is available.
# Runs trend change point detection with the same parameters if not available.
import numpy as np
from PIL import Image, ImageDraw, ImageFont, ImageMath
from pyray.shapes.solid.polyhedron import *
from pyray.axes import *
from pyray.rotation import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#############################################################################
## Scene 1 - Platonic solids popping up.
basedir = '..\\images\\RotatingCube\\'
txt = "This is a Tetrahedron"
tt = Tetartoid(0.33,0)
for i in range(0, 31):
r = general_rotation(np.array([0.5,0.5,0.5]),2*np.pi*i/30)
#r = rotation(3,np.pi/15*i)
im = Image.new("RGB", (2048, 2048), (1,1,1))
draw = ImageDraw.Draw(im,'RGBA')
tt.render_solid_planes(draw, r, shift=np.array([1000, 1000, 0]), scale=150*i/10.0)
writeStaggeredText(txt, draw, i, speed=2)
im.save(basedir + "im" + str(i) + ".png")
#############################################################################
## Scene 2 - It has 12 symmetries.
basedir = '..\\images\\RotatingCube\\'
txt = "It can be slowly converted to this solid"
for i in range(0, 31):
tt = Tetartoid(0.4,0.1*i/30)
r = general_rotation(np.array([0.5,0.5,0.5]),2*np.pi*i/30)
#r = rotation(3,np.pi/15*i)
im = Image.new("RGB", (2048, 2048), (1,1,1))
draw = ImageDraw.Draw(im,'RGBA')
tt.render_solid_planes(draw, r, shift=np.array([1000, 1000, 0]), scale=450*(1+i/60))
writeStaggeredText(txt, draw, i, speed=2)
im.save(basedir + "im" + str(i) + ".png")
#############################################################################
## Scene a - Step by step tetartoid face
## Confirms that the tetartoid face given by wikipedia traces out a pentagon.
basedir = '..\\images\\RotatingCube\\'
pts = tetartoid_face(1,2,3)
w=10
im = Image.new("RGB", (2048, 2048), (1,1,1))
draw = ImageDraw.Draw(im,'RGBA')
for i in range(5):
j1 = pts[i]
j = j1*30+1000
draw.ellipse((j[0]-w,j[1]-w,j[0]+w,j[1]+w),fill=(255,255,255))
render_solid_planes(faces,draw,r,scale=150)
im.save(basedir + "im" + str(i) + ".png")
#############################################################################
## Scene b - Draws out tetartoid faces via tetrahedral rotations.
## Confirms that once we draw a face, we can rotate it by the tetrahedral
## rotation group to form a tetartoid.
basedir = '..\\images\\RotatingCube\\'
pts = tetartoid_face(1,2,3)
rots = tetrahedral_rotations()
r = general_rotation(np.array([0.5,0.5,0.5]),2*np.pi*10/30)
w=10
faces = []
faces.append(pts)
for i in range(len(rots)):
faces.append(np.dot(pts,rots[i]))
#render_solid_planes(faces,draw,r,scale=150)
#im.save(basedir + "im" + str(i) + ".png")
for i in range(31):
im = Image.new("RGB", (2048, 2048), (1,1,1))
draw = ImageDraw.Draw(im,'RGBA')
r = general_rotation(np.array([0.5,0.5,0.5]),2*np.pi*i/30)
render_solid_planes(faces,draw,r,scale=150)
im.save(basedir + "im" + str(i) + ".png")
#############################################################################
## Scene c - tesseract that is shaded.
basedir = '..\\images\\RotatingCube\\'
cu = Cube(4)
for i in range(0,31):
r = rotation(4, 2*np.pi*i/30)
faces = [orient_face(np.dot(j.face_matrix-np.array([.5,.5,.5,.5]),r)[[0,1,3,2]][:,:-1]) for j in cu.faces]
im = Image.new("RGB", (2048, 2048), (1,1,1))
draw = ImageDraw.Draw(im,'RGBA')
render_solid_planes(faces,draw,np.eye(3),scale=350)
im.save(basedir + "im" + str(i) + ".png")
#############################################################################
## Scene d - Quadrilateral flap rotating about hinge.
basedir = '..\\images\\RotatingCube\\'
a = np.random.uniform(size=2)
b = np.random.uniform(size=2)
midd = (a+b)/2
perpend = (a-b)*np.array([-1,1])
p = 0.8
c = midd + p*perpend
p0 = 0.45
pt1 = p0*a+(1-p0)*b
p1=1.3
pt1 = pt1 - perpend*p1
plane = np.array([a,c,b,pt1])
plane = np.append(plane,np.zeros(4)[...,None],1)
plane = np.append(plane,np.ones(4)[...,None],1)
for i in range(0,10):
im = Image.new("RGB", (2048, 2048), (1,1,1))
draw = ImageDraw.Draw(im,'RGBA')
r = axis_rotation(plane[1],plane[0],2*np.pi*i/30)
plane1 = np.dot(r,plane.T).T
plane1 = plane1[:,np.array([0,1,2])]
render_solid_planes([plane1],draw,np.eye(3),cut_back_face=False)
im.save(basedir + "im" + str(i) + ".png")
#############################################################################
## Scene e - Forming half mesh for trigonal trapezohedron.
basedir = '..\\images\\RotatingCube\\'
a = np.array([1,0])
b = np.array([-1,0])
midd = (a+b)/2
perpend = np.dot(planar_rotation(np.pi/2),(a-b))
p = 0.8
c = midd + p*perpend
perpendicular = np.sqrt(sum((c-midd)**2))
base = np.sqrt(sum((a-midd)**2))
## The two equal angles of the isosceles triangle used to form the quadrilateral.
theta = np.arctan(perpendicular/base)
## The angle between the two equal sides of the quadrilateral.
theta1 = 2*(np.pi/2-theta)
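## Note: 'plane' in the next few lines is still the quadrilateral flap built in
## Scene d above; theta2 is measured on that flap before 'plane' is redefined below.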
vec1 = plane[3]-plane[0]
vec1 = vec1/np.sqrt(sum(vec1*vec1))
vec2 = plane[3]-plane[2]
vec2 = vec2/np.sqrt(sum(vec2*vec2))
theta2 = np.arccos(np.dot(vec1,vec2))
## The rotation needed to convert mesh plot to solid.
phi = np.arccos((np.cos(theta1)**2-np.cos(theta1))/np.sin(theta1)**2)
phi2 = -np.arccos((np.cos(theta2)**2-np.cos(theta2))/np.sin(theta2)**2)
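## Reading of these formulas (not from the original author): if three congruent face
## corners with angle theta meet at a vertex, the spherical law of cosines gives the
## dihedral angle C via cos(theta) = cos(theta)**2 + sin(theta)**2*cos(C); the fold
## needed to go from the flat (coplanar) layout to that dihedral angle is pi - C, and
## cos(pi - C) = (cos(theta)**2 - cos(theta))/sin(theta)**2, the expression used for phi and phi2.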
p0 = 0.3
pt1 = p0*a+(1-p0)*b
p1=0.75
pt1 = pt1 - perpend*p1
plane = np.array([a,c,b,pt1])
r = rotation(3,np.pi/5*0)
plane = np.append(plane,np.zeros(4)[...,None],1)
plane = np.dot(plane, r)
plane_per = np.cross(plane[0]-plane[1], plane[1]-plane[2])
plane = np.append(plane,np.ones(4)[...,None],1)
plane_per = -1*np.concatenate((plane_per,[0]),axis=0) ## The sign here seems to matter. Need to understand how.
#plane_per = np.array([0,0,3.2,0])
for i in range(0,31):
im = Image.new("RGB", (2048, 2048), (1,1,1))
draw = ImageDraw.Draw(im,'RGBA')
r = axis_rotation(plane[1], plane[1]+plane_per, theta1*i/30)
r1 = axis_rotation(plane[1], plane[1]+plane_per, -theta1*i/30)
side_mid = (plane[0]+plane[3])/2
r2 = axis_rotation(side_mid, side_mid+plane_per, -np.pi*i/30)
plane2 = np.dot(r,plane.T).T
plane2 = plane2[:,np.array([0,1,2])]
plane3 = np.dot(r1,plane.T).T
plane3 = plane3[:,np.array([0,1,2])]
plane4 = np.dot(r2,plane.T).T
plane4 = plane4[:,np.array([0,1,2])]
plane1 = plane[:,np.array([0,1,2])]
render_solid_planes([plane1,plane2,plane3,plane4],draw,np.eye(3),cut_back_face=False, scale=150, make_edges=True)
im.save(basedir + "im" + str(i) + ".png")
im.close()
for i in range(0,31):
im = Image.new("RGB", (2048, 2048), (1,1,1))
draw = ImageDraw.Draw(im,'RGBA')
r = axis_rotation(plane[1],plane[1]+plane_per, theta1)
rr = axis_rotation(plane[1],plane[2], i/30*phi)
r = np.dot(rr,r)
r1 = axis_rotation(plane[1],plane[1]+plane_per, -theta1)
rr1 = axis_rotation(plane[1],plane[0], -i/30*phi) ## Need to understand how sign works here.
r1 = np.dot(rr1,r1)
r2 = axis_rotation(side_mid, side_mid+plane_per, -np.pi)
rr2 = axis_rotation(plane[3], plane[0], i/30*np.pi/3)
r2 = np.dot(rr2,r2)
plane2 = np.dot(r,plane.T).T
plane2 = plane2[:,np.array([0,1,2])]
plane3 = np.dot(r1,plane.T).T
plane3 = plane3[:,np.array([0,1,2])]
plane4 = np.dot(r2,plane.T).T
plane4 = plane4[:,np.array([0,1,2])]
plane1 = plane[:,np.array([0,1,2])]
render_solid_planes([plane1,plane2,plane3,plane4],draw,np.eye(3),cut_back_face=False, scale=150)
im.save(basedir + "im" + str(31+i) + ".png")
im.close()
#############################################################################
## Scene e.1 - Forming trigonal trapezohedron with rotations.
m1= np.array([[1,0,0], [0,-1,0], [0,0,-1]])
t=2*np.pi/3
c=np.cos(t)
s=np.sin(t)
m2= np.array([ [c,s,0], [-s,c,0], [0,0,1]])
x0=2
y0=0.5
z0=1
z = z0*(-s*x0+(c-1)*y0)*x0/((c-1)*x0+s*y0)/y0
p = np.zeros((3,8))
p[:,0] = [x0,y0,z0]
p[:,1] = np.dot(m2,p[:,0])
p[:,2] = np.dot(m2,p[:,1])
p[:,3] = np.dot(m1,p[:,0])
p[:,4] = np.dot(m2,p[:,3])
p[:,5] = np.dot(m2,p[:,4])
p[:,6] = [0,0,z]
p[:,7] = [0,0,-z]
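## Reading of the construction above: m1 = diag(1,-1,-1) is a 180-degree rotation about
## the x-axis and m2 is a (+/-)120-degree rotation about the z-axis, so p[:,0..5] are the
## six equatorial vertices obtained as the orbit of (x0,y0,z0) under these symmetries,
## while p[:,6] and p[:,7] are the two apexes at (0,0,+/-z).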
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#ax.scatter(p[0,:], p[1,:], p[2,:], c='r', marker='o')
#plt.show()
colors = ['red','green','blue','yellow','orange','white','grey','purple']
basedir = '..\\images\\RotatingCube\\'
def draw_pts():
for j in range(30):
im = Image.new("RGB", (2048, 2048), (1,1,1))
draw = ImageDraw.Draw(im,'RGBA')
r = rotation(3,np.pi/30*j)
p1 = np.dot(r,p)
p1 = p1*200+1000.0
for i in range(p1.shape[1]):
v = p1[:,i]
draw.ellipse((v[0]-5, v[1]-5, v[0]+5, v[1]+5),fill=colors[i], outline=colors[i])
im.save(basedir + "im" + str(j) + ".png")
plane1 = np.array([p[:,i] for i in [0,3,1,6]])
plane2 = np.array([p[:,i] for i in [4,7,5,2]])
plane3 = np.array([p[:,i] for i in [0,3,7,5]])
plane4 = np.array([p[:,i] for i in [1,6,2,4]])
plane5 = np.array([p[:,i] for i in [2,5,0,6]])
plane6 = np.array([p[:,i] for i in [1,3,7,4]])
planes = np.array([plane1,plane2,plane3,plane4,plane5,plane6])
def draw_planes():
for j in range(31):
im = Image.new("RGB", (2048, 2048), (1,1,1))
draw = ImageDraw.Draw(im,'RGBA')
r = rotation(3,np.pi/30*j)
#r = general_rotation(np.array([0.0,0.0,1.0]),2*np.pi*j/30)
render_solid_planes(planes,draw,r,cut_back_face=True, scale=200)
im.save(basedir + "im" + str(j) + ".png")
im.close()
#############################################################################
## Scene f - Visualize dual planes of tetartoid.
basedir = '..\\images\\RotatingCube\\'
tt = Tetartoid(s=0.45,t=0.14)
dual_faces = []
for j in range(20):
dual_face = np.array([np.mean(tt.planes[i],axis=0) for i in tt.dual_face_indices[j]])
dual_face = orient_face(dual_face)
dual_faces.append(dual_face)
for i in range(0, 31):
r = general_rotation(np.array([0.5,0.5,0.5]),2*np.pi*i/30)
#r = rotation(3,np.pi/15*i)
im = Image.new("RGB", (2048, 2048), (1,1,1))
draw = ImageDraw.Draw(im,'RGBA')
tt.render_solid_planes(draw, r, shift=np.array([1000, 1000, 0]), scale=1000, trnsp=120, make_edges=True)
render_solid_planes(dual_faces,draw,r,scale=1000, trnsp=200)
im.save(basedir + "im" + str(i) + ".png")
#############################################################################
## Scene x - All platonic solids.
basedir = '..\\images\\RotatingCube\\'
ic = Icosahedron()
dd = Dodecahedron()
tt = Tetrahedron()
oc = Octahedron()
tr = Tetartoid(0.4,0.1)
for i in range(0, 31):
r = general_rotation(np.array([0.5,0.5,0.5]),2*np.pi*i/30)
#r = rotation(3,np.pi/15*i)
im = Image.new("RGB", (2048, 2048), (1,1,1))
draw = ImageDraw.Draw(im,'RGBA')
ic.render_solid_planes(draw, r, shift=np.array([500, 500, 0]), scale=150)
dd.render_solid_planes(draw, r, shift=np.array([1500, 1500, 0]), scale=150)
tt.render_solid_planes(draw, r, shift=np.array([1500, 500, 0]), scale=150)
oc.render_solid_planes(draw, r, shift=np.array([500, 1500, 0]), scale=300)
tr.render_solid_planes(draw, r, shift=np.array([1000, 1000, 0]), scale=450)
im.save(basedir + "im" + str(i) + ".png")
#############################################################################
## Scene y - Icosahedron morphing.
basedir = '..\\images\\RotatingCube\\'
for i in range(0, 31):
tr = Tetartoid(s=0.333, t=0.2*i/30)
r = general_rotation(np.array([0.5,0.5,0.5]),2*np.pi*i/30)
im = Image.new("RGB", (2048, 2048), (1,1,1))
draw = ImageDraw.Draw(im,'RGBA')
tr.render_solid_planes(draw, r, shift=np.array([1000, 1000, 0]), scale=650)
im.save(basedir + "im" + str(i) + ".png")
def tetartoid(draw, r, s=0.3, t=0.04, scale=500, shift=np.array([1000.0, 1000.0, 0])):
'''
Draws a tetartoid. Based on the answer by Arentino here -
https://math.stackexchange.com/questions/1393370/what-are-the-rules-for-a-tetartoid-pentagon
args:
s: 0 <= s <= 0.5.
t: distance from each edge point p_ij to the corresponding point q_ij (see the construction comments below).
'''
tet_orig = np.array([
[1,1,1],
[-1,-1,1],
[1,-1,-1],
[-1,1,-1]
])
# Make it a tetrahedron with unit edges.
tet_orig = tet_orig/2/np.sqrt(2)
r1 = rotation(3, np.pi/10)
tet_orig = np.dot(tet_orig, r1)
v = np.dot(r, np.transpose(tet_orig)) * scale
v = np.transpose(v) + shift[:3]
# For each edge V_i V_j, construct two points P_ij and P_ji having a fixed distance s from V_i and V_j.
p = np.zeros((4,4,3))
for i in range(4):
for j in range(i+1, 4):
p[i, j] = (1-s)*v[i] + s*v[j]
p[j, i] = s*v[i] + (1-s)*v[j]
# Join the center C_ijk of every face V_i V_j V_k with P_ij, P_jk and P_ik.
# First, let's just obtain the centers.
c = np.zeros((4, 3))
for i in range(4):
face = [f for f in range(4) if f != i] #TODO: Is there a better way to exclude indices?
c[i] = sum(v[face]) / 3
# Now let o be the tetartoid center.
o = shift
# Consider the six planes o v_i v_j passing through the center and each edge.
# From point p_ij, draw the perpendicular line to o v_i v_j and take on it
# a point q_ij such that p_ij q_ij = t.
q = np.zeros((4, 4, 3))
for i in range(4):
for j in range(i+1, 4):
directn = np.cross(v[i]-o, v[j]-o)
directn = directn / np.sqrt(sum(directn**2))
q[i, j] = p[i, j] + directn * t * scale
q[j, i] = p[j, i] - directn * t * scale
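# Each pentagonal face below is assembled from one face centre c_k, two of the offset
# points q, one tetrahedron vertex, and another q point; every triangular face of the
# tetrahedron contributes three such pentagons, giving the 12 faces of the tetartoid.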
planes = [
[c[3], q[2,0], q[0,2], v[0], q[0,1]],
[c[3], q[1,2], q[2,1], v[2], q[2,0]],
[c[3], q[0,1], q[1,0], v[1], q[1,2]],
[c[1], q[0,2], q[2,0], v[2], q[2,3]],
[c[1], q[2,3], q[3,2], v[3], q[3,0]],
[c[1], q[3,0], q[0,3], v[0], q[0,2]],
[c[2], q[1,0], q[0,1], v[0], q[0,3]],
[c[2], q[3,1], q[1,3], v[1], q[1,0]],
[c[2], q[0,3], q[3,0], v[3],
# torch/fft/__init__.py
import sys
import torch
from torch._C import _add_docstr, _fft # type: ignore
Tensor = torch.Tensor
# Note: This not only adds the doc strings for the spectral ops, but
# connects the torch.fft Python namespace to the torch._C._fft builtins.
fft = _add_docstr(_fft.fft_fft, r"""
fft(input, n=None, dim=-1, norm=None) -> Tensor
Computes the one dimensional discrete Fourier transform of :attr:`input`.
Note:
The Fourier domain representation of any real signal satisfies the
Hermitian property: `X[i] = conj(X[-i])`. This function always returns both
the positive and negative frequency terms even though, for real inputs, the
negative frequencies are redundant. :func:`~torch.fft.rfft` returns the
more compact one-sided representation where only the positive frequencies
are returned.
Args:
input (Tensor): the input tensor
n (int, optional): Signal length. If given, the input will either be zero-padded
or trimmed to this length before computing the FFT.
dim (int, optional): The dimension along which to take the one dimensional FFT.
norm (str, optional): Normalization mode. For the forward transform
(:func:`~torch.fft.fft`), these correspond to:
* ``"forward"`` - normalize by ``1/n``
* ``"backward"`` - no normalization
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
Calling the backward transform (:func:`~torch.fft.ifft`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.ifft`
the exact inverse.
Default is ``"backward"`` (no normalization).
Example:
>>> import torch.fft
>>> t = torch.arange(4)
>>> t
tensor([0, 1, 2, 3])
>>> torch.fft.fft(t)
tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j])
>>> t = torch.tensor([0.+1.j, 2.+3.j, 4.+5.j, 6.+7.j])
>>> torch.fft.fft(t)
tensor([12.+16.j, -8.+0.j, -4.-4.j, 0.-8.j])
""")
ifft = _add_docstr(_fft.fft_ifft, r"""
ifft(input, n=None, dim=-1, norm=None) -> Tensor
Computes the one dimensional inverse discrete Fourier transform of :attr:`input`.
Args:
input (Tensor): the input tensor
n (int, optional): Signal length. If given, the input will either be zero-padded
or trimmed to this length before computing the IFFT.
dim (int, optional): The dimension along which to take the one dimensional IFFT.
norm (str, optional): Normalization mode. For the backward transform
(:func:`~torch.fft.ifft`), these correspond to:
* ``"forward"`` - no normalization
* ``"backward"`` - normalize by ``1/n``
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)
Calling the forward transform (:func:`~torch.fft.fft`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.ifft`
the exact inverse.
Default is ``"backward"`` (normalize by ``1/n``).
Example:
>>> import torch.fft
>>> t = torch.tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j])
>>> torch.fft.ifft(t)
tensor([0.+0.j, 1.+0.j, 2.+0.j, 3.+0.j])
""")
fftn = _add_docstr(_fft.fft_fftn, r"""
fftn(input, s=None, dim=None, norm=None) -> Tensor
Computes the N dimensional discrete Fourier transform of :attr:`input`.
Note:
The Fourier domain representation of any real signal satisfies the
Hermitian property: ``X[i_1, ..., i_n] = conj(X[-i_1, ..., -i_n])``. This
function always returns all positive and negative frequency terms even
though, for real inputs, half of these values are redundant.
:func:`~torch.fft.rfftn` returns the more compact one-sided representation
where only the positive frequencies of the last dimension are returned.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the FFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Default: ``s = [input.size(d) for d in dim]``
dim (Tuple[int], optional): Dimensions to be transformed.
Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
norm (str, optional): Normalization mode. For the forward transform
(:func:`~torch.fft.fftn`), these correspond to:
* ``"forward"`` - normalize by ``1/n``
* ``"backward"`` - no normalization
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
Where ``n = prod(s)`` is the logical FFT size.
Calling the backward transform (:func:`~torch.fft.ifftn`) with the same
normalization mode will apply an overall normalization of ``1/n``
between the two transforms. This is required to make
:func:`~torch.fft.ifftn` the exact inverse.
Default is ``"backward"`` (no normalization).
Example:
>>> import torch.fft
>>> x = torch.rand(10, 10, dtype=torch.complex64)
>>> fftn = torch.fft.fftn(x)
The discrete Fourier transform is separable, so :func:`~torch.fft.fftn`
here is equivalent to two one-dimensional :func:`~torch.fft.fft` calls:
>>> two_ffts = torch.fft.fft(torch.fft.fft(x, dim=0), dim=1)
>>> torch.allclose(fftn, two_ffts)
""")
ifftn = _add_docstr(_fft.fft_ifftn, r"""
ifftn(input, s=None, dim=None, norm=None) -> Tensor
Computes the N dimensional inverse discrete Fourier transform of :attr:`input`.
Args:
input (Tensor): the input tensor
s (Tuple[int], optional): Signal size in the transformed dimensions.
If given, each dimension ``dim[i]`` will either be zero-padded or
trimmed to the length ``s[i]`` before computing the IFFT.
If a length ``-1`` is specified, no padding is done in that dimension.
Default: ``s = [input.size(d) for d in dim]``
dim (Tuple[int], optional): Dimensions to be transformed.
Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given.
norm (str, optional): Normalization mode. For the backward transform
(:func:`~torch.fft.ifftn`), these correspond to:
* ``"forward"`` - no normalization
* ``"backward"`` - normalize by ``1/n``
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal)
Where ``n = prod(s)`` is the logical IFFT size.
Calling the forward transform (:func:`~torch.fft.fftn`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.ifftn`
the exact inverse.
Default is ``"backward"`` (normalize by ``1/n``).
Example:
>>> import torch.fft
>>> x = torch.rand(10, 10, dtype=torch.complex64)
>>> ifftn = torch.fft.ifftn(x)
The discrete Fourier transform is separable, so :func:`~torch.fft.ifftn`
here is equivalent to two one-dimensional :func:`~torch.fft.ifft` calls:
>>> two_iffts = torch.fft.ifft(torch.fft.ifft(x, dim=0), dim=1)
>>> torch.allclose(ifftn, two_iffts)
""")
rfft = _add_docstr(_fft.fft_rfft, r"""
rfft(input, n=None, dim=-1, norm=None) -> Tensor
Computes the one dimensional Fourier transform of real-valued :attr:`input`.
The FFT of a real signal is Hermitian-symmetric, ``X[i] = conj(X[-i])`` so
the output contains only the positive frequencies below the Nyquist frequency.
To compute the full output, use :func:`~torch.fft.fft`.
Args:
input (Tensor): the real input tensor
n (int, optional): Signal length. If given, the input will either be zero-padded
or trimmed to this length before computing the real FFT.
dim (int, optional): The dimension along which to take the one dimensional real FFT.
norm (str, optional): Normalization mode. For the forward transform
(:func:`~torch.fft.rfft`), these correspond to:
* ``"forward"`` - normalize by ``1/n``
* ``"backward"`` - no normalization
* ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal)
Calling the backward transform (:func:`~torch.fft.irfft`) with the same
normalization mode will apply an overall normalization of ``1/n`` between
the two transforms. This is required to make :func:`~torch.fft.irfft`
the exact inverse.
Default is ``"backward"`` (no normalization).
Example:
>>> import torch.fft
>>> t = torch.arange(4)
>>> t
tensor([0, 1, 2, 3])
>>> torch.fft.rfft(t)
tensor([ 6.+0.j, -2.+2.j, -2.+0.j])
Compare against the full output from :func:`~torch.fft.fft`:
>>> torch.fft.fft(t)
tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j])
Notice that the symmetric element ``T[-1] == T[1].conj()`` is omitted.
At the Nyquist frequency ``T[-2] == T[2]`` is its own symmetric pair,
and therefore must always be real-valued.
""")
irfft = _add_docstr(_fft.fft_irfft, r"""
irfft(input, n=None, dim=-1, norm=None) -> Tensor
Computes the inverse of :func:`~torch.fft.rfft`.
:attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier
domain, as produced by :func:`~torch.fft.rfft`. By the Hermitian property, the
output will be real-valued.
Note:
Some input frequencies must be real-valued to satisfy the Hermitian
property. In these cases the imaginary component will be ignored.
For example, any imaginary component in the zero-frequency term cannot
be represented in a real output and so will always be ignored.
Note:
The correct interpretation of the Hermitian input depends on the length of
the original data, as given by :attr:`n`. This is because each input shape
could correspond to either an odd or even length signal. By default, the
signal is assumed to be even length and odd signals will not round-trip
properly. So, it is recommended to always pass the signal length :attr:`n`.
Args:
input (Tensor): the input tensor representing a half-Hermitian signal
n (int, optional): Output signal length. This determines the length of the
output signal. If given, the input will either be zero-padded or trimmed to this
length before computing the real IFFT.
Defaults to even output: ``n=2*(input.size(dim) - 1)``.
dim (int, optional): The dimension along which to take the one dimensional real IFFT.
norm (str, optional): Normalization mode. For the
#!/usr/bin/env python
# -*- coding: ISO-8859-15 -*-
#
# generated by wxGlade 0.6.8 (standalone edition) on Sun Oct 02 12:29:06 2016
#
import wx
# begin wxGlade: dependencies
import gettext
from gettext import gettext as _
# end wxGlade
class chronoFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: chronoFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
# Menu Bar
self.chronoframe_menubar = wx.MenuBar()
self.file = wx.Menu()
self.exitmenuitem = wx.MenuItem(self.file, wx.ID_EXIT, _("Exit Chronolapse"), "", wx.ITEM_NORMAL)
self.file.Append(self.exitmenuitem)
self.chronoframe_menubar.Append(self.file, _("File"))
self.aboutmenu = wx.Menu()
self.aboutmenuitem = wx.MenuItem(self.aboutmenu, wx.ID_ANY, _("About"), _("About Chronolapse"), wx.ITEM_NORMAL)
self.aboutmenu.Append(self.aboutmenuitem)
self.chronoframe_menubar.Append(self.aboutmenu, _("About"))
self.SetMenuBar(self.chronoframe_menubar)
# Menu Bar end
self.notebook_1 = wx.Notebook(self, wx.ID_ANY, style=0)
self.notebook_1_capturepane = wx.Panel(self.notebook_1, wx.ID_ANY)
self.label_3 = wx.StaticText(self.notebook_1_capturepane, wx.ID_ANY, _("Capture:"))
self.screenshotcheck = wx.CheckBox(self.notebook_1_capturepane, wx.ID_ANY, _("Screenshots"))
self.screenshotconfigurebutton = wx.Button(self.notebook_1_capturepane, wx.ID_ANY, _("Configure"))
self.webcamcheck = wx.CheckBox(self.notebook_1_capturepane, wx.ID_ANY, _("Camera"))
self.configurewebcambutton = wx.Button(self.notebook_1_capturepane, wx.ID_ANY, _("Configure"))
self.filename_format_timestamp = wx.RadioButton(self.notebook_1_capturepane, wx.ID_ANY, _("Timestamp Filenames"), style=wx.RB_GROUP)
self.filename_format_sequential = wx.RadioButton(self.notebook_1_capturepane, wx.ID_ANY, _("Sequential Filenames"))
self.label_2 = wx.StaticText(self.notebook_1_capturepane, wx.ID_ANY, _("Seconds Between Captures:"))
self.frequencytext = wx.TextCtrl(self.notebook_1_capturepane, wx.ID_ANY, _("60"))
self.startbutton = wx.Button(self.notebook_1_capturepane, wx.ID_ANY, _("Start Capture"))
self.forcecapturebutton = wx.Button(self.notebook_1_capturepane, wx.ID_ANY, _("Force Capture"))
self.notebook_1_pippane = wx.Panel(self.notebook_1, wx.ID_ANY)
self.label_1 = wx.StaticText(self.notebook_1_pippane, wx.ID_ANY, _("Picture in Picture:"))
self.label_4 = wx.StaticText(self.notebook_1_pippane, wx.ID_ANY, _("Main Image Folder:"))
self.pipmainimagefoldertext = wx.TextCtrl(self.notebook_1_pippane, wx.ID_ANY, "")
self.pipmainimagefolderbrowse = wx.Button(self.notebook_1_pippane, wx.ID_ANY, _("..."))
self.label_12 = wx.StaticText(self.notebook_1_pippane, wx.ID_ANY, _("PIP Image Folder:"))
self.pippipimagefoldertext = wx.TextCtrl(self.notebook_1_pippane, wx.ID_ANY, "")
self.pippipimagefolderbrowse = wx.Button(self.notebook_1_pippane, wx.ID_ANY, _("..."))
self.label_13 = wx.StaticText(self.notebook_1_pippane, wx.ID_ANY, _("Output Folder:"))
self.pipoutputimagefoldertext = wx.TextCtrl(self.notebook_1_pippane, wx.ID_ANY, "")
self.pipoutputimagefolderbrowse = wx.Button(self.notebook_1_pippane, wx.ID_ANY, _("..."))
self.label_14 = wx.StaticText(self.notebook_1_pippane, wx.ID_ANY, _("PIP Size:"))
self.pipsizecombo = wx.ComboBox(self.notebook_1_pippane, wx.ID_ANY, choices=[_("Small"), _("Medium"), _("Large")], style=wx.CB_DROPDOWN)
self.label_15 = wx.StaticText(self.notebook_1_pippane, wx.ID_ANY, _("PIP Position:"))
self.pippositioncombo = wx.ComboBox(self.notebook_1_pippane, wx.ID_ANY, choices=[_("Top"), _("Top-Right"), _("Right"), _("Bottom-Right"), _("Bottom"), _("Bottom-Left"), _("Left"), _("Top-Left")], style=wx.CB_DROPDOWN)
self.pipignoreunmatchedcheck = wx.CheckBox(self.notebook_1_pippane, wx.ID_ANY, _("Ignore un-matched images"))
self.pipcreatebutton = wx.Button(self.notebook_1_pippane, wx.ID_ANY, _("Create PIP"))
self.notebook_1_videopane = wx.Panel(self.notebook_1, wx.ID_ANY)
self.VideoLabel = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Video:"))
self.label_22 = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Source Images:"))
self.videosourcetext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, "")
self.videosourcebrowse = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("..."))
self.label_23 = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Destination Folder:"))
self.videodestinationtext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, "")
self.videodestinationbrowse = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("..."))
self.label_26 = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("MEncoder Path:"))
self.mencoderpathtext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, "")
self.mencoderpathbrowse = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("..."))
self.label_25 = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Video Codec:"))
self.videocodeccombo = wx.ComboBox(self.notebook_1_videopane, wx.ID_ANY, choices=[_("mpeg4"), _("mpeg2video"), _("wmv1"), _("wmv2")], style=wx.CB_DROPDOWN)
self.randomname = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Frame Rate:"))
self.videoframeratetext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, _("25"))
self.movielengthlabel = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Estimated Movie Length: 0 m 0 s"))
self.videocreatebutton = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("Encode Video"))
self.static_line_1 = wx.StaticLine(self.notebook_1_videopane, wx.ID_ANY)
self.AudioLabel = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Audio:"))
self.label_22_copy = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Video Source:"))
self.audiosourcevideotext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, "")
self.audiosourcevideobrowse = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("..."))
self.label_23_copy = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Audio Source:"))
self.audiosourcetext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, "")
self.audiosourcebrowse = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("..."))
self.label_26_copy = wx.StaticText(self.notebook_1_videopane, wx.ID_ANY, _("Output Folder:"))
self.audiooutputfoldertext = wx.TextCtrl(self.notebook_1_videopane, wx.ID_ANY, "")
self.audiooutputfolderbrowse = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("..."))
self.createaudiobutton = wx.Button(self.notebook_1_videopane, wx.ID_ANY, _("Add Audio"))
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_MENU, self.exitMenuClicked, self.exitmenuitem)
self.Bind(wx.EVT_MENU, self.aboutMenuClicked, self.aboutmenuitem)
self.Bind(wx.EVT_BUTTON, self.screenshotConfigurePressed, self.screenshotconfigurebutton)
self.Bind(wx.EVT_BUTTON, self.webcamConfigurePressed, self.configurewebcambutton)
self.Bind(wx.EVT_BUTTON, self.startCapturePressed, self.startbutton)
self.Bind(wx.EVT_BUTTON, self.forceCapturePressed, self.forcecapturebutton)
self.Bind(wx.EVT_BUTTON, self.pipMainImageBrowsePressed, self.pipmainimagefolderbrowse)
self.Bind(wx.EVT_BUTTON, self.pipPipImageBrowsePressed, self.pippipimagefolderbrowse)
self.Bind(wx.EVT_BUTTON, self.pipOutputBrowsePressed, self.pipoutputimagefolderbrowse)
self.Bind(wx.EVT_BUTTON, self.createPipPressed, self.pipcreatebutton)
self.Bind(wx.EVT_BUTTON, self.videoSourceBrowsePressed, self.videosourcebrowse)
self.Bind(wx.EVT_BUTTON, self.videoDestinationBrowsePressed, self.videodestinationbrowse)
self.Bind(wx.EVT_BUTTON, self.mencoderPathBrowsePressed, self.mencoderpathbrowse)
self.Bind(wx.EVT_TEXT, self.framerateTextChanged, self.videoframeratetext)
self.Bind(wx.EVT_BUTTON, self.createVideoPressed, self.videocreatebutton)
self.Bind(wx.EVT_BUTTON, self.audioSourceVideoBrowsePressed, self.audiosourcevideobrowse)
self.Bind(wx.EVT_BUTTON, self.audioSourceBrowsePressed, self.audiosourcebrowse)
self.Bind(wx.EVT_BUTTON, self.audioOutputFolderBrowsePressed, self.audiooutputfolderbrowse)
self.Bind(wx.EVT_BUTTON, self.createAudioPressed, self.createaudiobutton)
# end wxGlade
def __set_properties(self):
# begin wxGlade: chronoFrame.__set_properties
self.SetTitle(_("ChronoLapse by Keeyai"))
self.SetSize((511, 438))
self.label_3.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "MS Shell Dlg 2"))
self.screenshotcheck.SetToolTip(_("Check this to capture screenshots"))
self.screenshotcheck.SetValue(1)
self.screenshotconfigurebutton.SetToolTip(_("Click to configure screenshot captures"))
self.webcamcheck.SetToolTip(_("Check to enable webcam captures"))
self.webcamcheck.SetValue(1)
self.configurewebcambutton.SetToolTip(_("Click to configure camera captures"))
self.filename_format_timestamp.SetToolTip(_("Saves screenshots and camera captures with the timestamp in the filename."))
self.filename_format_timestamp.SetValue(1)
self.filename_format_sequential.SetToolTip(_("Saves screenshots and camera captures as sequential numbers. Required by some external encoding libraries."))
self.frequencytext.SetToolTip(_("The number of seconds in between captures. Set to 0 for no automatic capturing."))
self.startbutton.SetToolTip(_("Click to start/stop capturing"))
self.forcecapturebutton.SetToolTip(_("Click to force CL to capture right now. Use for important frames or for creating stop motions."))
self.label_1.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "MS Shell Dlg 2"))
self.pipmainimagefoldertext.SetMinSize((200, -1))
self.pipmainimagefolderbrowse.SetMinSize((25, -1))
self.pipmainimagefolderbrowse.SetToolTip(_("Click to Browse"))
self.pippipimagefoldertext.SetMinSize((200, -1))
self.pippipimagefolderbrowse.SetMinSize((25, -1))
self.pippipimagefolderbrowse.SetToolTip(_("Click to Browse"))
self.pipoutputimagefoldertext.SetMinSize((25, -1))
self.pipoutputimagefolderbrowse.SetMinSize((25, -1))
self.pipoutputimagefolderbrowse.SetToolTip(_("Click to Browse"))
self.pipsizecombo.SetToolTip(_("Select the size of the smaller image"))
self.pipsizecombo.SetSelection(0)
self.pippositioncombo.SetToolTip(_("Select the position of the smaller image"))
self.pippositioncombo.SetSelection(1)
self.pipignoreunmatchedcheck.SetToolTip(_("Check to ignore image names that are in one folder but not the other"))
self.pipignoreunmatchedcheck.Hide()
self.pipignoreunmatchedcheck.SetValue(1)
self.pipcreatebutton.SetToolTip(_("Create PIP"))
self.VideoLabel.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "MS Shell Dlg 2"))
self.videosourcetext.SetMinSize((200, -1))
self.videosourcebrowse.SetMinSize((25, -1))
self.videosourcebrowse.SetToolTip(_("Click to Browse"))
self.videodestinationtext.SetMinSize((200, -1))
self.videodestinationbrowse.SetMinSize((25, -1))
self.videodestinationbrowse.SetToolTip(_("Click to Browse"))
self.mencoderpathtext.SetMinSize((200, -1))
self.mencoderpathtext.SetToolTip(_("Set this to the MEncoder executable"))
self.mencoderpathbrowse.SetMinSize((25, -1))
self.mencoderpathbrowse.SetToolTip(_("Click to Browse"))
self.videocodeccombo.SetToolTip(_("Select which codec to use when encoding your video"))
self.videocodeccombo.SetSelection(0)
self.videoframeratetext.SetMinSize((25, -1))
self.videoframeratetext.SetToolTip(_("Set how many images per second you want to show in your movie"))
self.videocreatebutton.SetToolTip(_("Create the Video"))
self.AudioLabel.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, "MS Shell Dlg 2"))
self.audiosourcevideotext.SetMinSize((200, -1))
self.audiosourcevideobrowse.SetMinSize((25, -1))
self.audiosourcevideobrowse.SetToolTip(_("Click to Browse"))
self.audiosourcetext.SetMinSize((200, -1))
self.audiosourcebrowse.SetMinSize((25, -1))
self.audiosourcebrowse.SetToolTip(_("Click to Browse"))
self.audiooutputfoldertext.SetMinSize((200, -1))
self.audiooutputfoldertext.SetToolTip(_("Set this to the folder where you want the finished video"))
self.audiooutputfolderbrowse.SetMinSize((25, -1))
self.audiooutputfolderbrowse.SetToolTip(_("Click to Browse"))
# end wxGlade
def __do_layout(self):
# begin wxGlade: chronoFrame.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_18 = wx.FlexGridSizer(10, 1, 0, 0)
grid_sizer_18_copy_copy = wx.FlexGridSizer(5, 2, 0, 0)
grid_sizer_34 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_35 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_31 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_23 = wx.FlexGridSizer(1, 3, 0, 5)
grid_sizer_4 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_3 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_18_copy = wx.FlexGridSizer(3, 2, 0, 0)
grid_sizer_30 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_28 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_27 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_5 = wx.FlexGridSizer(5, 1, 0, 0)
grid_sizer_13 = wx.FlexGridSizer(3, 2, 0, 0)
grid_sizer_17 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_14 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_12 = wx.FlexGridSizer(5, 2, 0, 0)
grid_sizer_25 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_22 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_21 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_1 = wx.FlexGridSizer(4, 1, 0, 0)
grid_sizer_26 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_15 = wx.FlexGridSizer(4, 2, 0, 0)
grid_sizer_20 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_16 = wx.FlexGridSizer(1, 2, 0, 0)
grid_sizer_1.Add(self.label_3, 0, 0, 0)
grid_sizer_16.Add(self.screenshotcheck, 0, 0, 0)
grid_sizer_16.Add(self.screenshotconfigurebutton, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_16.AddGrowableCol(0)
grid_sizer_16.AddGrowableCol(1)
grid_sizer_15.Add(grid_sizer_16, 1, wx.EXPAND, 0)
grid_sizer_20.Add(self.webcamcheck, 0, 0, 0)
grid_sizer_20.Add(self.configurewebcambutton, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_20.AddGrowableCol(0)
grid_sizer_20.AddGrowableCol(1)
grid_sizer_15.Add(grid_sizer_20, 1, wx.EXPAND, 0)
grid_sizer_15.Add(self.filename_format_timestamp, 0, 0, 0)
grid_sizer_15.Add(self.filename_format_sequential, 0, 0, 0)
grid_sizer_15.Add(self.label_2, 0, 0, 0)
grid_sizer_15.Add(self.frequencytext, 0, 0, 0)
grid_sizer_15.Add((20, 20), 0, 0, 0)
grid_sizer_15.AddGrowableCol(0)
grid_sizer_15.AddGrowableCol(1)
grid_sizer_1.Add(grid_sizer_15, 1, wx.EXPAND, 0)
grid_sizer_26.Add(self.startbutton, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_26.Add(self.forcecapturebutton, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_26.AddGrowableCol(0)
grid_sizer_26.AddGrowableCol(1)
grid_sizer_1.Add(grid_sizer_26, 1, wx.EXPAND, 0)
self.notebook_1_capturepane.SetSizer(grid_sizer_1)
grid_sizer_1.AddGrowableRow(2)
grid_sizer_1.AddGrowableCol(0)
grid_sizer_5.Add(self.label_1, 0, 0, 0)
grid_sizer_12.Add(self.label_4, 0, 0, 0)
grid_sizer_21.Add(self.pipmainimagefoldertext, 0, wx.EXPAND, 0)
grid_sizer_21.Add(self.pipmainimagefolderbrowse, 0, 0, 0)
grid_sizer_21.AddGrowableCol(0)
grid_sizer_12.Add(grid_sizer_21, 1, wx.EXPAND, 0)
grid_sizer_12.Add(self.label_12, 0, 0, 0)
grid_sizer_22.Add(self.pippipimagefoldertext, 0, wx.EXPAND, 0)
grid_sizer_22.Add(self.pippipimagefolderbrowse, 0, 0, 0)
grid_sizer_22.AddGrowableCol(0)
grid_sizer_12.Add(grid_sizer_22, 1, wx.EXPAND, 0)
grid_sizer_12.Add(self.label_13, 0, 0, 0)
grid_sizer_25.Add(self.pipoutputimagefoldertext, 0, wx.EXPAND, 0)
grid_sizer_25.Add(self.pipoutputimagefolderbrowse, 0, 0, 0)
grid_sizer_25.AddGrowableCol(0)
grid_sizer_12.Add(grid_sizer_25, 1, wx.EXPAND, 0)
grid_sizer_12.AddGrowableCol(0)
grid_sizer_12.AddGrowableCol(1)
grid_sizer_5.Add(grid_sizer_12, 1, wx.EXPAND, 0)
grid_sizer_14.Add(self.label_14, 0, 0, 0)
grid_sizer_14.Add(self.pipsizecombo, 0, 0, 0)
grid_sizer_14.AddGrowableCol(0)
grid_sizer_14.AddGrowableCol(1)
grid_sizer_13.Add(grid_sizer_14, 1, wx.EXPAND, 0)
grid_sizer_17.Add(self.label_15, 0, 0, 0)
grid_sizer_17.Add(self.pippositioncombo, 0, 0, 0)
grid_sizer_17.AddGrowableCol(0)
grid_sizer_17.AddGrowableCol(1)
grid_sizer_13.Add(grid_sizer_17, 1, wx.EXPAND, 0)
grid_sizer_13.AddGrowableCol(0)
grid_sizer_13.AddGrowableCol(1)
grid_sizer_5.Add(grid_sizer_13, 1, wx.EXPAND, 0)
grid_sizer_5.Add(self.pipignoreunmatchedcheck, 0, 0, 0)
grid_sizer_5.Add(self.pipcreatebutton, 0, wx.ALIGN_BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 0)
self.notebook_1_pippane.SetSizer(grid_sizer_5)
grid_sizer_5.AddGrowableRow(4)
grid_sizer_5.AddGrowableCol(0)
grid_sizer_18.Add(self.VideoLabel, 0, 0, 0)
grid_sizer_18_copy.Add(self.label_22, 0, 0, 0)
grid_sizer_27.Add(self.videosourcetext, 0, wx.EXPAND, 0)
grid_sizer_27.Add(self.videosourcebrowse, 0, 0, 0)
grid_sizer_27.AddGrowableCol(0)
grid_sizer_18_copy.Add(grid_sizer_27, 1, wx.EXPAND, 0)
grid_sizer_18_copy.Add(self.label_23, 0, 0, 0)
grid_sizer_28.Add(self.videodestinationtext, 0, wx.EXPAND, 0)
grid_sizer_28.Add(self.videodestinationbrowse, 0, 0, 0)
grid_sizer_28.AddGrowableCol(0)
grid_sizer_18_copy.Add(grid_sizer_28, 1, wx.EXPAND, 0)
grid_sizer_18_copy.Add(self.label_26, 0, 0, 0)
grid_sizer_30.Add(self.mencoderpathtext, 0, wx.EXPAND, 0)
grid_sizer_30.Add(self.mencoderpathbrowse, 0, 0, 0)
grid_sizer_30.AddGrowableCol(0)
grid_sizer_18_copy.Add(grid_sizer_30, 1, wx.EXPAND, 0)
grid_sizer_18_copy.AddGrowableRow(2)
grid_sizer_18_copy.AddGrowableCol(1)
grid_sizer_18.Add(grid_sizer_18_copy, 1, wx.EXPAND, 0)
grid_sizer_3.Add(self.label_25, 0, 0, 0)
grid_sizer_3.Add(self.videocodeccombo, 0, 0, 0)
grid_sizer_3.AddGrowableCol(1)
grid_sizer_23.Add(grid_sizer_3, 1, wx.EXPAND, 0)
grid_sizer_4.Add(self.randomname, 0, 0, 0)
grid_sizer_4.Add(self.videoframeratetext, 0, 0, 0)
grid_sizer_4.AddGrowableCol(1)
grid_sizer_23.Add(grid_sizer_4, 1, wx.EXPAND, 0)
grid_sizer_23.Add(self.movielengthlabel, 0, 0, 0)
grid_sizer_23.AddGrowableCol(0)
grid_sizer_23.AddGrowableCol(1)
grid_sizer_23.AddGrowableCol(2)
grid_sizer_18.Add(grid_sizer_23, 1, wx.EXPAND, 0)
grid_sizer_18.Add(self.videocreatebutton, 0, wx.ALIGN_BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_18.Add((20, 20), 0, 0, 0)
grid_sizer_18.Add(self.static_line_1, 0, wx.EXPAND, 0)
grid_sizer_18.Add(self.AudioLabel, 0, 0, 0)
grid_sizer_18_copy_copy.Add(self.label_22_copy, 0, 0, 0)
grid_sizer_31.Add(self.audiosourcevideotext, 0, wx.EXPAND, 0)
grid_sizer_31.Add(self.audiosourcevideobrowse, 0, 0, 0)
grid_sizer_31.AddGrowableCol(0)
grid_sizer_18_copy_copy.Add(grid_sizer_31, 1, wx.EXPAND, 0)
grid_sizer_18_copy_copy.Add(self.label_23_copy, 0, 0, 0)
grid_sizer_35.Add(self.audiosourcetext, 0, wx.EXPAND, 0)
grid_sizer_35.Add(self.audiosourcebrowse, 0, 0, 0)
grid_sizer_35.AddGrowableCol(0)
grid_sizer_18_copy_copy.Add(grid_sizer_35, 1, wx.EXPAND, 0)
grid_sizer_18_copy_copy.Add(self.label_26_copy, 0, 0, 0)
grid_sizer_34.Add(self.audiooutputfoldertext, 0, wx.EXPAND, 0)
grid_sizer_34.Add(self.audiooutputfolderbrowse, 0, wx.ALIGN_BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 0)
grid_sizer_34.AddGrowableCol(0)
grid_sizer_18_copy_copy.Add(grid_sizer_34, 1, wx.EXPAND, 0)
grid_sizer_18_copy_copy.AddGrowableRow(3)
grid_sizer_18_copy_copy.AddGrowableCol(1)
grid_sizer_18.Add(grid_sizer_18_copy_copy, 1,
def debug(self, msg: str, *args: object, **kwargs: object) -> None:
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
``logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)``
Keyword arguments should be JSON serializable.
"""
self.log_msg(logging.DEBUG, msg, args, kwargs)
def info(self, msg: str, *args: object, **kwargs: object) -> None:
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
``logger.info("Houston, we have a %s", "interesting problem", exc_info=1)``
Keyword arguments should be JSON serializable.
"""
self.log_msg(logging.INFO, msg, args, kwargs)
def warning(self, msg: str, *args: object, **kwargs: object) -> None:
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
``logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)``
Keyword arguments should be JSON serializable.
"""
self.log_msg(logging.WARNING, msg, args, kwargs)
def error(self, msg: str, *args: object, **kwargs: object) -> None:
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
``logger.error("Houston, we have a %s", "major problem", exc_info=1)``
"""
self.log_msg(logging.ERROR, msg, args, kwargs)
def exception(self, msg: str, *args: object, exc_info: bool = True, **kwargs: object) -> None:
"""
Convenience method for logging an ERROR with exception information.
"""
self.error(msg, *args, exc_info=exc_info, **kwargs)
def critical(self, msg: str, *args: object, **kwargs: object) -> None:
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
``logger.critical("Houston, we have a %s", "major disaster", exc_info=1)``
"""
self.log_msg(logging.CRITICAL, msg, args, kwargs)
@stable_api
class ResourceHandler(object):
"""
A baseclass for classes that handle resources. New handlers are registered with the
:func:`~inmanta.agent.handler.provider` decorator.
The implementation of a handler should use the ``self._io`` instance to execute io operations. This io object
abstracts over local or remote operations. See :class:`~inmanta.agent.io.local.LocalIO` for the available
operations.
:param agent: The agent that is executing this handler.
:param io: The io object to use.
"""
def __init__(self, agent: "inmanta.agent.agent.AgentInstance", io: Optional["IOBase"] = None) -> None:
self._agent = agent
if io is None:
raise Exception("Unsupported: no resource mgmt in RH")
else:
self._io = io
self._client: Optional[protocol.SessionClient] = None
# explicit ioloop reference, as we don't want the ioloop for the current thread, but the one for the agent
self._ioloop = agent.process._io_loop
def run_sync(self, func: typing.Callable[[], typing.Awaitable[T]]) -> T:
"""
Run the given async function on the ioloop of the agent. It will block the current thread until the future
resolves.
:param func: A function that returns a yieldable future.
:return: The result of the async function.
"""
f: Future[T] = Future()
# This function is not typed because of generics, the used methods and currying
def run() -> None:
try:
result = func()
if result is not None:
from tornado.gen import convert_yielded
result = convert_yielded(result)
concurrent.chain_future(result, f)
except Exception as e:
f.set_exception(e)
self._ioloop.add_callback(run)
return f.result()
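# Typical usage (see deploy() further down): wrap a coroutine-returning callable and
# block on its result from handler code that runs outside the agent ioloop, e.g.
#   result = self.run_sync(lambda: self.get_client().resource_did_dependency_change(...))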
def set_cache(self, cache: AgentCache) -> None:
self.cache = cache
def get_client(self) -> protocol.SessionClient:
"""
Get the client instance that identifies itself with the agent session.
:return: A client that is associated with the session of the agent that executes this handler.
"""
if self._client is None:
self._client = protocol.SessionClient("agent", self._agent.sessionid)
return self._client
def can_reload(self) -> bool:
"""
Can this handler reload?
:return: Return true if this handler needs to reload on requires changes.
"""
return False
def do_reload(self, ctx: HandlerContext, resource: resources.Resource) -> None:
"""
Perform a reload of this resource.
:param ctx: Context object to report changes and logs to the agent and server.
:param resource: The resource to reload.
"""
def pre(self, ctx: HandlerContext, resource: resources.Resource) -> None:
"""
Method executed before a handler operation (Facts, dryrun, real deployment, ...) is executed. Override this method
to run before an operation.
:param ctx: Context object to report changes and logs to the agent and server.
:param resource: The resource to query facts for.
"""
def post(self, ctx: HandlerContext, resource: resources.Resource) -> None:
"""
Method executed after an operation. Override this method to run after an operation.
:param ctx: Context object to report changes and logs to the agent and server.
:param resource: The resource to query facts for.
"""
def close(self) -> None:
pass
def _diff(self, current: resources.Resource, desired: resources.Resource) -> typing.Dict[str, typing.Dict[str, typing.Any]]:
"""
Calculate the diff between the current and desired resource state.
:param current: The current state of the resource
:param desired: The desired state of the resource
:return: A dict with key the name of the field and value another dict with "current" and "desired" as keys for
fields that require changes.
"""
changes = {}
# check attributes
for field in current.__class__.fields:
current_value = getattr(current, field)
desired_value = getattr(desired, field)
if current_value != desired_value and desired_value is not None:
changes[field] = {"current": current_value, "desired": desired_value}
return changes
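# For example, if only a (hypothetical) "mode" field differs, the returned dict would
# be {"mode": {"current": "0644", "desired": "0600"}}; unchanged fields and fields
# whose desired value is None are omitted.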
def check_resource(self, ctx: HandlerContext, resource: resources.Resource) -> resources.Resource:
"""
Check the current state of a resource
:param ctx: Context object to report changes and logs to the agent and server.
:param resource: The resource to check the current state of.
:return: A resource to represents the current state. Use the :func:`~inmanta.resources.Resource.clone` to create
clone of the given resource that can be modified.
"""
raise NotImplementedError()
def list_changes(self, ctx: HandlerContext, resource: resources.Resource) -> typing.Dict[str, typing.Dict[str, typing.Any]]:
"""
Returns the changes required to bring the resource on this system in the state described in the resource entry.
This method calls :func:`~inmanta.agent.handler.ResourceHandler.check_resource`
:param ctx: Context object to report changes and logs to the agent and server.
:param resource: The resource to check the current state of.
:return: A dict with key the name of the field and value another dict with "current" and "desired" as keys for
fields that require changes.
"""
current = self.check_resource(ctx, resource)
return self._diff(current, resource)
def do_changes(self, ctx: HandlerContext, resource: resources.Resource, changes: Dict[str, Dict[str, object]]) -> None:
"""
Do the changes required to bring the resource on this system in the state of the given resource.
:param ctx: Context object to report changes and logs to the agent and server.
:param resource: The resource to check the current state of.
:param changes: The changes that need to occur as reported by
:func:`~inmanta.agent.handler.ResourceHandler.list_changes`
"""
raise NotImplementedError()
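# A minimal illustrative sketch of a concrete handler (hypothetical resource type and
# io/context helper names, not part of this module): check_resource() reads the current
# state, do_changes() applies the reported diff.
#
#   @provider("example::File", name="posix_file")
#   class FileHandler(ResourceHandler):
#       def check_resource(self, ctx, resource):
#           current = resource.clone()
#           current.content = self._io.read(resource.path)
#           return current
#
#       def do_changes(self, ctx, resource, changes):
#           if "content" in changes:
#               self._io.put(resource.path, resource.content)
#               ctx.set_updated()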
def deploy(
self,
ctx: HandlerContext,
resource: resources.Resource,
requires: Dict[ResourceIdStr, ResourceState],
) -> None:
"""
This method is always called by the agent, even when one of the requires of the given resource
failed to deploy. The default implementation of this method will deploy the given resource when all its
requires were deployed successfully. Override this method if a different condition determines whether the
resource should deploy.
:param ctx: Context object to report changes and logs to the agent and server.
:param resource: The resource to deploy
:param requires: A dictionary mapping the resource id of each dependency of the given resource to its resource state.
"""
def _call_resource_did_dependency_change() -> typing.Awaitable[Result]:
return self.get_client().resource_did_dependency_change(
tid=self._agent.environment, rvid=resource.id.resource_version_str()
)
def _should_reload() -> bool:
if not self.can_reload():
return False
result = self.run_sync(_call_resource_did_dependency_change)
if not result.result:
raise Exception("Failed to determine whether resource should reload")
if result.code != 200:
error_msg_from_server = f": {result.result['message']}" if "message" in result.result else ""
raise Exception(f"Failed to determine whether resource should reload{error_msg_from_server}")
return result.result["data"]
def filter_resources_in_unexpected_state(
reqs: Dict[ResourceIdStr, ResourceState]
) -> Dict[ResourceIdStr, ResourceState]:
"""
Return a sub-dictionary of reqs with only those resources that are in an unexpected state.
"""
unexpected_states = {
const.ResourceState.available,
const.ResourceState.dry,
const.ResourceState.undefined,
const.ResourceState.skipped_for_undefined,
}
return {rid: state for rid, state in reqs.items() if state in unexpected_states}
resources_in_unexpected_state = filter_resources_in_unexpected_state(requires)
if resources_in_unexpected_state:
ctx.set_status(const.ResourceState.skipped)
ctx.warning(
"Resource %(resource)s skipped because a dependency is in an unexpected state: %(unexpected_states)s",
resource=resource.id.resource_version_str(),
unexpected_states=str({rid: state.value for rid, state in resources_in_unexpected_state.items()}),
)
return
failed_dependencies = [req for req, status in requires.items() if status != ResourceState.deployed]
if not any(failed_dependencies):
self.execute(ctx, resource)
if _should_reload():
self.do_reload(ctx, resource)
# encoding=utf-8
import sys
#
import math
# noinspection PyUnresolvedReferences
import maya.cmds as cmds
# noinspection PyUnresolvedReferences
import maya.api.OpenMaya as OpenMaya
# noinspection PyUnresolvedReferences
import maya.api.OpenMayaUI as OpenMayaUI
# Use 2.0 API
def maya_useNewAPI():
pass
# Command
class curveToMeshCmd(object):
def __init__(self, *args):
(
curveObject, meshCreate,
uniformEnable,
vDivision, uDivision,
width, widthExtra,
spin, spinExtra,
twist, taper, arch, archAttachCurveEnable,
minPercent, maxPercent,
sample,
smoothDepth,
angleOffset
) = args
# noinspection PyArgumentList
self._mCurve = OpenMaya.MFnNurbsCurve(curveObject)
self._mMeshCreate = meshCreate
#
self._vDivision = vDivision - 1
self._uDivision = uDivision - 1
#
self._wgt__frame_w_ = width
self._widthExtra = widthExtra
self._spin = spin
self._spinExtra = spinExtra
#
self._twist = twist
self._taper = taper
self._arch = arch
self._archAttachCurveEnable = archAttachCurveEnable
#
self._sample = sample
self._smoothDepth = smoothDepth
#
self.fnc_angleOffset = angleOffset
# Clamp in 0.1
self._minPercent = max(min(minPercent, minPercent - .1, 1.0 - .1), 0.0)
self._maxPercent = max(maxPercent, min(minPercent + .1, 1.0), .1)
#
self._uniformEnable = uniformEnable
#
paramRange = self._mCurve.knotDomain
#
self._minParam = paramRange[0]
self._maxParam = paramRange[1]
#
self._length = self._mCurve.length()
#
self._searchCount = int(self._vDivision*self._sample)
#
step = int(math.log(self._vDivision, 2) / 2)
# Minimum Use 1
self._vStep = max(step, 1)
#
self._updateBasicData()
self._updateReduceData()
self._updateCreateData()
@staticmethod
def _mapRangeValue(range1, range2, value1):
assert isinstance(range1, tuple) or isinstance(range1, list), 'Argument Error, "range1" Must "tuple" or "list".'
assert isinstance(range2, tuple) or isinstance(range2, list), 'Argument Error, "range2" Must "tuple" or "list".'
#
min1, max1 = range1
min2, max2 = range2
#
percent = float(value1 - min1)/(max1 - min1)
#
value2 = (max2 - min2) * percent + min2
return value2
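# e.g. _mapRangeValue((0, 1), (0, curve_length), 0.25) returns a quarter of the
# curve's arc length -- a plain linear remap from range1 into range2.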
#
def _updateByPercentAt(self, index, percent):
if self._uniformEnable is True:
length = self._mapRangeValue((0, 1), (0, self._length), percent)
param = self._mCurve.findParamFromLength(length)
else:
param = self._mapRangeValue((0, 1), (self._minParam, self._maxParam), percent)
#
v = OpenMaya.MVector()
#
xAxis = v.kXaxisVector
xAxis_ = v.kXnegAxisVector
#
yAxis = v.kYaxisVector
yAxis_ = v.kYnegAxisVector
#
zAxis = v.kZaxisVector
zAxis_ = v.kZnegAxisVector
#
point, tangent = self._mCurve.getDerivativesAtParam(param, 4)
tangent = tangent.normalize()
#
if index == 0:
if tangent.isEquivalent(yAxis) or tangent.isEquivalent(yAxis_):
axis = xAxis
else:
axis = yAxis
# Vector Multiplication Cross
xNormal = tangent.__rxor__(axis)
yNormal = xNormal.__rxor__(tangent)
else:
quaternion = self.fnc_tangents[index - 1].rotateTo(tangent)
#
xNormal = self._xNormals[index - 1]
xNormal = xNormal.rotateBy(quaternion)
#
yNormal = self._yNormals[index - 1]
yNormal = yNormal.rotateBy(quaternion)
#
self._vPercents[index] = percent
#
self.cls_points[index] = point
self.fnc_tangents[index] = tangent
#
self._xNormals[index] = xNormal.normalize()
self._yNormals[index] = yNormal.normalize()
#
def _updateBasicData(self):
maxCount = self._searchCount + 1
#
self._vPercents = [None]*maxCount
#
self.cls_points = [None]*maxCount
self.fnc_tangents = [None]*maxCount
#
self._xNormals = [None]*maxCount
self._yNormals = [None]*maxCount
#
for seq in range(maxCount):
if seq == 0:
percent = self._minPercent
elif seq == self._searchCount:
percent = self._maxPercent
else:
percent = self._mapRangeValue((0, maxCount), (self._minPercent, self._maxPercent), seq)
#
self._updateByPercentAt(seq, percent)
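#
# Frame propagation note: the first sample seeds the (xNormal, yNormal) frame from an
# arbitrary up-axis; every later frame is obtained by rotating the previous normals with
# the quaternion that takes the previous tangent to the current one (see
# _updateByPercentAt above), which avoids sudden flips of the frame along the curve.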
#
def _updateAngle(self):
# Angles
self.fnc_angles = []
self.fnc_angleDic = {}
if self.fnc_tangents:
for seq, t in enumerate(self.fnc_tangents):
curPercent = self._vPercents[seq]
#
angleOffset = self.fnc_angleOffset.getValueAtPosition(curPercent)
angleMult = 1 + (angleOffset - .5) * 10.0
#
preSeq = seq - 1
if preSeq <= 0:
preSeq = 0
#
nexSeq = seq + 1
if nexSeq >= self._searchCount:
nexSeq = self._searchCount
#
preTangent = self.fnc_tangents[preSeq]
nexTangent = self.fnc_tangents[nexSeq]
#
preAngle = t.angle(preTangent)
nexAngle = t.angle(nexTangent)
#
angle = sum([preAngle, nexAngle]) / 2.0 * angleMult
# Must Round to .000x
angle = round(angle, 3)
#
self.fnc_angles.append(angle)
self.fnc_angleDic.setdefault(angle, []).append(seq)
#
def _updateAngleSortLis(self):
self._seqSortLis = []
if self.fnc_angleDic:
angles = sorted(self.fnc_angleDic.keys())
for a in angles:
seqs = self.fnc_angleDic[a]
self._seqSortLis.extend(seqs)
#
def _updateReduceData(self):
def updateBranch():
self._filterSeqs = []
#
self._updateAngle()
self._updateAngleSortLis()
#
if self.fnc_angles:
for n in range(rangeCount):
seq = n + 1
#
curPercent = self._vPercents[seq]
#
preSeq = seq - span
if preSeq <= 0:
preSeq = 0
prePercent = self._vPercents[preSeq]
#
nexSeq = seq + span
if nexSeq >= self._searchCount:
nexSeq = self._searchCount
nexPercent = self._vPercents[nexSeq]
# Current Angle
curAngle = self.fnc_angles[seq]
#
preAngles = []
nexAngles = []
for sn in range(span):
subSeq = sn + 1
# Previous
preAngleSeq = seq - subSeq
if preAngleSeq <= minSeq:
da = (self.fnc_angles[minSeq + 1] - self.fnc_angles[minSeq])*subSeq
preAngles.append(self.fnc_angles[minSeq] + da)
else:
preAngles.append(self.fnc_angles[preAngleSeq])
# Next
nexAngleSeq = seq + subSeq
if nexAngleSeq >= maxSeq:
da = (self.fnc_angles[maxSeq] - self.fnc_angles[maxSeq - 1])*subSeq
nexAngles.append(self.fnc_angles[maxSeq] + da)  # append to nexAngles, mirroring the 'Previous' branch above
else:
nexAngles.append(self.fnc_angles[nexAngleSeq])
#
preAngle = sum(preAngles) / span
nexAngle = sum(nexAngles) / span
#
angleSum = sum([preAngle, curAngle, nexAngle])
#
if angleSum > 0:
preAnglePercent = (preAngle + curAngle) / angleSum
nexAnglePercent = (curAngle + nexAngle) / angleSum
#
dAnglePercent = nexAnglePercent - preAnglePercent
else:
dAnglePercent = 0.0
#
percentRange = nexPercent - prePercent
dPercent = percentRange / 2.0
#
minPercent = 1.0/(self._vDivision*10.0)
#
newPercent = (curPercent + dPercent*dAnglePercent)
# Clamp Percent
newPercent = max(min(newPercent, nexPercent - minPercent), prePercent + minPercent)
#
self._updateByPercentAt(seq, newPercent)
#
def updateFilterSeq():
for n in range(rangeCount):
seq = n + 1
#
if seq % self._sample == 0:
self._filterSeqs.append(seq)
#
self._filterSeqs.insert(0, 0)
self._filterSeqs.append(self._searchCount)
self._filterSeqs.sort()
#
self._filterSeqs = []
#
span = self._sample
#
minSeq = 0
maxSeq = self._searchCount
rangeCount = self._searchCount - 2
#
for i in range(self._smoothDepth):
updateBranch()
#
updateFilterSeq()
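#
# Adaptive sampling summary: each smoothing pass nudges interior samples toward regions
# where neighbouring tangents change direction the most, then every self._sample-th
# sample is kept, so the surviving V rows concentrate where the curve bends.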
#
def _updateCreateData(self):
def step01():
l = [0, 1, 2 + uCount, 1 + uCount]
for v in range(vCount):
for u in range(uCount):
self._nSideArray.append(4)
if u == 0:
l_ = [(i + v*(uCount + 1)) for i in l]
else:
l_ = [(i + v*(uCount + 1) + u) for i in l]
#
self._vertexIdArray.extend(l_)
#
def step02():
if (uCount + 1) % 2:
m = int((uCount + 1) / 2)
else:
m = None
#
c = float(uCount) / 2.0
#
for v in range(vCount + 1):
seq = v
for u in range(uCount + 1):
if seq == 0:
rSeq = 0
elif seq == self._vDivision:
rSeq = self._searchCount
else:
rSeq = self._filterSeqs[seq]
#
point = self.cls_points[rSeq]
tangent = self.fnc_tangents[rSeq]
sideVector = self._xNormals[rSeq]
midVector = self._yNormals[rSeq]
#
vPercent = self._vPercents[rSeq]
uPercent = float(abs(u - c)) / float(c)
#
widthExtra = self._widthExtra.getValueAtPosition(vPercent)
spinExtra = self._spinExtra.getValueAtPosition(vPercent)
#
vector = sideVector*(width*widthExtra*2)/2 + sideVector*((taper - 1)*vPercent)*width/2
#
p_ = OpenMaya.MPoint()
if u == 0 or u == uCount:
v_ = vector
else:
if u == m:
# noinspection PyArgumentList
v_ = OpenMaya.MVector(0, 0, 0)
else:
v_ = uPercent*vector
#
archRadians = math.radians(uPercent*90*arch)/2
if u < c:
archRadians = -math.radians(uPercent*90*arch)/2
v_ /= -1
# Arch
rotArch = OpenMaya.MQuaternion()
# noinspection PyArgumentList
rotArch.setValue(
OpenMaya.MVector(tangent.x, tangent.y, tangent.z),
archRadians
)
v_ = v_.rotateBy(rotArch)
#
if not archAttachCurveEnable:
v_ -= midVector*arch*(math.sin(math.radians(45)))*width/2
# Spin + Spin Extra + Twist
rotSpin = OpenMaya.MQuaternion()
# noinspection PyArgumentList
rotSpin.setValue(
OpenMaya.MVector(tangent.x, tangent.y, tangent.z),
math.radians(spin) + math.radians(spinExtra * 360) + math.radians(twist * vPercent)
)
v_ = v_.rotateBy(rotSpin)
#
p_.x, p_.y, p_.z = point.x + v_.x, point.y + v_.y, point.z + v_.z
#
self.cls_pointArray.append(p_)
#
if u == 0:
self._uArray.append(1.0)
elif u == uCount:
self._uArray.append(0.0)
else:
if u == c:
self._uArray.append(0.5)
elif c < u:
self._uArray.append(.5 - uPercent/2)
elif u < c:
self._uArray.append(.5 + uPercent/2)
#
self._vArray.append(1 - vPercent)
#
self._nSideArray, self._vertexIdArray = [], []
self.cls_pointArray = []
self._uArray, self._vArray = [], []
#
vCount = self._vDivision
uCount = self._uDivision
#
width = self._wgt__frame_w_
spin = self._spin
#
twist = self._twist
taper = self._taper
arch = self._arch
archAttachCurveEnable = self._archAttachCurveEnable
#
step01()
step02()
#
def createMesh(self):
mMesh = OpenMaya.MFnMesh()
#
mMesh.create(
self.cls_pointArray,
self._nSideArray,
self._vertexIdArray,
parent=self._mMeshCreate
)
#
mapSet = 'map1'
mMesh.setUVs(self._uArray, self._vArray, mapSet)
mMesh.assignUVs(self._nSideArray, self._vertexIdArray, mapSet)
#
class meshToSurfaceCmd(object):
def __init__(self, *args):
(
meshObject, surfaceCreate,
direction
) = args
# noinspection PyArgumentList
self._mMesh = OpenMaya.MFnMesh(meshObject)
self._mSurfaceCreate = surfaceCreate
#
self._layoutDirection = max(min(direction, 3), 0)
#
self._updateBasicData()
self._updateCreateData()
@staticmethod
def _getKnotsArray(count, degree):
lis = []
minKnots, maxKnots = 0.0, 1.0
#
iPCount = count - 2
[lis.append(minKnots) for i in range(degree)]
#
for seq in range(iPCount):
lis.append(float(seq + 1) * maxKnots / (iPCount + 1))
#
[lis.append(maxKnots) for i in range(degree)]
return lis
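# Hedged worked example (added for illustration, not in the original source):
# for count = 5 CVs and degree = 3 the helper produces a clamped, uniformly
# spaced knot vector:
#   iPCount = 5 - 2 = 3
#   _getKnotsArray(5, 3) -> [0.0, 0.0, 0.0, 0.25, 0.5, 0.75, 1.0, 1.0, 1.0]
# i.e. `degree` copies of 0.0, interior knots at (seq + 1) / (iPCount + 1),
# then `degree` copies of 1.0.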
@staticmethod
def _getMidPoint(point1, point2):
x, y, z = (point1.x + point2.x)/2, (point1.y + point2.y)/2, (point1.z + point2.z)/2
# noinspection PyArgumentList
return OpenMaya.MPoint(x, y, z)
#
def cls_pointAt(self, column, row):
vertexId = self._rowVertexIdDic[row][column]
return self.cls_pointArray[vertexId]
#
def _midPointAt(self, column0, column1,
intrinsics
# because we cannot resolve runtime values like Arn of a resource. For purposes of detecting changes, this
# is good enough. Here is why:
#
# When using intrinsic functions there are two cases when the hash must change:
# - Value of the template parameter changes
# - (or) LogicalId of a referenced resource changes, e.g. !GetAtt NewResource.Arn
#
# The latter case will already change the hash because some value in the Code dictionary changes. We handle the
# first case by resolving references to template parameters. It is okay even if these references are
# present inside another intrinsic such as !Join. The resolver will replace the reference with the parameter's
# value and keep all other parts of !Join identical. This will still trigger a change in the hash.
code_dict = intrinsics_resolver.resolve_parameter_refs(code_dict)
# Construct the LogicalID of Lambda version by appending 10 characters of SHA of CodeUri. This is necessary
# to trigger creation of a new version every time code location changes. Since logicalId changes, CloudFormation
# will drop the old version and create a new one for us. We set a DeletionPolicy on the version resource to
# prevent CloudFormation from actually deleting the underlying version resource
#
# SHA Collisions: For purposes of triggering a new update, we are concerned about just the difference between the
# previous and next hashes. The chances that two subsequent hashes collide are fairly low.
prefix = "{id}Version".format(id=self.logical_id)
logical_dict = {}
try:
logical_dict = code_dict.copy()
except (AttributeError, UnboundLocalError):
pass
else:
if function.Environment:
logical_dict.update(function.Environment)
if function.MemorySize:
logical_dict.update({"MemorySize": function.MemorySize})
logical_id = logical_id_generator.LogicalIdGenerator(prefix, logical_dict, code_sha256).gen()
attributes = self.get_passthrough_resource_attributes()
if attributes is None:
attributes = {}
if "DeletionPolicy" not in attributes:
attributes["DeletionPolicy"] = "Retain"
lambda_version = LambdaVersion(logical_id=logical_id, attributes=attributes)
lambda_version.FunctionName = function.get_runtime_attr("name")
lambda_version.Description = self.VersionDescription
return lambda_version
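# Hedged illustration (added, not part of the SAM source): per the comment
# above, the generated id is the prefix plus a short hash suffix, so for a
# function whose logical id is "MyFunction" (hypothetical name) the call
#   LogicalIdGenerator("MyFunctionVersion", logical_dict, code_sha256).gen()
# yields roughly "MyFunctionVersion" followed by a 10-character SHA fragment.
# Changing CodeUri changes code_dict (and therefore the suffix), which is what
# forces CloudFormation to create a fresh AWS::Lambda::Version on each code update.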
def _construct_alias(self, name, function, version):
"""Constructs a Lambda Alias for the given function and pointing to the given version
:param string name: Name of the alias
:param model.lambda_.LambdaFunction function: Lambda function object to associate the alias with
:param model.lambda_.LambdaVersion version: Lambda version object to associate the alias with
:return: Lambda alias object
:rtype model.lambda_.LambdaAlias
"""
if not name:
raise InvalidResourceException(self.logical_id, "Alias name is required to create an alias")
logical_id = "{id}Alias{suffix}".format(id=function.logical_id, suffix=name)
alias = LambdaAlias(logical_id=logical_id, attributes=self.get_passthrough_resource_attributes())
alias.Name = name
alias.FunctionName = function.get_runtime_attr("name")
alias.FunctionVersion = version.get_runtime_attr("version")
if self.ProvisionedConcurrencyConfig:
alias.ProvisionedConcurrencyConfig = self.ProvisionedConcurrencyConfig
return alias
def _validate_deployment_preference_and_add_update_policy(
self, deployment_preference_collection, lambda_alias, intrinsics_resolver, mappings_resolver
):
if "Enabled" in self.DeploymentPreference:
# resolve intrinsics and mappings for Type
enabled = self.DeploymentPreference["Enabled"]
enabled = intrinsics_resolver.resolve_parameter_refs(enabled)
enabled = mappings_resolver.resolve_parameter_refs(enabled)
self.DeploymentPreference["Enabled"] = enabled
if "Type" in self.DeploymentPreference:
# resolve intrinsics and mappings for Type
preference_type = self.DeploymentPreference["Type"]
preference_type = intrinsics_resolver.resolve_parameter_refs(preference_type)
preference_type = mappings_resolver.resolve_parameter_refs(preference_type)
self.DeploymentPreference["Type"] = preference_type
if deployment_preference_collection is None:
raise ValueError("deployment_preference_collection required for parsing the deployment preference")
deployment_preference_collection.add(self.logical_id, self.DeploymentPreference)
if deployment_preference_collection.get(self.logical_id).enabled:
if self.AutoPublishAlias is None:
raise InvalidResourceException(
self.logical_id, "'DeploymentPreference' requires AutoPublishAlias property to be specified."
)
if lambda_alias is None:
raise ValueError("lambda_alias expected for updating it with the appropriate update policy")
lambda_alias.set_resource_attribute(
"UpdatePolicy", deployment_preference_collection.update_policy(self.logical_id).to_dict()
)
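# Hedged example (added for illustration, not part of the SAM source): with a
# deployment preference driven by a template parameter and a mapping, e.g.
#   DeploymentPreference:
#     Enabled: !Ref EnableGradualDeployments
#     Type: !FindInMap [DeploymentMap, !Ref Stage, PreferenceType]
# the two resolver passes above replace "Enabled" and "Type" with concrete
# values before the preference is registered and the alias receives its
# UpdatePolicy.  The parameter and mapping names here are hypothetical.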
class SamCanary(SamResourceMacro):
"""SAM canary macro."""
resource_type = "AWS::Serverless::Canary"
property_types = {
"FunctionName": PropertyType(False, one_of(is_str(), is_type(dict))),
"Handler": PropertyType(True, is_str()),
"Runtime": PropertyType(True, is_str()),
"CodeUri": PropertyType(False, one_of(is_str(), is_type(dict))),
"InlineCode": PropertyType(False, one_of(is_str(), is_type(dict))),
"MemorySize": PropertyType(False, is_type(int)),
"Tags": PropertyType(False, is_type(dict)),
# Easier to pass through as AWS::Synthetics::Canary only accepts a boolean
"ActiveTracing": PropertyType(False, is_type(bool)),
"AssumeRolePolicyDocument": PropertyType(False, is_type(dict)),
"Timeout": PropertyType(False, is_type(int)),
"Role": PropertyType(False, is_str()),
"Schedule": PropertyType(True, is_type(dict)),
"StartCanaryAfterCreation": PropertyType(True, is_type(bool)),
"ArtifactS3Location": PropertyType(False, one_of(is_type(dict), is_str())),
"FailureRetentionPeriod": PropertyType(False, is_type(int)),
"SuccessRetentionPeriod": PropertyType(False, is_type(int)),
"VpcConfig": PropertyType(False, is_type(dict)),
"Environment": PropertyType(False, dict_of(is_str(), is_type(dict))),
"Policies": PropertyType(False, one_of(is_str(), is_type(dict), list_of(one_of(is_str(), is_type(dict))))),
"CanaryMetricAlarms": PropertyType(False, list_of(is_type(dict))),
}
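# Hedged sketch (added for illustration, not part of the SAM source): a minimal
# template fragment that satisfies the required properties declared above
# (Handler, Runtime, Schedule and StartCanaryAfterCreation are required; the
# rest are optional).  All names and values below are illustrative only.
#   MyCanary:
#     Type: AWS::Serverless::Canary
#     Properties:
#       Handler: index.handler
#       Runtime: syn-nodejs-puppeteer-3.9
#       CodeUri: s3://my-bucket/canary.zip
#       Schedule:
#         Expression: rate(5 minutes)
#       StartCanaryAfterCreation: true
#       CanaryMetricAlarms:
#         - SuccessAlarm:
#             MetricName: SuccessPercent
#             Threshold: 90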
def to_cloudformation(self, **kwargs):
"""Returns the Synthetics Canary to which this SAM Canary corresponds.
:param dict kwargs: already-converted resources that may need to be modified when converting this \
macro to pure CloudFormation
:returns: a list of vanilla CloudFormation Resources, to which this Serverless Canary expands
:rtype: list
"""
resources = []
managed_policy_map = kwargs.get("managed_policy_map", {})
synthetics_canary = self._construct_synthetics_canary()
resources.append(synthetics_canary)
# An S3 Bucket resource will be added to the transformed template if the user doesn't provide an artifact
# bucket to store canary results
artifact_bucket_name = ""
if not self.ArtifactS3Location:
s3bucket = self._construct_artifact_bucket()
resources.append(s3bucket)
synthetics_canary.ArtifactS3Location = {"Fn::Join": ["", ["s3://", {"Ref": s3bucket.logical_id}]]}
artifact_bucket_name = {"Ref": s3bucket.logical_id}
if not self.Role:
role = self._construct_role(artifact_bucket_name, managed_policy_map)
resources.append(role)
synthetics_canary.ExecutionRoleArn = role.get_runtime_attr("arn")
if self.CanaryMetricAlarms:
self._validate_cloudwatch_alarms()
for alarm_dict in self.CanaryMetricAlarms:
resources.append(self._construct_cloudwatch_alarm(alarm_dict))
return resources
def _validate_cloudwatch_alarms(self):
"""Validates the CanaryMetricAlarms property in Serverless Canary
The property should have the following structure
CanaryMetricAlarms:
- AlarmName:
MetricName (required): one of the metrics in VALID_CANARY_METRICS
Threshold (optional): any value of type double
ComparisonOperator (optional): any of the valid values (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cw-alarm.html#cfn-cloudwatch-alarms-comparisonoperator)
Period (optional): Integer that is 10, 30, 60, or any multiple of 60
Note: Alarm names are used as the logical ids of their respective CloudWatch Alarm resources, so if the user has
multiple alarms there must be no duplicate names; otherwise the alarms would override each other without the
user's knowledge.
:raise: InvalidResourceException
"""
# keeps list of alarm names to make sure there are no duplicates
list_of_alarm_names = []
for alarm_dict in self.CanaryMetricAlarms:
# Throw an error if there is more than one alarm in the array index, like for example
# CanaryMetricAlarms:
# - Alarm1:
# MetricName: SuccessPercent
# Alarm2:
# MetricName: SuccessPercent
# Threshold: 90
# - Alarm3:
# MetricName: Failed
# throws an error for Alarm2 since Alarm1 is already defined in that dict
if len(alarm_dict) != 1:
raise InvalidResourceException(self.logical_id, "Must have one alarm per array index")
# get the alarm name and the properties the user defined for the alarm
alarm_name = next(iter(alarm_dict))
alarm_item = alarm_dict[alarm_name]
# MetricName property is required
if alarm_item is None or "MetricName" not in alarm_item:
raise InvalidResourceException(
self.logical_id,
"CloudWatch alarm '{key}' is missing required property 'MetricName'.".format(key=alarm_name),
)
metric_name = alarm_item["MetricName"]
# MetricName must be one of the values in VALID_CANARY_METRICS
if metric_name not in VALID_CANARY_METRICS:
raise InvalidResourceException(
self.logical_id,
"MetricName needs to be one of {}".format(VALID_CANARY_METRICS),
)
# make sure all the alarm names are unique
if alarm_name in list_of_alarm_names:
raise InvalidResourceException(self.logical_id, "Duplicate CloudWatch alarm names")
else:
list_of_alarm_names.append(alarm_name)
def _construct_cloudwatch_alarm(self, alarm_dict):
"""Constructs an CloudWatch::Alarm resource if the user specifies the CloudWatchAlarm property in Serverless Canary
:param dict alarm_dict: Alarm name and properties as provided by the customer
:returns: the generated CloudWatch Alarm
:rtype: model.cloudwatch.CloudWatchAlarm
"""
# gets alarm name and the properties defined by user
alarm_name = next(iter(alarm_dict))
alarm_item = alarm_dict[alarm_name]
cloudwatch_alarm = CloudWatchAlarm(
logical_id=alarm_name,
depends_on=self.depends_on,
attributes=self.get_passthrough_resource_attributes(),
)
# default settings for the CloudWatch alarms
# the settings are identical to the Alarms that are made by Synthetics Canary using their dashboard
cloudwatch_alarm.MetricName = alarm_item["MetricName"]
cloudwatch_alarm.Namespace = "CloudWatchSynthetics"
cloudwatch_alarm.EvaluationPeriods = 1
cloudwatch_alarm.Statistic = "Sum"
cloudwatch_alarm.TreatMissingData = "notBreaching"
# connects the alarm to the metric produced by the Synthetics canary from this Serverless resource
cloudwatch_alarm.Dimensions = [{"Name": "CanaryName", "Value": {"Ref": self.logical_id}}]
# set the values if user provides them, if not set them to default value based on the MetricName
cloudwatch_alarm.ComparisonOperator = alarm_item.get(
"ComparisonOperator", DEFAULT_METRIC_VALUES[alarm_item["MetricName"]]["ComparisonOperator"]
)
cloudwatch_alarm.Threshold = float(
alarm_item.get("Threshold", DEFAULT_METRIC_VALUES[alarm_item["MetricName"]]["Threshold"])
)
cloudwatch_alarm.Period = alarm_item.get("Period", DEFAULT_METRIC_VALUES[alarm_item["MetricName"]]["Period"])
return cloudwatch_alarm
def _construct_role(self, artifact_bucket_name, managed_policy_map):
"""Constructs an IAM:Role resource only if user doesn't specify Role property in Serverless Canary
- If the ArtifactS3Location property isn't specified then the the policies to execute the Canary and handle
the resulting data will be added
- If the Tracing property is enabled then the XRay policy based on the user's region will be added
- If the VpcConfig property is specified then the policy to execute VPC will be added
- If the Policies property is specified then the that will be appended to the IAM::Role's Policies property
:returns: the generated IAM Role
:rtype: model.iam.IAMRole
"""
role_attributes = self.get_passthrough_resource_attributes()
if self.AssumeRolePolicyDocument:
assume_role_policy_document = self.AssumeRolePolicyDocument
else:
assume_role_policy_document = IAMRolePolicies.lambda_assume_role_policy()
# add AWS managed policies if user has enabled VpcConfig or Tracing
managed_policy_arns = []
if self.VpcConfig:
managed_policy_arns.append(
ArnGenerator.generate_aws_managed_policy_arn("service-role/AWSLambdaVPCAccessExecutionRole")
)
if
is invalid. Cannot add.')
if delete_prev:
cm.delete_computed_cid(cid)
cm.cx2_cid[cx] = cid
cm.cx2_nx [cx] = nx
cm.cx2_gx [cx] = gx
cm.cx2_roi[cx] = roi
cm.cx2_theta[cx] = theta
cm.max_roi = map(lambda (a,b): max(a,b), zip(cm.max_roi, roi))
# Add This Chip To Reverse Indexing
if cid >= len(cm.cid2_cx):
idAlloc = max(cid+1,len(cm.cid2_cx)*2 + 1)
logdbg('Allocating: '+str(idAlloc)+' more cids')
cm.cid2_cx = np.append(cm.cid2_cx, np.zeros(idAlloc,dtype=np.uint32))
cm.cid2_cx[cid] = cx
nm.nx2_cx_list[nx].append(cx)
gm.gx2_cx_list[gx].append(cx)
# Add user props
for key in cm.user_props.keys():
if not key in props.keys():
cm.user_props[key][cx] = ''
for key,val in props.iteritems():
cm.add_user_prop(key)
cm.user_props[key][cx] = val
# Increment Data Counters
cm.next_cx = max(cm.next_cx + 1, cx+1)
cm.next_cid = max(cm.next_cid+1, cid+1)
cm.max_cx = max(cm.max_cx, cx)
cm.max_cid = max(cm.max_cid, cid)
cm.num_c = cm.num_c + 1
cm.hs.vm.isDirty = True
return cid
def delete_computed_cid(cm, cid):
iom = cm.hs.iom
if np.iterable(cid): logerr('this function only works for a single cid')
logmsg('Removing CID=%d\'s computed files' % cid)
cid_fname_pattern = iom.get_chip_prefix(cid, [])+'*'
iom.remove_computed_files_with_pattern(cid_fname_pattern)
def remove_chip(cm, cx):
cx_list = [cx]
if type(cx) == types.ListType:
cx_list = cx
logdbg('Removing CXs '+str(cx_list))
for cx in cx_list:
# Remove data saved on disk and memory
cm.hs.on_cx_modified(cx)
cid = cm.cx2_cid[cx]
logmsg('Removing cid=%d' % cid)
# Remove cx from other.data managers
gx = cm.cx2_gx[cx]
nx = cm.cx2_nx[cx]
cm.hs.gm.gx2_cx_list[gx].remove(cx)
cm.hs.nm.nx2_cx_list[nx].remove(cx)
# Remove data saved in memory
cm.cx2_cid[cx] = 0
cm.cx2_nx[cx] = 0
cm.cx2_gx[cx] = 0
cm.cx2_roi[cx] = np.array([0,0,0,0],dtype=np.uint32)
cm.cx2_theta[cx] = 0
cm.cid2_cx[cid] = 0
def change_orientation(cm, cx, new_theta):
cid = cm.cx2_cid[cx]
logmsg('Giving cid=%d new theta: %r' % (cid, new_theta))
assert new_theta is not None
cm.hs.on_cx_modified(cx)
cm.cx2_theta[cx] = new_theta
def change_roi(cm, cx, new_roi):
cid = cm.cx2_cid[cx]
logmsg('Giving cid=%d new roi: %r' % (cid, new_roi))
assert new_roi is not None
if new_roi is None:
    logerr('The ROI is empty')
cm.hs.on_cx_modified(cx)
cm.cx2_roi[cx] = new_roi
def rename_chip(cm, cx, new_name):
nm = cm.hs.nm
cid = cm.cid(cx)
old_nx = cm.cx2_nx[cx]
old_name = nm.nx2_name[old_nx]
if old_name == new_name:
logdbg('new_name == old_name')
return
logmsg('Renaming cid='+str(cid)+' from '+str(old_name)+' to '+new_name)
if not new_name in nm.name2_nx.keys():
nm.add_name(-1,new_name)
old_nx = cm.cx2_nx[cx]
new_nx = nm.name2_nx[new_name]
#Debug
old_nid = nm.nx2_nid[old_nx]
new_nid = nm.nx2_nid[new_nx]
logdbg('Old Name Info: cid=%d cx=%d, nid=%d, nx=%d, name=%s' % (cid, cx, old_nid, old_nx, old_name))
logdbg('New Name Info: cid=%d cx=%d, nid=%d, nx=%d, name=%s' % (cid, cx, new_nid, new_nx, new_name))
#EndDebug
nm.nx2_cx_list[old_nx].remove(cx)
nm.nx2_cx_list[new_nx].append(cx)
cm.cx2_nx[cx] = new_nx
# --- Raw Image Representation of Chip ---
def cx2_chip_list(cm, cx_list):
if np.iterable(cx_list):
return [cm.cx2_chip(cx) for cx in iter(cx_list) ]
else:
return [cm.cx2_chip(cx_list)]
def cx2_chip(cm, cx):
chip_fpath = cm.cx2_chip_fpath(cx)
# Load chip and rotate it
return np.asarray(
Image.open(chip_fpath).rotate(
cm.cx2_theta[cx]*180/np.pi, resample=Image.BICUBIC, expand=1))
def cx2_chip_size(cm, cx, rotated=False):
return cm._scaled_size(cx, rotated=rotated)
#chip_fpath = cm.cx2_chip_fpath(cx)
#return Image.open(chip_fpath).size
def cx2_T_chip2img(cm, cx, rotated=True):
'Return the transformation from Rotated Chip Space to Image Space'
#------------------------------
# Steps to transform a detection from Chip Space to Image Space
# (Chip Space): roi=[0, 0, cw, ch]
# * translate: -[cw, ch]/2
# * rotate: -theta
# * translate: [ucw, uch]/2
# (Unoriented Chip Space) = roi=[0,0,ucw,ucw]
# * scale: scale_factor
# * translate: rx, ry
# (Image Space): roi=[rx,ry,rw,rh]
#------------------------------
# rotation radians
theta = cm.cx2_theta[cx]
# roi size and translation
(rx, ry, rw, rh) = np.array(cm.cx2_roi[cx], dtype=np.float)
# unrotated size
(ucw, uch) = cm._scaled_size(cx, rotated=False, dtype=np.float)
# rotated size
(cw, ch) = cm._scaled_size(cx, rotated=True, dtype=np.float)
# Translation Variables
ctx, cty = ( cw/2, ch/2)
uctx, ucty = (ucw/2, uch/2)
sfx, sfy = rw/ucw, rh/uch
sinth = np.sin(theta)
costh = np.cos(theta)
# Translate to centered rotated
trans_center = np.array(
([ 1, 0, -ctx],
[ 0, 1, -cty],
[ 0, 0, 1]), dtype=np.float)
# unrotate
unrotate = np.array(
([costh, -sinth, 0],
[sinth, costh, 0],
[ 0, 0, 1]), dtype=np.float)
# translate to uncentered unrotated
trans_uncenter = np.array(
([ 1, 0, uctx],
[ 0, 1, ucty],
[ 0, 0, 1]), dtype=np.float)
# Unscale to untranslated image space
unscale = np.array(
([ sfx, 0, 0],
[ 0, sfy, 0],
[ 0, 0, 1]), dtype=np.float)
# Translate into image scale
trans_img = np.array(
([ 1, 0, rx],
[ 0, 1, ry],
[ 0, 0, 1]), dtype=np.float)
#return trans_center.dot(unrotate).dot(trans_uncenter).dot(unscale).dot(trans_img)
return trans_img.dot(unscale).dot(trans_uncenter).dot(unrotate).dot(trans_center)
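# Hedged sanity check (added for illustration, not part of the original source):
# the returned matrix applies right-to-left, i.e.
#   img_pt = trans_img . unscale . trans_uncenter . unrotate . trans_center . chip_pt
# For theta == 0 the rotation is the identity and (cw, ch) == (ucw, uch), so the
# centering/uncentering translations cancel and the product reduces to a scale
# by (rw/ucw, rh/uch) followed by a translation to (rx, ry): the chip corner
# (0, 0) maps onto the ROI corner (rx, ry), as expected.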
def cx2_T_chip2unrotated(cm, cx, rotated=True):
'Return the transformation from Rotated Chip Space to Image Space'
# rotation radians
theta = cm.cx2_theta[cx]
# roi size and translation
(rx, ry, rw, rh) = np.array(cm.cx2_roi[cx],dtype=np.float)
# unrotated size
(ucw, uch) = cm._scaled_size(cx, rotated=False, dtype=np.float)
# rotated size
(cw, ch) = cm._scaled_size(cx, rotated=True, dtype=np.float)
# Translation Variables
ctx, cty = ( cw/2, ch/2)
uctx, ucty = (ucw/2, uch/2)
# Translate to centered rotated
trans_center = np.array(([ 1, 0, -ctx],
[ 0, 1, -cty],
[ 0, 0, 1]), dtype=np.float32)
# unrotate
unrotate = np.array(([np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[ 0, 0, 1]), dtype=np.float32)
# translate to uncentered unrotated
trans_uncenter = np.array(([ 1, 0, uctx],
[ 0, 1, ucty],
[ 0, 0, 1]), dtype=np.float32)
#return trans_center.dot(unrotate).dot(trans_uncenter).dot(unscale).dot(trans_img)
return trans_uncenter.dot(unrotate).dot(trans_center)
def cx2_chip_fpath(cm, cx):
'Gets chip fpath with checks'
iom = cm.hs.iom
cid = cm.cid(cx)
chip_fpath = iom.get_chip_fpath(cid)
if not os.path.exists(chip_fpath):
hotspotter.ChipFunctions.precompute_chips(cm.hs, cx_list=[cx], num_procs=1, force_recompute=True)
return chip_fpath
# --- Feature Representation Methods ---
def get_feats(cm, cx, force_recomp=False):
# FIXME: If the algorithm changes, the dirty bit is not flipped
if force_recomp or\
cm.cx2_fpts[cx] is None or\
cm.cx2_fdsc[cx] is None or\
np.sum(cm.cx2_dirty_bit[cx]):
cm.load_features(cx, force_recomp)
return (cm.cx2_fpts[cx], cm.cx2_fdsc[cx])
def get_fpts(cm, cx, force_recomp=False):
if force_recomp or cm.cx2_fpts[cx] is None or np.sum(cm.cx2_dirty_bit[cx]):
cm.load_features(cx, force_recomp)
return cm.cx2_fpts[cx]
def get_fdsc(cm, cx, force_recomp=False):
if force_recomp or cm.cx2_fdsc[cx] is None or np.sum(cm.cx2_dirty_bit[cx]):
cm.load_features(cx, force_recomp)
return cm.cx2_fdsc[cx]
def cx2_nfpts(cm, cxs=None):
if cxs == None:
cxs = cm.all_cxs()
if type(cxs) in [np.uint32, types.IntType]:
cxs = np.array([cxs],dtype=np.uint32)
return np.array([cm.cx2_fpts[cx].shape[0] for cx in cxs], dtype=np.uint32)
# --- Internals ---
def _scaled_size(cm, cx, dtype=float, rotated=False):
'''Returns the ChipSpace size of cx, optionally accounting for rotation.
Depends on the current algorithm settings
dtype specifies the precision of the return type'''
# Compute Unrotated Chip Space
# Get raw size and target size
(_, _, rw, rh) = cm.cx2_roi[cx]
target_diag_pxls = cm.hs.am.algo_prefs.preproc.sqrt_num_pxls
# HACK: Double the size like Lowe; instead of normalizing
if target_diag_pxls == -1:
current_num_diag_pxls = np.sqrt(rw**2 + rh**2)
target_diag_pxls = current_num_diag_pxls*2 # max(, 5000)
ar = np.float(rw)/np.float(rh) # aspect ratio
if ar > 4 or ar < .25:
logwarn( 'Aspect ratio for cx=%d %.2f may be too extreme' % (cx, ar))
# Compute Unoriented scaled chip's width and height
ucw = np.sqrt(ar**2 * target_diag_pxls**2 / (ar**2 + 1))
uch = ucw / ar
# Rotate Unrotated Chip Space into Rotated Chip Space
if rotated:
theta = cm.cx2_theta[cx]
rot = np.array(([np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]), dtype=np.float)
# Extent of Unrotated Chip Space. Center shifted to the origin
pts_00 = np.array([(0,0), (ucw,0), (ucw,uch), (0, uch)]) - np.array((ucw, uch))/2
rot_pts = pts_00.dot(rot)
xymin = rot_pts.min(0)
xymax = rot_pts.max(0)
# Floating point Rotated Chip w/h
cw, ch = xymax - xymin
else:
# Floating point Unrotated Chip w/h
cw, ch = ucw, uch
# Convert to the specified dtype at the end
if dtype is np.float:
return cw, ch
elif np.dtype(dtype).kind == 'f':
return dtype(cw), dtype(ch)
else:
return dtype(round(cw)), dtype(round(ch))
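# Hedged worked example (added for illustration, not in the original source):
# for an ROI with rw = 200, rh = 100 and a target diagonal D = target_diag_pxls,
# the aspect ratio is ar = 2 and
#   ucw = sqrt(ar**2 * D**2 / (ar**2 + 1)) = 2*D / sqrt(5)
#   uch = ucw / ar                         =   D / sqrt(5)
# so sqrt(ucw**2 + uch**2) == D: the unrotated chip keeps the requested diagonal
# length while preserving the ROI's aspect ratio.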
# DEPRECATED
def _cut_out_roi(cm, img, roi):
logdbg('Image shape is: '+str(img.shape))
[gh, gw] = [ x-1 for x in img.shape[0:2] ]
[rx1,ry1,rw,rh] = [ max(0,x) for x in roi]
rx2 = min(gw, rx1+rw)
ry2 = min(gh, ry1+rh)
logdbg('Cutting out chip using: '+str((ry1,ry2,rx1,rx2)))
raw_chip = img[ ry1:ry2, rx1:rx2, : ]
return raw_chip
# DEPRECATED
def cx2_raw_chip(cm, cx):
# --- Cut out the Raw Chip from Img
# TODO: Save raw chips to disk?
gm = cm.hs.gm
gx = cm.cx2_gx[cx]
roi = cm.cx2_roi[cx]
# Read Image
img = gm.gx2_img(gx)
return cm._cut_out_roi(img, roi)
# TODO: Just have a flag for each preprocessing step.
# Move this over from AlgorithmManager
# DEPRECATED-ISH
def cx2_pil_chip(cm, cx, scaled=True, preprocessed=True, rotated=False, colored=False):
am = cm.hs.am
# Convert the raw image to PIL, and uncolor unless otherwise requested
if not colored:
pil_chip = Image.fromarray( cm.cx2_raw_chip(cx) ).convert('L')
else:
pil_chip = Image.fromarray( cm.cx2_raw_chip(cx) )
# Scale
"""
- EMOS 'operatingsystem.py' Source Code -
(C) Cubeflix 2021 (EMOS)
"""
# Imports
from .misc import *
from .memory import *
from .cpu import *
from .computer import *
class OperatingSystem:
"""The main operating system or OS the computer uses. System calls through interrupts can allow for memory and process management (heap and process memory), along with other things such as IO (BIOS).
The OS also handles files and process management with memory, ensuring that we don't run out. Finally, the OS can also switch into user mode, letting the user control everything."""
def __init__(self, computer, has_password=True):
"""Create the operating system.
Args: computer -> the computer the operating system is installed on
has_password -> does the operating system use a password"""
self.computer = computer
self.has_password = has_password
self.mem_alloc_ids = []
self.process_ids = []
self.processes = {}
self.running = False
# Maximum number of operations to run on each thread if no IO is involved
self.max_operations_per_thread = 64
# Terminal
self.terminal = Terminal(self.computer)
# Kernel STDOut
self.kernel_stdout = STDOut()
# System libraries (TODO)
self.syslibs = [INT_STR_LIB, WRITELIB, FLOAT_STR_LIB]
self.log = ''
def set_cmd_handler(self, cmdhandler):
"""Set the current command handler. This is optional.
Args: cmdhandler -> the command handler to use"""
self.cmdhandler = cmdhandler
def set_max_thread_operations(self, max_operations_per_thread):
"""Set the maximum number of operations each thread gets to run per iteration, if no IO is involved.
Args: max_operations_per_thread -> maximum operations per thread per iteration."""
self.max_operations_per_thread = max_operations_per_thread
def allocate_memory(self):
"""Allocate memory, returning the memory id."""
for i in range(max(self.mem_alloc_ids) if self.mem_alloc_ids else 0):
    if not i in self.mem_alloc_ids:
        # This id is free
        current_mem_id = i
        break
else:
    # No holes, so add a new id
    current_mem_id = (max(self.mem_alloc_ids) if self.mem_alloc_ids else -1) + 1
self.computer.memory.add_memory_partition(('mem', current_mem_id), MemorySection(('mem', current_mem_id), 0, bytearray()))
self.mem_alloc_ids.append(current_mem_id)
return (0, current_mem_id)
def free_memory(self, mem_id):
"""Free the memory at memory id mem_id.
Args: mem_id -> memory id"""
if not mem_id in self.mem_alloc_ids:
return (19, "Memory ID does not exist.")
# Free the memory
self.mem_alloc_ids.remove(mem_id)
self.computer.memory.delete_memory_partition(('mem', mem_id))
return (0, None)
def get_memory_size(self, mem_id):
"""Get the size of the memory partition mem_id.
Args: mem_id -> memory id"""
if not mem_id in self.mem_alloc_ids:
return (19, "Memory ID does not exist.")
return (0, self.computer.memory.memorypartitions[('mem', mem_id)].size)
def get_memory(self, mem_id, start_offset, size):
"""Get memory mem_id at start_offset with size size.
Args: mem_id -> memory id
start_offset -> starting offset
size -> amount of memory to get"""
if not mem_id in self.mem_alloc_ids:
return (19, "Memory ID does not exist.")
return self.computer.memory.memorypartitions[('mem', mem_id)].get_bytes(start_offset, size)
def edit_memory(self, mem_id, data, start_offset):
"""Edit the memory at mem_id, and move data into the memory at start_offset.
Args: mem_id -> memory id
data -> data to edit to
start_offset -> starting offset"""
# Get size
exitcode, size = self.get_memory_size(mem_id)
if exitcode != 0:
return (exitcode, size)
# Get data
exitcode, current_data = self.get_memory(mem_id, 0, size)
if exitcode != 0:
return (exitcode, current_data)
# Too large, but starts within bounds
if start_offset + len(data) > size and start_offset < size:
new_data = current_data[ : start_offset] + data
# Too large, and starts out of bounds (padding with zero bytes)
elif start_offset + len(data) > size and start_offset > size:
new_data = current_data + bytes(start_offset - size) + data
# Within bounds
else:
# New data
new_data = current_data[ : start_offset] + data + current_data[start_offset + len(data) : ]
return self.computer.memory.edit_memory_partition(('mem', mem_id), MemorySection(self.computer.memory.memorypartitions[('mem', mem_id)].name, len(new_data), new_data))
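# Hedged illustration (added, not part of the original source) of the three
# branches above, assuming an existing partition of size 8 holding b"ABCDEFGH":
#   edit_memory(mid, b"xy", 3)   -> b"ABCxyFGH"              (within bounds)
#   edit_memory(mid, b"xy", 7)   -> b"ABCDEFGxy"             (starts in bounds, grows)
#   edit_memory(mid, b"xy", 10)  -> b"ABCDEFGH\x00\x00xy"    (out of bounds, zero padded)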
def process_create(self, process):
"""Create a process, returning the PID.
Args: process -> the process to use"""
# Find a valid PID
for i in range(max(self.process_ids) if self.process_ids else 0):
    if not i in self.process_ids:
        # This PID is free
        current_pid = i
        break
else:
    # No holes, so add a new PID
    current_pid = (max(self.process_ids) if self.process_ids else -1) + 1
# Add the process
self.processes[current_pid] = process
self.process_ids.append(current_pid)
# Add the process to memory
self.computer.memory.add_memory_partition(('proc', current_pid), process.processmemory)
# Update the process
self.processes[current_pid].state = 'r'
self.processes[current_pid].pid = current_pid
self.processes[current_pid].initialize(self.computer)
return (0, current_pid)
def thread_create(self, pid, thread):
"""Create a thread in a process, returning the TID.
Args: pid -> the process ID to add to
thread -> the thread to use"""
if not pid in self.process_ids:
return (20, "PID doesn't exist.")
self.processes[pid].threads[len(self.processes[pid].threads)] = thread
self.processes[pid].threads[len(self.processes[pid].threads) - 1].tid = len(self.processes[pid].threads) - 1
return (0, len(self.processes[pid].threads) - 1)
def process_terminate(self, pid):
"""Terminate a process.
Args: pid -> the process ID to terminate"""
if not pid in self.process_ids:
return (20, "PID doesn't exist.")
self.processes[pid].state = 't'
return (0, None)
def process_resume(self, pid):
"""Resume a terminated process.
Args: pid -> the process ID to resume"""
if not pid in self.process_ids:
return (20, "PID doesn't exist.")
self.processes[pid].state = 'r'
return (0, None)
def process_delete(self, pid):
"""Delete a process.
Args: pid -> the process ID to delete."""
if not pid in self.process_ids:
return (20, "PID doesn't exist.")
del self.processes[pid].pid
self.process_ids.remove(pid)
del self.processes[pid]
self.computer.memory.delete_memory_partition(('proc', pid))
return (0, None)
def thread_terminate(self, pid, tid):
"""Terminate a thread.
Args: pid -> the process ID
tid -> the thread ID"""
if not pid in self.process_ids:
return (20, "PID doesn't exist.")
if not tid in self.processes[pid].threads:
return (21, "TID dosen't exist.")
self.processes[pid].threads[tid].running = False
return (0, None)
def thread_resume(self, pid, tid):
"""Resume a thread.
Args: pid -> the process ID
tid -> the thread ID"""
if not pid in self.process_ids:
return (20, "PID doesn't exist.")
if not tid in self.processes[pid].threads:
return (21, "TID dosen't exist.")
self.processes[pid].threads[tid].running = True
return (0, None)
def thread_delete(self, pid, tid):
"""Delete a thread.
Args: pid -> the process ID
tid -> the thread ID"""
if not pid in self.process_ids:
return (20, "PID doesn't exist.")
if not tid in self.processes[pid].threads:
return (21, "TID dosen't exist.")
del self.processes[pid].threads[tid]
return (0, None)
def process_fork(self, pid):
"""Fork a process.
Args: pid -> the process ID to fork"""
if not pid in self.process_ids:
return (20, "PID doesn't exist.")
return self.process_create(copy.deepcopy(self.processes[pid]))
def thread_fork(self, pid, tid):
"""Fork a thread.
Args: pid -> the process ID
tid -> the thread ID to fork"""
if not pid in self.process_ids:
return (20, "PID doesn't exist.")
if not tid in self.processes[pid].threads:
return (21, "TID dosen't exist.")
return self.thread_create(pid, copy.deepcopy(self.processes[pid].threads[tid]))
def thread_await(self, pid, tid):
"""Wait until a thread is done.
Args: pid -> the process ID
tid -> the thread ID"""
if not pid in self.process_ids:
return (20, "PID doesn't exist.")
if not tid in self.processes[pid].threads:
return (21, "TID dosen't exist.")
while self.processes[pid].threads[tid].running or self.processes[pid].threads[tid].waiting:
pass
return (0, None)
def process_await(self, pid):
"""Wait until a process is done.
Args: pid -> the process ID"""
if not pid in self.process_ids:
return (20, "PID doesn't exist.")
while self.processes[pid].state == 'r':
pass
return (0, None)
def run_executable_data(self, data):
"""Run executable data and load it, retuning the process.
Args: data -> data to run"""
# Get the beginning data offset
data_offset = int.from_bytes(data[0 : 4], byteorder='little')
# Split the data
data = data[4 : ]
# Get the code section
code_section = data[ : data_offset]
# Get the data section
data_section = data[data_offset : ]
# Create the process memory
processmemory = ProcessMemory(code_section, data_section, b'')
# Create the thread
thread = PThread(0, MemorySection('stack', 0, b''), None)
# Create process
process = Process(processmemory, {0 : thread}, 't')
return process
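# Hedged sketch (added, not part of the original source) of the executable
# layout run_executable_data expects: a 4-byte little-endian data offset, then
# the code section, then the data section.  For example,
#   blob = (8).to_bytes(4, byteorder='little') + b"CODEBYTE" + b"DATA"
#   process = operating_system.run_executable_data(blob)
# gives code_section == b"CODEBYTE" and data_section == b"DATA", wrapped in a
# single-thread Process in the terminated ('t') state; it still has to be
# registered with process_create and resumed before it runs.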
def update_process_memory_global(self, pid, tid):
"""Update process PID's process memory in processes from the memory. Updates all running processes CPU memory as well.
Args: pid -> process id to update"""
self.computer.memory.memorypartitions[('proc', pid)].data = self.processes[pid].processmemory.data
# Update all processes in CPU
for cid, cpu in enumerate(self.computer.cpu.cores):
try:
if cpu.pname == ('proc', pid):
# Update this CPU core
self.computer.cpu.cores[cid].processmemory.data = self.processes[pid].processmemory.data
except Exception:
pass
def halt_thread(self, pid, tid, exitcode):
"""Halt a thread.
Args: pid -> process id
tid -> thread id to halt"""
# Terminate the program with exitcode
self.processes[pid].threads[tid].output = (exitcode, None)
e_exitcode = self.processes[pid].threads[tid].stack.set_data(self.processes[pid].threads[tid].stack.data + int.to_bytes(exitcode, 2, byteorder='little'))
self.processes[pid].threads[tid].running = False
if exitcode != 0:
self.processes[pid].state = 't'
self.processes[pid].output = (exitcode, None)
if not any([self.processes[pid].threads[t].running for t in self.processes[pid].threads]):
# All threads are done
self.processes[pid].state = 't'
self.processes[pid].output = (exitcode, None)
return e_exitcode
def systemcall(self, pid, tid):
"""Preform a system call.
Args: pid -> process ID of the process that called the system call
tid -> thread ID of the thread that called the system call"""
try:
self.processes[pid].threads[tid].waiting = True
# Wait until the CPU has finished the thread (and registers are committed)
while True:
ready = True
for cpu in self.computer.cpu.cores:
try:
if cpu.pname == ('proc', pid) and cpu.tid == tid:
ready = False
except Exception:
pass
if ready:
break
# Get the system call ID
syscallid = int.from_bytes(self.processes[pid].threads[tid].registers['RAX'].get_bytes(0, 4)[1], byteorder='little')
# Run the system call (NOTE: all syscalls must call update_process_memory_global after modifying memory)
# NOTE: All system calls must modify memory in the processes memory data, not global memory data. Using the method update_process_memory_global, memory can be synced up with all processes.
if syscallid == 0:
# Terminate with exit code in RBX
s_exitcode = int.from_bytes(self.processes[pid].threads[tid].registers['RBX'].get_bytes(0, 4)[1], byteorder='little')
exitcode = self.halt_thread(pid, tid, s_exitcode)
elif syscallid == 1:
# Write to the processes STDOut with the beginning offset in RBX, and the length in RCX
begin_offset = int.from_bytes(self.processes[pid].threads[tid].registers['RBX'].get_bytes(0, 4)[1], byteorder='little')
length = int.from_bytes(self.processes[pid].threads[tid].registers['RCX'].get_bytes(0, 4)[1], byteorder='little')
# Get the data
processmemory_use = self.processes[pid].get_processmemory_thread(tid)
exitcode, data = processmemory_use.get_bytes(begin_offset, length)
if exitcode != 0:
exitcode = (exitcode, None)
else:
# Write the data to the STDOut
exitcode = self.processes[pid].stdout.write(data, self.terminal)
elif syscallid == 2:
# Read from the processes STDIn with the length in RBX and save it to the thread's stack
length = int.from_bytes(self.processes[pid].threads[tid].registers['RBX'].get_bytes(0, 4)[1],
# deep_qa-master/deep_qa/contrib/models/multiple_choice_bidaf.py
from copy import deepcopy
from typing import Any, Dict, List
from keras import backend as K
from keras.layers import Input
from overrides import overrides
from ...models.reading_comprehension.bidirectional_attention import BidirectionalAttentionFlow
from ...data.instances.mc_question_answer_instance import McQuestionAnswerInstance
from ...layers.attention.attention import Attention
from ...layers.backend.envelope import Envelope
from ...layers.backend.multiply import Multiply
from ...layers.wrappers.encoder_wrapper import EncoderWrapper
from ...layers.wrappers.time_distributed_with_mask import TimeDistributedWithMask
from ...training.text_trainer import TextTrainer
from ...training.models import DeepQaModel
class MultipleChoiceBidaf(TextTrainer):
"""
This class extends Seo et al.'s `Bidirectional Attention Flow model
<https://www.semanticscholar.org/paper/Bidirectional-Attention-Flow-for-Machine-Seo-Kembhavi/7586b7cca1deba124af80609327395e613a20e9d>`_,
which was originally applied to predicting spans from a passage, to answering multiple choice
questions.
The approach we're going to take here is to load a BiDAF model directly (literally taking all
of the parameters we need to construct the ``BidirectionalAttentionFlow`` model class),
applying it to a question and passage, and then adding a few layers on top to try to match the
predicted span to the answer options we have.
To match the predicted span to the answer options, we'll first construct a weighted
representation of the passage, weighted by the likelihood of each word in the passage being a
part of the span. Then we'll compare that representation to a representation for each answer
option.
Input:
- a passage of shape ``(batch_size, num_passage_words)``
- a question of shape ``(batch_size, num_question_words)``
- a set of answer options of shape ``(batch_size, num_options, num_option_words)``
Output:
- a probability distribution over the answer options, of shape ``(batch_size, num_options)``
Parameters
----------
bidaf_params : Dict[str, Any]
These parameters get passed to a
:class:`~deep_qa.models.reading_comprehension.bidirectional_attention.BidirectionalAttentionFlow`
object, which we load. They should be exactly the same as the parameters used to train the
saved model. There is one parameter that must be consistent across the contained BiDAF
model and this ``TextTrainer`` object, so we copy that parameter from the BiDAF params,
overwriting any parameters that you set for this ``MultipleChoiceBidaf`` model. This
parameter is "tokenizer".
train_bidaf : bool, optional (default=``False``)
Should we optimize the weights in the contained BiDAF model, or just the weights that we
define here? TODO(matt): setting this to ``True`` is currently incompatible with saving
and loading the ``MultipleChoiceBidaf`` model. Getting that to work is not a high
priority, as we're assuming you have far less multiple choice data, so you want a smaller
model, anyway.
num_options : int, optional (default=``None``)
For padding. How many options should we pad the data to? If ``None``, this is set from
the data.
num_option_words : int, optional (default=``None``)
For padding. How many words are in each answer option? If ``None``, this is set from
the data.
similarity_function : Dict[str, Any], optional (default={'type': 'bilinear'})
This is the similarity function used to compare an encoded span representation with encoded
option representations. These parameters get passed to a similarity function (see
:mod:`deep_qa.tensors.similarity_functions` for more info on what's acceptable). The
default similarity function with no parameters is a set of linear weights on the
concatenated inputs. Note that the inputs to this similarity function will have `different
sizes`, so the set of functions you can use is constrained (i.e., no dot product, etc.).
Also note that you almost certainly want to have some kind of bilinear interaction, or
linear with a hidden layer, or something, because fundamentally we want to say whether two
vectors are close in some projected space, which can't really be captured by a simple
linear similarity function.
Notes
-----
Porting the code to Keras 2 made this break for some reason that I haven't been able to figure
out yet. I told py.test to skip the test we had for this, so I'm moving it to ``contrib``
until such time as I get the test to actually pass.
"""
# pylint: disable=protected-access
def __init__(self, params: Dict[str, Any]):
bidaf_params = params.pop('bidaf_params')
params['tokenizer'] = deepcopy(bidaf_params.get('tokenizer', {}))
self._bidaf_model = BidirectionalAttentionFlow(bidaf_params)
self._bidaf_model.load_model()
self.train_bidaf = params.pop('train_bidaf', False)
self.num_options = params.pop('num_options', None)
self.num_option_words = params.pop('num_option_words', None)
self.similarity_function_params = params.pop('similarity_function', {'type': 'bilinear'})
if K.backend() == 'theano':
# This is a total hack. Sorry. But there's some crazy error in using the loaded BiDAF
# model in theano that's related to K.in_train_phase(), which is only relevant for
# dropout. We're not using dropout in the models we're learning here, so we just turn
# it off to avoid the crazy theano error. TODO(matt): It might make sense to turn off
# dropout in BiDAF during training for tensorflow, too.
K.set_learning_phase(0)
super(MultipleChoiceBidaf, self).__init__(params)
self.data_indexer = self._bidaf_model.data_indexer
# We need to not add any more words to the vocabulary, or the model will crash, because
# we're using the same embedding layer as BiDAF. So we finalize the data indexer, which
# will give us some warnings when we try to fit the indexer to the training data, but won't
# actually add anything. Also note that this has to happen _after_ we call the superclass
# constructor, or self.data_indexer will get overwritten. TODO(matt): make it so you can
# expand the embedding size after the fact in a loaded model (though that seems really hard
# to do correctly, especially in this setting where we're working directly with a loaded
# Keras model). An alternative would be to have our own embedding layer that's initialized
# from BiDAF's, use that, then use BiDAF for the phrase layer... Either way is pretty
# complicated.
self.data_indexer.finalize()
@overrides
def _build_model(self):
"""
Our basic outline here will be to run the BiDAF model on the question and the passage, then
compute an envelope over the passage for what words BiDAF thought were in the answer span.
Then we'll weight the BiDAF passage, and use the BiDAF encoder to encode the answer
options. Then we'll have a simple similarity function on top to score the similarity
between each answer option and the predicted answer span.
Getting the right stuff out of the BiDAF model is a little tricky. We're going to use the
same approach as done in :meth:`TextTrainer._build_debug_model
<deep_qa.training.trainer.TextTrainer._build_debug_model>`: we won't modify the model at
all, but we'll construct a new model that just changes the outputs to be various layers of
the original model.
"""
question_shape = self._bidaf_model._get_sentence_shape(self._bidaf_model.num_question_words)
question_input = Input(shape=question_shape, dtype='int32', name="question_input")
passage_shape = self._bidaf_model._get_sentence_shape(self._bidaf_model.num_passage_words)
passage_input = Input(shape=passage_shape, dtype='int32', name="passage_input")
options_shape = (self.num_options,) + self._bidaf_model._get_sentence_shape(self.num_option_words)
options_input = Input(shape=options_shape, dtype='int32', name='options_input')
# First we compute a span envelope over the passage, then multiply that by the passage
# representation.
bidaf_passage_model = self._get_model_from_bidaf(['question_input', 'passage_input'],
['final_merged_passage',
'span_begin_softmax',
'span_end_softmax'])
modeled_passage, span_begin, span_end = bidaf_passage_model([question_input, passage_input])
envelope = Envelope()([span_begin, span_end])
weighted_passage = Multiply()([modeled_passage, envelope])
# Then we encode the answer options the same way we encoded the question.
bidaf_question_model = self._get_model_from_bidaf(['question_input'], ['phrase_encoder'],
name="phrase_encoder_model")
# Total hack to make this compatible with TimeDistributedWithMask. Ok, ok, python's duck
# typing is kind of nice sometimes... At least I can get this to work, even though it's
# not supported in Keras.
bidaf_question_model.get_output_mask_shape_for = self.bidaf_question_model_mask_shape
embedded_options = TimeDistributedWithMask(bidaf_question_model, keep_dims=True)(options_input)
# Then we compare the weighted passage to each of the encoded options, and get a
# distribution over answer options. We'll use an encoder to get a single vector for the
# passage and for each answer option, then do an "attention" to get a distribution over
# answer options. We can think of doing other similarity computations (e.g., a
# decomposable attention) later.
passage_encoder = self._get_encoder(name="similarity", fallback_behavior="use default params")
option_encoder = EncoderWrapper(passage_encoder)
encoded_passage = passage_encoder(weighted_passage)
encoded_options = option_encoder(embedded_options)
attention_layer = Attention(deepcopy(self.similarity_function_params))
option_scores = attention_layer([encoded_passage, encoded_options])
return DeepQaModel(inputs=[question_input, passage_input, options_input],
outputs=option_scores)
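# Hedged shape summary (added for illustration, not part of the original source),
# following the Input/Output description in the class docstring:
#   question_input : (batch_size, num_question_words)
#   passage_input  : (batch_size, num_passage_words)
#   options_input  : (batch_size, num_options, num_option_words)
#   weighted_passage -> encoded_passage : one vector summarising the likely span
#   embedded_options -> encoded_options : one vector per answer option
#   option_scores  : (batch_size, num_options), a distribution over the options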
@staticmethod
def bidaf_question_model_mask_shape(input_shape):
return input_shape[:-1]
def _get_model_from_bidaf(self,
input_layer_names: List[str],
output_layer_names: List[str],
name=None):
"""
Returns a new model constructed from ``self._bidaf_model``. This model will be a subset of
BiDAF, with the inputs specified by ``input_layer_names`` and the outputs specified by
``output_layer_names``. For example, you can use this to get a model that outputs the
passage embedding, just before the span prediction layers, by calling
``self._get_model_from_bidaf(['question_input', 'passage_input'], ['final_merged_passage'])``.
"""
layer_input_dict = {}
layer_output_dict = {}
for layer in self._bidaf_model.model.layers:
layer_input_dict[layer.name] = layer.get_input_at(0)
layer_output_dict[layer.name] = layer.get_output_at(0)
input_layers = [layer_input_dict[name] for
'a6_02_8': [0, 1, 8, 9],
'a6_02_9': [0, 1, 8, 9],
'a6_02_10': [0, 1, 8, 9],
'a6_02_11': [0, 1, 8, 9],
'a6_02_12a': [0, 1, 8, 9],
'a6_02_13': [0, 1, 8, 9],
'a6_02_14': [0, 1, 8, 9],
'a6_02_15': [0, 1, 8, 9],
'a6_04': [0, 1, 8, 9],
'a6_05': [0, 1, 8, 9],
'a6_06_1d': range(1, 31 + 1) + [99],
'a6_06_1m': range(1, 12 + 1) + [99],
'a6_06_1y': range(1900, MAX_YEAR) + [9999],
'a6_06_2d ': range(1, 31 + 1) + [99],
'a6_06_2m': range(1, 12 + 1) + [99],
'a6_06_2y': range(1900, MAX_YEAR) + [9999],
'a6_07d': range(1, 31 + 1) + [99],
'a6_07m': range(1, 12 + 1) + [99],
'a6_07y': range(1900, MAX_YEAR) + [9999],
'a6_09': [0, 1, 8, 9],
'a6_10': [0, 1, 8, 9],
'c1_01': [1, 2, 8, 9],
'c1_02': [1, 2, 3, 8, 9],
'c1_03': [0, 1, 8, 9],
'c1_04': [1, 2, 8, 9],
'c1_05a': [1, 2, 3, 4, 5, 6, 8, 9],
'c1_06a': [1, 2, 3, 4, 5, 8, 9],
'c1_07': [1, 2, 3, 4, 8, 9],
'c1_08a': [1, 2, 8, 9],
'c1_09': [1, 2, 8, 9],
'c1_10': [1, 8, 9],
'c1_10d': range(1, 31 + 1) + [99],
'c1_10m': range(1, 12 + 1) + [99],
'c1_10y': range(1900, MAX_YEAR) + [9999],
'c1_11': [1, 2, 8, 9],
'c1_12': [0, 1, 8, 9],
'c1_13': [0, 1, 8, 9],
'c1_14': [0, 1, 8, 9],
'c1_15': [0, 1, 8, 9],
'c1_16': [0, 1, 8, 9],
'c1_17': [0, 1, 8, 9],
'c1_18': [0, 1, 8, 9],
'c1_19_1': [0, 1, 8, 9],
'c1_19_2': [0, 1, 8, 9],
'c1_19_3': [0, 1, 8, 9],
'c1_19_4a': [0, 1, 8, 9],
'c1_19_5': [0, 1, 8, 9],
'c1_19_6': [0, 1, 8, 9],
'c1_20a': [1, 2, 3, 4, 5, 6, 8, 9],
'c1_21a': [1, 2, 3, 4, 5, 6, 8, 9],
'c1_22a': [1, 2, 3, 4, 5, 8, 9],
'c1_24': [1, 8, 9],
'c1_24d': range(1, 31 + 1) + [99],
'c1_24m': range(1, 12 + 1) + [99],
'c1_24y': range(1900, MAX_YEAR) + [9999],
'c1_25a': [0, 1, 2, 3, 4, 5, 6, 8, 9],
'c1_26': [0, 1, 2],
'c2_01_1': [0, 1, 8, 9],
'c2_01_2': [0, 1, 8, 9],
'c2_01_3': [0, 1, 8, 9],
'c2_01_4': [0, 1, 8, 9],
'c2_01_5': [0, 1, 8, 9],
'c2_01_6': [0, 1, 8, 9],
'c2_01_7': [0, 1, 8, 9],
'c2_01_8': [0, 1, 8, 9],
'c2_01_9': [0, 1, 8, 9],
'c2_01_10': [0, 1, 8, 9],
'c2_01_11': [0, 1, 8, 9],
'c2_01_12': [0, 1, 8, 9],
'c2_01_14': [0, 1, 8, 9],
'c2_02a': [1, 2, 3, 4, 5, 6, 8, 9],
'c2_03': [1, 2, 3, 8, 9],
'c2_04': [0, 1, 8, 9],
'c2_05a': [1, 2, 3, 4, 5, 6, 8, 9],
'c2_06': [1, 2, 8, 9],
'c2_07': [1, 2, 8, 9],
'c2_08a': [1, 2, 3, 8, 9],
'c2_09': [0, 1, 8, 9],
'c2_10a': [1, 2, 3, 4, 5, 6, 8, 9],
'c2_11': [0, 1, 8, 9],
'c2_12': [1, 2, 3, 4, 5, 8, 9],
'c2_13a': [1, 2, 3, 4, 5, 8, 9],
'c2_15a': [1, 2, 3, 4, 5, 6, 8, 9],
'c2_17': [1, 2, 3, 4, 8, 9],
'c2_18': [0, 1, 8, 9],
'c3_01': [0, 1, 8, 9],
'c3_02': [0, 1, 8, 9],
'c3_03_1': [0, 1, 8, 9],
'c3_03_2': [0, 1, 8, 9],
'c3_03_3': [0, 1, 8, 9],
'c3_03_4a': [0, 1, 8, 9],
'c3_03_5': [0, 1, 8, 9],
'c3_03_6': [0, 1, 8, 9],
'c3_04': [0, 1, 8, 9],
'c3_05': [0, 1, 8, 9],
'c3_06': [0, 1, 8, 9],
'c3_07': [0, 1, 8, 9],
'c3_08': [1, 2, 3, 4, 8, 9],
'c3_09': [0, 1, 8, 9],
'c3_10': [1, 2, 8, 9],
'c3_11': [0, 1, 8, 9],
'c3_12': [0, 1, 8, 9],
'c3_13': [0, 1, 8, 9],
'c3_14a': [1, 2, 3, 4, 5, 6, 8, 9],
'c3_15': [1, 2, 8, 9],
'c3_16': [0, 1, 8, 9],
'c3_17': [0, 1, 8, 9],
'c3_18a': [1, 2, 3, 4, 5, 6, 8, 9],
'c3_19a': [1, 2, 3, 4, 5, 6, 8, 9],
'c3_20': [0, 1, 8, 9],
'c3_21a': [1, 2, 3, 4, 5, 6, 8, 9],
'c3_22a': [1, 2, 3, 4, 5, 6, 8, 9],
'c3_23': [0, 1, 8, 9],
'c3_24': [0, 1, 8, 9],
'c3_25': [0, 1, 8, 9],
'c3_26': [0, 1, 8, 9],
'c3_27a': [1, 2, 3, 4, 5, 6, 8, 9],
'c3_28a': [1, 2, 3, 4, 5, 6, 8, 9],
'c3_29': [0, 1, 8, 9],
'c3_30a': [1, 2, 3, 4, 5, 6, 8, 9],
'c3_31a': [1, 2, 3, 4, 5, 6, 8, 9],
'c3_32': [0, 1, 8, 9],
'c3_33': [0, 1, 8, 9],
'c3_34': [0, 1, 8, 9],
'c3_35': [0, 1, 8, 9],
'c3_36': [0, 1, 8, 9],
'c3_37': [0, 1, 8, 9],
'c3_38': [0, 1, 8, 9],
'c3_39': [0, 1, 8, 9],
'c3_40': [0, 1, 8, 9],
'c3_41': [0, 1, 8, 9],
'c3_42': [0, 1, 8, 9],
'c3_44': [0, 1, 8, 9],
'c3_45a': [1, 8, 9],
'c3_46': [0, 1, 8, 9],
'c3_47': [0, 1, 8, 9],
'c3_48': [0, 1, 8, 9],
'c3_49': [0, 1, 8, 9],
'c4_01': [0, 1, 8, 9],
'c4_02a': [1, 2, 3, 4, 5, 6, 8, 9],
'c4_03': [0, 1, 8, 9],
'c4_04': [1, 2, 3, 8, 9],
'c4_05': [1, 2, 3, 8, 9],
'c4_06': [0, 1, 8, 9],
'c4_07a': [1, 8, 9],
'c4_08a': [1, 2, 3, 4, 5, 6, 8, 9],
'c4_09': [0, 1, 8, 9],
'c4_10a': [1, 2, 3, 4, 5, 6, 8, 9],
'c4_11': [0, 1, 8, 9],
'c4_12': [0, 1, 8, 9],
'c4_13a': [1, 2, 3, 4, 5, 6, 8, 9],
'c4_14': [0, 1, 8, 9],
'c4_15': [0, 1, 8, 9],
'c4_16': [0, 1, 8, 9],
'c4_17a': [1, 2, 3, 4, 5, 6, 8, 9],
'c4_18': [0, 1, 8, 9],
'c4_19a': [1, 2, 3, 4, 5, 6, 8, 9],
'c4_20': [0, 1, 8, 9],
'c4_22': [0, 1, 8, 9],
'c4_23': [0, 1, 8, 9],
'c4_24': [0, 1, 8, 9],
'c4_25': [0, 1, 8, 9],
'c4_26': [0, 1, 8, 9],
'c4_27': [1, 2, 3, 8, 9],
'c4_28': [0, 1, 8, 9],
'c4_29': [0, 1, 8, 9],
'c4_30': [0, 1, 8, 9],
'c4_31_1': [1, 2, 3, 4, 5, 8, 9],
'c4_31_2': [1, 2, 3, 4, 5, 8, 9],
'c4_32': [1, 2, 3, 4, 5, 8, 9],
'c4_33a': [1, 2, 3, 4, 5, 6, 8, 9],
'c4_34': [0, 1, 8, 9],
'c4_35': [0, 1, 8, 9],
'c4_36': [0, 1, 8, 9],
'c4_37a': [1, 2, 3, 4, 5, 6, 8, 9],
'c4_38': [0, 1, 8, 9],
'c4_39': [0, 1, 8, 9],
'c4_40': [0, 1, 8, 9],
'c4_41': [0, 1, 8, 9],
'c4_42': [0, 1, 8, 9],
'c4_43': [0, 1, 8, 9],
'c4_44': [0, 1, 8, 9],
'c4_46': [0, 1, 8, 9],
'c4_47_1': [0, 1, 8, 9],
'c4_47_2': [0, 1, 8, 9],
'c4_47_3': [0, 1, 8, 9],
'c4_47_4': [0, 1, 8, 9],
'c4_47_5': [0, 1, 8, 9],
'c4_47_6': [0, 1, 8, 9],
'c4_47_7': [0, 1, 8, 9],
'c4_47_8a': [0, 1, 8, 9],
'c4_47_9': [0, 1, 8, 9],
'c4_47_10': [0, 1, 8, 9],
'c4_47_11': [0, 1, 8, 9],
'c4_48': [0, 1, 8, 9],
'c4_49a': [1, 2, 3, 4, 5, 6, 8, 9],
'c5_01': [0, 1, 8, 9],
'c5_02_1': [0, 1, 8, 9],
'c5_02_2': [0, 1, 8, 9],
'c5_02_3': [0, 1, 8, 9],
'c5_02_4': [0, 1, 8, 9],
'c5_02_5': [0, 1, 8, 9],
'c5_02_6': [0, 1, 8, 9],
'c5_02_7': [0, 1, 8, 9],
'c5_02_8': [0, 1, 8, 9],
'c5_02_9': [0, 1, 8, 9],
'c5_02_10': [0, 1, 8, 9],
'c5_02_11a': [0, 1, 8, 9],
'c5_02_12': [0, 1, 8, 9],
'c5_02_13': [0, 1, 8, 9],
'c5_02_14': [0, 1, 8, 9],
'c5_04': [0, 1, 8, 9],
'c5_05': [0, 1, 8, 9],
'c5_06_1d': range(1, 31 + 1) + [99],
'c5_06_1m': range(1, 12 + 1) + [99],
'c5_06_1y': range(1900, MAX_YEAR) + [9999],
'c5_06_2d ': range(1, 31 + 1) + [99],
'c5_06_2m': range(1, 12 + 1) + [99],
'c5_06_2y': range(1900, MAX_YEAR) + [9999],
'c5_07_1a': [1, 2, 8, 9],
'c5_07_2a': [1, 2, 8, 9],
'c5_08d': range(1, 31 + 1) + [99],
'c5_08m': range(1, 12 + 1)
"""module to deal with gaussian cube type data
NB: for all transformations, the cubes coordinate system is understood to be
A = np.array([
[['(x0,y0,z0)', '(x0,y0,z1)'],
['(x0,y1,z0)', '(x0,y1,z1)']],
[['(x1,y0,z0)', '(x1,y0,z1)'],
['(x1,y1,z0)', '(x1,y1,z1)']]
])
which leads to;
A.shape -> (x length, y length, z length)
"""
from collections import OrderedDict
from itertools import product
import warnings
import numpy
from ejplugins import validate_against_schema
with warnings.catch_warnings(record=True):
warnings.filterwarnings("ignore", category=ImportWarning)
import pymatgen as pym
from pymatgen.io.ase import AseAtomsAdaptor
import numpy as np
import ase
from ipyatom.geometry2d_utils import minimum_bounding_box
import ipyatom.geometry3d_utils as g3
from scipy.linalg import expm
from scipy.ndimage import zoom as ndzoom
from scipy.interpolate import interpn
from scipy.spatial.qhull import Delaunay
from jsonschema import validate
from jsonextended import units as eunits
from ipyatom.utils import slice_mask, round_to_base, get_default_atom_map
from ipyatom.repeat_cell import atoms_to_dict
def gcube_to_dict(cube, cell_vectors, centre=(0., 0., 0.), name="", dtype="", vstruct=None, color_bbox="black"):
""" convert gaussian cube data to visual dict
Parameters
----------
cube: numpy.array
cell_vectors: list
[[a1,a2,a3],[b1,b2,b3],[c1,c2,c3]]
centre: list
[x, y, z]
name: str
name of structure
dtype: str
label of density type (e.g. charge or spin)
vstruct: dict
an existing vstruct to append to
color_bbox: str or None
color of outline bbox
Returns
-------
dict: the new or existing vstruct with a 'repeat_density' element appended
"""
a, b, c = cell_vectors
centre = 0.5 * (np.array(a) + np.array(b) + np.array(c))
output = {'type': 'repeat_density',
'name': name,
'dtype': dtype,
'centre': centre.tolist(),
'dcube': cube.copy(),
'cell_vectors': {"a": a, "b": b, "c": c},
'color_bbox': color_bbox,
'transforms': []}
if vstruct is not None:
vstruct["elements"].append(output)
return vstruct
else:
return {'elements': [output], 'transforms': []}
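# Hedged usage sketch (added for illustration, not part of the original module):
# wrapping a raw numpy cube for a cubic 4 Angstrom cell.
#   >>> cube = np.zeros((10, 10, 10))
#   >>> vstruct = gcube_to_dict(cube, [[4, 0, 0], [0, 4, 0], [0, 0, 4]],
#   ...                         name="bulk", dtype="charge")
#   >>> vstruct["elements"][0]["type"]
#   'repeat_density'
#   >>> vstruct["elements"][0]["centre"]
#   [2.0, 2.0, 2.0]
# Note that the centre is recomputed from the cell vectors inside the function,
# so the `centre` argument is effectively ignored as written.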
# def ejdata_to_dict(data, name="", dtype="charge", lunit="angstrom", vstruct=None, color_bbox="black",
# retrieve_atoms=True, atom_map=None, **kwargs):
# """ convert ejplugin data to visual dict
#
# Parameters
# ----------
# data: dict
# must contain density and cell_vectors keys
# name: str
# name of structure
# dtype: str
# density type ("charge" or "spin")
# lunit: str
# length unit
# vstruct: dict
# an existing vstruct to append to
# color_bbox: str or None
# color of outline bbox
# retrieve_atoms: bool
# if present retrieve atomic positions as repeat_cell element (requires symbols and fcoords)
# atom_map: None or dict
# a mapping of atom labels to keys; ["radius", "color_fill", "color_outline", "transparency"],
# e.g. {"H": {"radius": 1, "color_fill": '#bfbfbf', "color_outline": None, "transparency": 1.}, ...}
# kwargs : object
# additional per atom parameters (must be lists the same length as number of atoms), e.g. charge=[0,1,-1]
#
# Returns
# -------
#
# """
# gkey = "{}_density".format(dtype)
# if gkey not in data or "cell_vectors" not in data:
# raise ValueError("data does not contain both cell_vectors and {} keys".format(gkey))
# validate(data["cell_vectors"], {"type": "object", "required": ["a", "b", "c"],
# "properties": {
# "a": {"type": "object", "required": ["units", "magnitude"]},
# "b": {"type": "object", "required": ["units", "magnitude"]},
# "c": {"type": "object", "required": ["units", "magnitude"]}
# }})
# cell = eunits.combine_quantities(data["cell_vectors"])
# cell = eunits.apply_unitschema(cell, {"a": lunit, "b": lunit, "c": lunit}, as_quantity=False)
# cell_vectors = [cell["a"].tolist(), cell["b"].tolist(), cell["c"].tolist()]
# output = gcube_to_dict(data[gkey], cell_vectors, name=name, dtype=dtype,
# vstruct=vstruct, color_bbox=color_bbox)
#
# if "symbols" in data and "fcoords" in data and retrieve_atoms:
# atoms = ase.Atoms(symbols=data["symbols"], scaled_positions=data["fcoords"], cell=cell_vectors)
# output = atoms_to_dict(atoms, name=name, color_bbox=None, vstruct=output, atom_map=atom_map, **kwargs)
# elif "symbols" in data and "ccoords" in data and retrieve_atoms:
# atoms = ase.Atoms(symbols=data["symbols"], positions=data["ccoords"], cell=cell_vectors)
# output = atoms_to_dict(atoms, name=name, color_bbox=None, vstruct=output, atom_map=atom_map, **kwargs)
#
# return output
def ejdata_to_dict(data, name="", lunit="angstrom", vstruct=None, color_bbox="black",
retrieve_atoms=True, atom_map=None, **kwargs):
""" convert ejplugin data to visual dict
Parameters
----------
data: dict
must contain density and cell_vectors keys
name: str
name of structure
lunit: str
length unit
vstruct: dict
an existing vstruct to append to
color_bbox: str or None
color of outline bbox
retrieve_atoms: bool
if present retrieve atomic positions as repeat_cell element (requires symbols and fcoords)
atom_map: None or dict
a mapping of atom labels to keys; ["radius", "color_fill", "color_outline", "transparency"],
e.g. {"H": {"radius": 1, "color_fill": '#bfbfbf', "color_outline": None, "transparency": 1.}, ...}
kwargs : object
additional per atom parameters (must be lists the same length as number of atoms), e.g. charge=[0,1,-1]
Returns
-------
vstruct: dict
"""
validate_against_schema(data, "edensity")
data = eunits.combine_quantities(data)
data = eunits.apply_unitschema(data, {"a": lunit, "b": lunit, "c": lunit, "ccoords": lunit}, as_quantity=False)
cell = data["cell_vectors"]
cell_vectors = [cell["a"].tolist(), cell["b"].tolist(), cell["c"].tolist()]
vstruct = {'elements': [], 'transforms': []} if vstruct is None else vstruct
for density in data["densities"]:
vstruct = gcube_to_dict(density["magnitude"], cell_vectors,
name=name, dtype=density["type"],
vstruct=vstruct, color_bbox=color_bbox)
if "atoms" in data and retrieve_atoms:
adict = {"cell": cell_vectors}
if "symbols" in data["atoms"]:
adict["symbols"] = data["atoms"]["symbols"]
else:
adict["numbers"] = data["atoms"]["atomic_number"]
if "ccoords" in data["atoms"]:
adict["positions"] = data["atoms"]["ccoords"]
else:
adict["scaled_positions"] = data["atoms"]["fcoords"]
atoms = ase.Atoms(**adict)
vstruct = atoms_to_dict(atoms, name=name, color_bbox=None, vstruct=vstruct, atom_map=atom_map, **kwargs)
return vstruct
_atom_map_schema = {
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9]*$": {
"type": "object",
"required": ["radius", "color_fill"],
"properties": {
"radius": {"type": "number"},
}
}
}
}
def atoms_to_rdensity(atoms, cube_dims=(50, 50, 50), name="", dtype="nuclei", color_bbox="black", vstruct=None,
atom_map=None, rdist_implement=2):
""" convert an atom object to a repeat density
Parameters
----------
atoms: pymatgen.core.structure.Structure or ase.Atoms
cube_dims: tuple of int
(adim, bdim, cdim) of final cube
name: str
name of structure
dtype: str
label of density type (default "nuclei")
color_bbox: str or None
color of outline bbox
vstruct: dict
an existing vstruct to append to
atom_map: None or dict
a mapping of atom labels to keys; ["radius", "color_fill"],
e.g. {"H": {"radius": 1, "color_fill": '#bfbfbf'}, ...}
rdist_implement: int
implementation for assigning coordinate to atom site (for optimisation testing)
Returns
-------
vstruct: dict
color_map: dict
{(<label>, <color>): <value in dcube>, ...}
"""
if isinstance(atoms, ase.atoms.Atoms):
atoms = AseAtomsAdaptor.get_structure(atoms)
if not isinstance(atoms, pym.core.structure.Structure):
raise ValueError("struct must be ase.Atoms or pymatgen.Structure")
if vstruct is not None:
if "elements" not in vstruct:
raise ValueError("the existing vstruct does not have an elements key")
# get atom data
if atom_map is None:
atom_map = get_default_atom_map()
validate(atom_map, _atom_map_schema)
atom_data = atoms.as_dict()
a, b, c = [_ for _ in atoms.lattice.matrix]
centre = 0.5 * (a + b + c)
sites = []
for i, site in enumerate(atom_data["sites"]):
label = site["label"]
site_data = {"ccoord": site["xyz"], "label": label}
site_data.update(atom_map[label])
sites.append(site_data)
# create a map of site labels to color and index
color_map = {(d[0], d[1]): i + 1 for i, d in enumerate(sorted(
set([(site["label"], site["color_fill"]) for site in sites])))}
# create fractional coordinates cube
ndim, mdim, ldim = cube_dims
gcube = np.full((ldim, mdim, ndim), np.nan)
indices = np.array(list(product(range(ldim), range(mdim), range(ndim))))
# convert indices to cartesian coordinates
coords = np.einsum('...jk,...k->...j', np.array([a, b, c]).T,
np.divide(np.asarray(indices, dtype=np.float64),
np.array((ldim - 1, mdim - 1, ndim - 1), dtype=np.float64))
) # - centre
# if coord within radial distance of atom set its value
# TODO time/profile implementations and optimise
# a) basic for loop implementation
if rdist_implement == 1:
for i, coord in enumerate(coords):
for site in sites:
if abs(np.linalg.norm(coord - site["ccoord"])) < site["radius"]:
gcube[indices[i][0], indices[i][1], indices[i][2]] = color_map[(site["label"], site["color_fill"])]
break
# b) basic numpy implementation
elif rdist_implement == 2:
for site in sites:
mask = np.abs(np.linalg.norm(coords - site["ccoord"], axis=1)) < site["radius"]
gcube[indices[mask, 0], indices[mask, 1], indices[mask, 2]] = color_map[(site["label"], site["color_fill"])]
# c) implementation where we avoid computing distances for coordinates already assigned to a site
# from testing this is actually slower (even for ~100 atom sites)
elif rdist_implement == 3:
unassigned_mask = np.full((coords.shape[0],), True)
for site in sites:
site_mask = np.full((coords.shape[0],), False)
site_mask[unassigned_mask] = (np.abs(np.linalg.norm(coords[unassigned_mask] - site["ccoord"], axis=1))
< site["radius"])
unassigned_mask = np.logical_and(unassigned_mask, np.logical_not(site_mask))
gcube[indices[site_mask, 0], indices[site_mask, 1], indices[site_mask, 2]] = color_map[(site["label"],
site["color_fill"])]
else:
raise ValueError("rdist_implement must be 1, 2 or 3")
output = {'type': 'repeat_density',
'name': name,
'dtype': dtype,
'centre': centre.tolist(),
'dcube': gcube.T,
'cell_vectors': {"a": a.tolist(), "b": b.tolist(), "c": c.tolist()},
'color_bbox': color_bbox,
'transforms': []}
if vstruct is not None:
vstruct["elements"].append(output)
return vstruct, color_map
else:
return {'elements': [output], 'transforms': []}, color_map
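# Illustrative usage sketch (not part of the original API; the structure,
# cube dimensions and names below are assumptions for demonstration only):
#   atoms = ase.Atoms("NaCl", positions=[[0, 0, 0], [2.8, 2.8, 2.8]],
#                     cell=[5.6, 5.6, 5.6])
#   vstruct, color_map = atoms_to_rdensity(atoms, cube_dims=(30, 30, 30),
#                                          name="rocksalt")
#   # color_map maps (label, fill colour) pairs to the integer values stored
#   # in vstruct["elements"][0]["dcube"], e.g. {("Cl", <fill>): 1, ("Na", <fill>): 2}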
def _repeat_repeat_density(vstruct, repeats=(0, 0, 0),
recentre=True):
"""
Parameters
----------
vstruct
repeats
recentre
Returns
-------
None: the element is modified in place
Examples
--------
>>> from pprint import pprint
>>> dstruct = {
... 'dcube':np.ones((3,2,1)),
... 'centre':[0.5,1.5,2.0],
... 'cell_vectors':{
... 'a':[1,0,0],
... 'b':[0,3,0],
... 'c':[0,0,4]}
... }
>>> dstruct["dcube"].shape
(3, 2, 1)
>>> _repeat_repeat_density(
... dstruct,(0,1,1))
>>> dstruct["dcube"].shape
(3, 4, 2)
>>> pprint(dstruct["cell_vectors"])
{'a': [1.0, 0.0, 0.0], 'b': [0.0, 6.0, 0.0], 'c': [0.0, 0.0, 8.0]}
>>> pprint(dstruct["centre"])
[0.5, 3.0, 4.0]
>>> pprint(dstruct["dcube"].tolist())
[[[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]],
[[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]],
[[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]]
"""
rep_a, rep_b, rep_c = repeats
reps = OrderedDict([('a', 1 + abs(rep_a)), ('b', 1 + abs(rep_b)), ('c', 1 + abs(rep_c))])
vstruct['dcube'] = np.tile(vstruct['dcube'],
list(reps.values()))
a = np.array(vstruct['cell_vectors']['a'], dtype=float)
| |
<gh_stars>1-10
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import docker
from docker.errors import DockerException, NotFound, BuildError, ImageNotFound
from docker.models.containers import Container
import json
import logging
import time
from azure.ai.ml._local_endpoints.vscode_debug.vscode_client import VSCodeClient
from azure.ai.ml._local_endpoints.errors import (
InvalidLocalEndpointError,
LocalEndpointNotFoundError,
LocalEndpointInFailedStateError,
DockerEngineNotAvailableError,
MultipleLocalDeploymentsFoundError,
LocalEndpointImageBuildError,
LocalEndpointImageBuildCondaError,
)
from azure.ai.ml._utils.utils import initialize_logger_info
from azure.ai.ml._local_endpoints.local_endpoint_mode import LocalEndpointMode
from azure.ai.ml.constants import LocalEndpointConstants
module_logger = logging.getLogger(__name__)
initialize_logger_info(module_logger, terminator="")
DEFAULT_LABELS = {
LocalEndpointConstants.LABEL_KEY_AZUREML_LOCAL_ENDPOINT: "",
LocalEndpointConstants.LABEL_KEY_ENDPOINT_NAME: "",
LocalEndpointConstants.LABEL_KEY_DEPLOYMENT_NAME: "",
LocalEndpointConstants.LABEL_KEY_ENDPOINT_JSON: "",
LocalEndpointConstants.LABEL_KEY_DEPLOYMENT_JSON: "",
LocalEndpointConstants.LABEL_KEY_AZUREML_PORT: "",
}
class DockerClient(object):
"""Client for interacting with User's Docker environment for local endpoints."""
def __init__(
self,
client: docker.DockerClient = None,
vscode_client: VSCodeClient = None,
):
self._lazy_client = client
self._vscode_client = vscode_client if vscode_client else VSCodeClient()
@property
def _client(self) -> docker.DockerClient:
"""Lazy initializer for docker-py client.
:return: docker.client.DockerClient
:raises: azure.ai.ml._local_endpoints.errors.DockerEngineNotAvailableError
"""
if self._lazy_client is None:
try:
self._lazy_client = docker.from_env()
except DockerException as e:
if "Error while fetching server API version" in str(e):
raise DockerEngineNotAvailableError()
raise
return self._lazy_client
def create_endpoint(
self,
endpoint_name: str,
endpoint_metadata: str,
build_directory: str,
image_name: str,
dockerfile_path: str,
) -> None:
try:
self._client.images.build(path=build_directory, tag=image_name, dockerfile=dockerfile_path)
except BuildError:
pass
self.delete(endpoint_name=endpoint_name, verify_exists=False)
labels = DEFAULT_LABELS.copy()
labels[LocalEndpointConstants.LABEL_KEY_ENDPOINT_NAME] = endpoint_name
labels[LocalEndpointConstants.LABEL_KEY_ENDPOINT_JSON] = endpoint_metadata
container_name = self._get_container_name(endpoint_name)
self._client.containers.run(
image_name,
name=container_name,
labels=labels,
detach=True,
tty=True,
publish_all_ports=True,
)
def create_deployment(
self,
endpoint_name: str,
deployment_name: str,
endpoint_metadata: str,
deployment_metadata: str,
build_directory: str,
dockerfile_path: str,
conda_source_path: str,
conda_yaml_contents: str,
volumes: dict,
environment: dict,
azureml_port: int,
local_endpoint_mode: LocalEndpointMode,
prebuilt_image_name: str = None,
) -> None:
"""Builds and runs an image from provided image context.
:param endpoint_name: name of local endpoint
:type endpoint_name: str
:param deployment_name: name of local deployment
:type deployment_name: str
:param endpoint_metadata: Endpoint entity information serialized.
:type endpoint_metadata: str
:param deployment_metadata: Deployment entity information serialized.
:type deployment_metadata: str
:param build_directory: directory on user's local system to write conda file
:type build_directory: str
:param dockerfile_path: directory on user's local system to write Dockerfile
:type dockerfile_path: str
:param conda_source_path: source of conda file (either path on user's local machine or environment ID)
:type conda_source_path: str
:param conda_yaml_contents: contents of user's conda file for docker build
:type conda_yaml_contents: str
:param volumes: dictionary of volumes to mount to docker container
:type volumes: dict
:param environment: dictionary of docker environment variables to set in container
:type environment: dict
:param azureml_port: Port exposed in Docker image for AzureML service.
:type azureml_port: int
:param local_endpoint_mode: Mode for how to create the local user container.
:type local_endpoint_mode: LocalEndpointMode
:param prebuilt_image_name: Name of pre-built image from customer if using BYOC flow.
:type prebuilt_image_name: str
"""
# Prepare image
if prebuilt_image_name is None:
image_name = self._get_image_name(endpoint_name, deployment_name)
module_logger.debug(f"Building local image [{image_name}].\n")
module_logger.debug(f"Build directory: {build_directory}\n")
module_logger.debug(f"Dockerfile path: {dockerfile_path}\n")
module_logger.debug(f"Image [{image_name}] is built.\n")
self._build_image(
build_directory=build_directory,
image_name=image_name,
dockerfile_path=dockerfile_path,
conda_source_path=conda_source_path,
conda_yaml_contents=conda_yaml_contents,
)
else:
image_name = prebuilt_image_name
try:
self._client.images.get(image_name)
except ImageNotFound:
module_logger.info(f"\nDid not find image '{image_name}' locally. Pulling from registry.")
try:
self._client.images.pull(image_name)
except NotFound:
raise InvalidLocalEndpointError(
message=f"Could not find image '{image_name}' locally or in registry. Please check your image name.",
no_personal_data_message="Could not find image locally or in registry. Please check your image name.",
)
module_logger.info("\nStarting up endpoint")
# Delete container if exists
self.delete(endpoint_name=endpoint_name, verify_exists=False)
labels = self.get_container_labels(
endpoint_name=endpoint_name,
deployment_name=deployment_name,
endpoint_metadata=endpoint_metadata,
deployment_metadata=deployment_metadata,
azureml_port=azureml_port,
)
module_logger.debug(f"Setting labels: {labels}\n")
module_logger.debug(f"Mounting volumes: {volumes}\n")
module_logger.debug(f"Setting environment variables: {environment}\n")
container_name = self._get_container_name(endpoint_name, deployment_name)
container = self._client.containers.create(
image_name,
name=container_name,
labels=labels,
volumes=self._reformat_volumes(volumes),
environment=environment,
detach=True,
tty=True,
publish_all_ports=True,
)
if local_endpoint_mode == LocalEndpointMode.VSCodeDevContainer:
try:
devcontainer_path = self._vscode_client.create_dev_container_json(
azureml_container=container,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
build_directory=build_directory,
image_name=image_name,
environment=environment,
volumes=volumes,
labels=labels,
)
finally:
# This pre-created container is only used for retrieving the entry script
# to add debugpy statements
container.remove()
app_path = environment[LocalEndpointConstants.ENVVAR_KEY_AML_APP_ROOT]
self._vscode_client.invoke_dev_container(devcontainer_path=devcontainer_path, app_path=app_path)
time.sleep(LocalEndpointConstants.DEFAULT_STARTUP_WAIT_TIME_SECONDS)
else:
container.start()
time.sleep(LocalEndpointConstants.DEFAULT_STARTUP_WAIT_TIME_SECONDS)
container.reload()
self._validate_container_state(
endpoint_name=endpoint_name, deployment_name=deployment_name, container=container
)
scoring_uri = self.get_scoring_uri(endpoint_name=endpoint_name, deployment_name=deployment_name)
module_logger.debug(f"Container [{container_name}] is up and running at {scoring_uri}\n")
def delete(self, endpoint_name: str, deployment_name: str = None, verify_exists: bool = True) -> None:
"""Deletes local endpoint / deployment.
:param endpoint_name: name of local endpoint
:type endpoint_name: str
:param deployment_name: name of local deployment
:type deployment_name: (str, optional)
:param verify_exists: Verify that the endpoint exists on deletion. Default: True
:type verify_exists: (bool, optional)
:raises: azure.ai.ml._local_endpoints.errors.LocalEndpointNotFoundError
"""
containers = self.list_containers(endpoint_name=endpoint_name, deployment_name=deployment_name)
if verify_exists and len(containers) == 0:
raise LocalEndpointNotFoundError(endpoint_name=endpoint_name, deployment_name=deployment_name)
for container in containers:
container.stop()
container.remove()
module_logger.debug(f"Endpoint container [{container.name}] is removed.\n")
def get_endpoint(self, endpoint_name: str) -> dict:
"""Returns metadata for local endpoint or deployment.
:param endpoint_name: name of local endpoint
:type endpoint_name: str
:returns dict: JSON dict representing user provided endpoint input
"""
container = self.get_endpoint_container(endpoint_name=endpoint_name)
if container is None:
raise LocalEndpointNotFoundError(endpoint_name=endpoint_name)
return self.get_endpoint_json_from_container(container=container)
def get_deployment(self, endpoint_name: str, deployment_name: str = None) -> dict:
"""Returns metadata for local deployment.
:param endpoint_name: name of local endpoint
:type endpoint_name: str
:param deployment_name: name of local deployment
:type deployment_name: (str, optional)
:returns dict: JSON dict representing user provided endpoint input
"""
container = self.get_endpoint_container(endpoint_name=endpoint_name, deployment_name=deployment_name)
if container is None:
raise LocalEndpointNotFoundError(endpoint_name=endpoint_name, deployment_name=deployment_name)
return self.get_deployment_json_from_container(container=container)
def get_scoring_uri(self, endpoint_name: str, deployment_name: str = None) -> str:
"""Returns scoring uri for local endpoint or deployment.
:param endpoint_name: name of local endpoint
:type endpoint_name: str
:param deployment_name: name of local deployment
:type deployment_name: (str, optional)
:raises: azure.ai.ml._local_endpoints.errors.LocalEndpointNotFoundError
:raises: azure.ai.ml._local_endpoints.errors.MultipleLocalDeploymentsFoundError
"""
container = self.get_endpoint_container(
endpoint_name=endpoint_name, deployment_name=deployment_name, verify_single_deployment=True
)
if container is None:
return
self._validate_container_state(
endpoint_name=endpoint_name, deployment_name=deployment_name, container=container
)
return self.get_scoring_uri_from_container(container=container)
def logs(self, endpoint_name: str, deployment_name: str, lines: int) -> str:
"""Returns logs from local deployment.
:param endpoint_name: name of local endpoint
:type endpoint_name: str
:param deployment_name: name of local deployment
:type deployment_name: str
:param lines: number of lines to retrieve from container logs
:type lines: int
:return: str
:raises: azure.ai.ml._local_endpoints.errors.LocalEndpointNotFoundError
"""
container = self.get_endpoint_container(endpoint_name, deployment_name=deployment_name)
if container is None:
raise LocalEndpointNotFoundError(endpoint_name=endpoint_name, deployment_name=deployment_name)
return container.logs(tail=int(lines)).decode()
def list_containers(self, endpoint_name: str = None, deployment_name: str = None, include_stopped: bool = True):
"""Returns a list of local endpoints.
:param endpoint_name: Name of local endpoint. If none, all local endpoints will be returned.
:type endpoint_name: (str, optional)
:param deployment_name: Name of local deployment. If none, all deployments under endpoint will be returned.
:type deployment_name: (str, optional)
:param include_stopped: Include stopped containers. Default: True.
:type include_stopped: (bool, optional)
:returns list[Container]: array of Container objects from docker-py library
"""
filters = {"label": [f"{LocalEndpointConstants.LABEL_KEY_AZUREML_LOCAL_ENDPOINT}"]}
if endpoint_name:
filters["label"].append(f"{LocalEndpointConstants.LABEL_KEY_ENDPOINT_NAME}={endpoint_name}")
if deployment_name:
filters["label"].append(f"{LocalEndpointConstants.LABEL_KEY_DEPLOYMENT_NAME}={deployment_name}")
return self._client.containers.list(filters=filters, all=include_stopped)
def get_container_labels(
self,
endpoint_name: str,
deployment_name: str,
endpoint_metadata: dict,
deployment_metadata: dict,
azureml_port: int,
) -> dict:
labels = DEFAULT_LABELS.copy()
labels[LocalEndpointConstants.LABEL_KEY_ENDPOINT_NAME] = endpoint_name
labels[LocalEndpointConstants.LABEL_KEY_DEPLOYMENT_NAME] = deployment_name
labels[LocalEndpointConstants.LABEL_KEY_ENDPOINT_JSON] = endpoint_metadata
labels[LocalEndpointConstants.LABEL_KEY_DEPLOYMENT_JSON] = deployment_metadata
labels[LocalEndpointConstants.LABEL_KEY_AZUREML_PORT] = str(azureml_port)
return labels
def get_endpoint_container(
self,
endpoint_name: str,
deployment_name: str = None,
verify_single_deployment: bool = False,
include_stopped: bool = True,
) -> Container:
"""Builds and runs an image from provided image context.
:param endpoint_name: name of local endpoint
:type endpoint_name: str
:param deployment_name: name of local deployment
:type deployment_name: (str, optional)
:param verify_single_deployment: Fail if more than one deployment container exists
:type verify_single_deployment: (bool, optional)
:param include_stopped: Include container even if it's stopped. Default: True.
:type include_stopped: (bool, optional)
:returns docker.models.containers.Container:
"""
containers = self.list_containers(
endpoint_name=endpoint_name, deployment_name=deployment_name, include_stopped=include_stopped
)
if len(containers) == 0:
return
if len(containers) > 1 and verify_single_deployment:
raise MultipleLocalDeploymentsFoundError(endpoint_name=endpoint_name)
return containers[0]
def get_endpoint_json_from_container(self, container: Container) -> dict:
if container:
data = container.labels[LocalEndpointConstants.LABEL_KEY_ENDPOINT_JSON]
return json.loads(data)
return
def get_deployment_json_from_container(self, container: Container) -> dict:
if container:
data = container.labels[LocalEndpointConstants.LABEL_KEY_DEPLOYMENT_JSON]
return json.loads(data)
return
def get_status_from_container(self, container: Container) -> str:
"""Returns status of container.
:param container: container of local Deployment
:type container: docker.models.containers.Container
:return str: container status
"""
return container.status
def get_scoring_uri_from_container(self, container: Container) -> str:
"""Returns scoring_uri of container.
:param container: container of local Deployment
:type container: docker.models.containers.Container
:return str: container scoring_uri
"""
port = 5001
# Example container.ports: {'5001/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '5001'}], '8883/tcp': None, '8888/tcp': None}
if container is not None and container.ports is not None:
azureml_port = container.labels["azureml-port"]
for docker_port, host_addresses in container.ports.items():
if azureml_port in docker_port and host_addresses is not None:
for address in host_addresses:
if "HostPort" in address:
port = address["HostPort"]
break
# TODO: resolve scoring path correctly
return f"http://localhost:{port}/score"
def _build_image(
self,
build_directory: str,
image_name: str,
dockerfile_path: str,
conda_source_path: str,
conda_yaml_contents: str,
) -> None:
try:
module_logger.info("\nBuilding Docker image from Dockerfile")
first_line = True
for status in | |
import pygame as pg
from pygame import mixer
import random
from math import sqrt
# initialize the pygame
pg.init()
# clock = pg.time.Clock()
screen = pg.display.set_mode((800, 600))
# game background
background = pg.image.load('imagens/brasilia2.jpg')
# game menu
menu_image = pg.image.load('imagens/menu.jpeg')
menu_arrow = pg.image.load('imagens/menu_seta.png')
menu_about = pg.image.load('imagens/sobre.jpeg')
# auxiliary images
quadro_fundo = pg.image.load('imagens/quadro_fundo.jpg')
coracao = pg.image.load('imagens/coracao.png')
coracao2 = pg.image.load('imagens/coracao.png')
coracao3 = pg.image.load('imagens/coracao.png')
# Ze Gotinha (vaccination mascot)
gotinha = pg.image.load('imagens/ze-gota.png')
you_tried = pg.image.load('imagens/at_least.jpg')
# title and icon
pg.display.set_caption("Gôtcha!")
icon = pg.image.load('imagens/icone-gotinha.png')
pg.display.set_icon(icon)
# Player
playerImg = pg.image.load('imagens/jogador.png')
# syringe
seringaImg = pg.image.load('imagens/seringa.png')
# Chloroquine
cloroquinaImg = pg.image.load('imagens/cloroquina.png')
# coronavirus
coronavirusImg = pg.image.load('imagens/coronavirus.png')
# Pfizer letter
pfizerImg = pg.image.load('imagens/carta-pfizer.png')
# Score
textX = 10
textY = 10
font = pg.font.Font('font/Roboto-Medium.ttf', 32)
# Virus and chloroquine passed count
passed_font = pg.font.Font('font/Roboto-Medium.ttf', 14)
# Texts Game Over
big_over_font = pg.font.Font('font/Roboto-Medium.ttf', 64)
small_over_font = pg.font.Font('font/Roboto-Medium.ttf', 16)
def new_game():
global playerX, playerY, playerX_change, playerY_change, seringaX, seringaY, seringaX_change, seringa_state
global cloroquinaX, cloroquinaY, cloroquinaX_change, coronavirusX, coronavirusY, coronavirusX_change, pfizerX
global pfizerY, pfizerX_change, score_value, count_miss_virus, count_chloroquine, count_miss_chloroquine
global count_letters, count_missed_letters
playerX = 10
playerY = 500
playerX_change = 0
playerY_change = 0
seringaX = -50
seringaY = -50
seringaX_change = 1.5
seringa_state = "ready" # 'ready': nao se ve a seringa na tela / 'fire': a seringa esta se movendo
cloroquinaX = 1500
cloroquinaY = random.randint(52, 536)
cloroquinaX_change = -0.4 # moves constantly
coronavirusX = 1300
coronavirusY = random.randint(52, 536)
coronavirusX_change = -0.5
pfizerX = 1600
pfizerY = random.randint(52, 536)
pfizerX_change = -0.3
score_value = 0
count_miss_virus = 0
count_chloroquine = 0
count_miss_chloroquine = 0
count_letters = 0
count_missed_letters = 0
def seta_menu(x, y):
screen.blit(menu_arrow, (x, y))
def hearts(x, y):
if count_chloroquine <= 0:
screen.blit(coracao, (x + 700, y + 5))
if count_chloroquine <= 1:
screen.blit(coracao2, (x + 730, y + 5))
if count_chloroquine <= 2:
screen.blit(coracao3, (x + 760, y + 5))
def show_score(x, y):
score = font.render(f"Score: {str(score_value)}", True, (255, 255, 255))
virus_pass = passed_font.render(f"Vírus passados: {str(count_miss_virus)}", True, (204, 204, 0))
letter_pass = passed_font.render(f"Cartas perdidas: {str(count_missed_letters)}", True, (204, 204, 0))
letter_get = passed_font.render(f"Cartas adquiridas: {str(count_letters)}", True, (0, 204, 0))
chloroquine_pass = passed_font.render(f"Caixas passadas: {str(count_miss_chloroquine)}", True,
(204, 204, 0))
screen.blit(score, (x, y))
screen.blit(virus_pass, (x + 370, y))
screen.blit(letter_pass, (x + 200, y))
screen.blit(letter_get, (x + 200, y + 20))
screen.blit(chloroquine_pass, (x + 370, y + 20))
def game_over_text():
over_text = big_over_font.render(f"GAME OVER", True, (255, 255, 255))
screen.blit(over_text, (200, 250))
def player(x, y):
screen.blit(playerImg, (x, y)) # "draws" on the screen
def cloroquina(x, y):
screen.blit(cloroquinaImg, (x, y))
def coronavirus(x, y):
screen.blit(coronavirusImg, (x, y))
def pfizer(x, y):
screen.blit(pfizerImg, (x, y))
def fire_seringa(x, y):
screen.blit(seringaImg, (x + 60, y + 16))
def is_collision(cloroquina_x, cloroquina_y, seringa_x, seringa_y,
corona_x, corona_y, pfizer_x, pfizer_y, player_x, player_y):
distancia_cloroq = sqrt((cloroquina_x - seringa_x) ** 2 + (cloroquina_y - seringa_y) ** 2)
distancia_corona = sqrt((corona_x - seringa_x) ** 2 + (corona_y - seringa_y) ** 2)
distancia_pfizer = sqrt((pfizer_x - seringa_x) ** 2 + (pfizer_y - seringa_y) ** 2)
dist_player_pfizer = sqrt((pfizer_x - player_x) ** 2 + (pfizer_y - player_y) ** 2)
dist_player_chloroq = sqrt((cloroquina_x - player_x) ** 2 + (cloroquina_y - player_y) ** 2)
matou_cloroq = distancia_cloroq <= 27
matou_corona = distancia_corona <= 27
matou_pfizer = distancia_pfizer <= 27
pegou_pfizer = dist_player_pfizer <= 48
pegou_cloroq = dist_player_chloroq <= 48
return matou_cloroq, matou_corona, matou_pfizer, pegou_pfizer, pegou_cloroq
def best_end_game():
main_text = big_over_font.render(f"PARABÉNS!", True, (255, 255, 255))
small_text = small_over_font.render(f"10 cartas da Pfizer obtidas!"
f" Imunização da população garantida!", True, (255, 255, 255))
small_text2 = small_over_font.render(f"Nenhum coronavírus ou caixa de cloroquina foi deixado pra trás!"
f"", True, (255, 255, 255))
small_text3 = small_over_font.render(f"Muitas vidas foram salvas!", True, (255, 255, 255))
screen.blit(quadro_fundo, (100, 100))
screen.blit(gotinha, (370, 210))
screen.blit(main_text, (200, 120))
screen.blit(small_text, (150, 375))
screen.blit(small_text2, (150, 400))
screen.blit(small_text3, (300, 425))
def cloroq_end_game():
main_text = big_over_font.render(f"PARABÉNS!", True, (255, 255, 255))
small_text = small_over_font.render(f"Todas as 10 cartas da Pfizer obtidas!"
f" Imunização da população garantida!", True, (255, 255, 255))
small_text2 = small_over_font.render(f"Nenhum coronavírus ou caixa de pacote foi deixado pra trás!"
f"", True, (255, 255, 255))
small_text3 = small_over_font.render(f"Muitas vidas foram salvas!", True, (255, 255, 255))
screen.blit(quadro_fundo, (100, 100))
screen.blit(gotinha, (370, 190))
screen.blit(main_text, (200, 120))
screen.blit(small_text, (110, 325))
screen.blit(small_text2, (110, 350))
screen.blit(small_text3, (110, 375))
def medium_end_game():
main_text = big_over_font.render(f"Muito bem!", True, (255, 255, 255))
small_text = small_over_font.render(f"10 cartas da Pfizer obtidas!"
f" Imunização da população garantida!", True, (255, 255, 255))
small_text2 = small_over_font.render(f"Porém, alguns coronavírus e/ou caixas de cloroquina passaram :("
f"", True, (255, 255, 0))
small_text3 = small_over_font.render(f"Se esforce mais e tente eliminar todos na próxima. Você consegue!", True,
(255, 255, 255))
screen.blit(quadro_fundo, (100, 100))
screen.blit(you_tried, (325, 200))
screen.blit(main_text, (220, 120))
screen.blit(small_text, (150, 375))
screen.blit(small_text2, (140, 400))
screen.blit(small_text3, (135, 425))
def is_gameover(player_x, player_y, corona_x, corona_y):
dist_gameover_corona = sqrt((player_x - corona_x) ** 2 + (player_y - corona_y) ** 2)
return dist_gameover_corona <= 48
# menu arrow Y
arrow_Y = 0
game_run = True
while game_run:
new_game() # new game, resets everything
# background menu music
mixer.music.load('sons/menu_music.mp3')
mixer.music.set_volume(0.1)
mixer.music.play(-1) # -1 makes it play in a loop
# initial menu
exit_game = False
menu = True
show_about = False
while menu:
screen.fill((255, 255, 255))
screen.blit(menu_image, (0, 0))
if arrow_Y == 0:
seta_menu(230, 297) # start
if arrow_Y == 1:
seta_menu(285, 393) # credits
if arrow_Y == 2:
seta_menu(320, 490) # exit
for event in pg.event.get():
if event.type == pg.QUIT:
menu = False
exit_game = True
game_run = False
if event.type == pg.KEYDOWN:
if event.key == pg.K_UP:
mixer.Sound('sons/click_menu.wav').play()
arrow_Y += -1
if event.key == pg.K_DOWN:
mixer.Sound('sons/click_menu.wav').play()
arrow_Y += 1
if event.key == pg.K_KP_ENTER or event.key == pg.K_RETURN:
if arrow_Y == 0:
mixer.Sound('sons/game_init.wav').play()
pg.time.delay(2000)
menu = False
if arrow_Y == 1:
mixer.Sound('sons/sair_creditos.wav').play()
show_about = True
if arrow_Y == 2:
mixer.Sound('sons/sair_creditos.wav').play()
pg.time.delay(1000)
menu = False
exit_game = True
game_run = False
# About menu
while show_about:
screen.blit(menu_about, (100, 100))
for about_event in pg.event.get():
if about_event.type == pg.KEYDOWN:
show_about = False
pg.display.update()
# limit the arrow
if arrow_Y >= 2:
arrow_Y = 2
if arrow_Y <= 0:
arrow_Y = 0
pg.display.update()
# background game music
mixer.music.load('sons/background_music.mp3')
mixer.music.set_volume(0.1)
mixer.music.play(-1) # -1 makes it play in a loop
# game starts
running = True
while running:
# clock.tick(30)
if exit_game:
break
# RGB
screen.fill((0, 0, 128))
# background image
screen.blit(background, (0, 0))
for event in pg.event.get(): # events
# exit the loop (close the program) when the 'X' is clicked
if event.type == pg.QUIT:
running = False
game_run = False
# if a key is pressed, check the direction and move in it
if event.type == pg.KEYDOWN:
if event.key == pg.K_LEFT:
playerX_change = -0.6
if event.key == pg.K_RIGHT:
playerX_change = 0.6
if event.key == pg.K_UP:
playerY_change = -0.6
if event.key == pg.K_DOWN:
playerY_change = 0.6
if event.key == pg.K_SPACE and seringa_state == "ready":
mixer.Sound('sons/disparo_seringa.mp3').play()
seringaX, seringaY = playerX, playerY # initial coordinates are at the player
seringa_state = "fire"
fire_seringa(seringaX, seringaY)
# when a key is released, zero out the movement increment
if event.type == pg.KEYUP:
if event.key == pg.K_LEFT or event.key == pg.K_RIGHT:
playerX_change = 0
if event.key == pg.K_UP or event.key == pg.K_DOWN:
playerY_change = 0
# object movement
cloroquinaX += cloroquinaX_change # constant!
coronavirusX += coronavirusX_change
pfizerX += pfizerX_change
# don't let them go to infinity and beyond
if cloroquinaX <= -60:
count_miss_chloroquine += 1
cloroquinaX = 1250
cloroquinaY = random.randint(52, 536)
if coronavirusX <= -60:
count_miss_virus += 1
coronavirusX = 790
coronavirusY = random.randint(52, 536)
if pfizerX <= -60:
count_missed_letters += 1
pfizerX = 1000
pfizerY = random.randint(52, 536)
# player movement
playerX += playerX_change
playerY += playerY_change
# don't let the player go past the edges
if playerX <= 0:
playerX = 1
elif playerX >= 736:
playerX = 734
if playerY >= 536:
playerY = 535
elif playerY <= 52:
playerY = 53
# show game elements
player(playerX, playerY)
cloroquina(cloroquinaX, cloroquinaY)
coronavirus(coronavirusX, coronavirusY)
pfizer(pfizerX, pfizerY)
show_score(textX, textY)
# syringe movement
if seringaX >= 800:
# seringaX = 800
seringa_state = "ready"
seringaX = -50
seringaY = -50
if seringa_state == "fire":
fire_seringa(seringaX, seringaY)
seringaX += seringaX_change
# collision
collision = is_collision(cloroquinaX, cloroquinaY, seringaX, seringaY,
coronavirusX, coronavirusY, pfizerX, pfizerY, playerX, playerY)
# | |
<filename>console/scan_scheduler.py
#!/usr/bin/env python
# Standard Python libraries.
import datetime
import ipaddress
import itertools
import logging
import sys
# Third party Python libraries.
from django.utils.timezone import localtime
# Custom Python libraries.
import django_connector
# Setup logging.
LOG_FORMATTER = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s")
ROOT_LOGGER = logging.getLogger()
def clean_text(uncleaned_text):
"""Clean text by replacing specific characters."""
cleaned_text = uncleaned_text.lower().replace(" - ", "_").replace("-", "_").replace(" ", "_").replace("/", "_")
# Ensures __ can be used as a delimiter to extract site name, engine, and timestamp in the
# console/scan_results/masscan_json_to_csv.py and console/scan_results/nmap_to_csv.py scripts.
while "__" in cleaned_text:
cleaned_text = cleaned_text.replace("__", "_")
return cleaned_text
def is_ip_address(ip):
"""Takes an IP address returns True/False if it is a valid IPv4 or IPv6 address."""
ip = str(ip)
try:
ipaddress.ip_address(ip)
return True
except ValueError:
return False
def is_ip_network(address, strict=False):
"""Takes an address returns True/False if it is a valid network."""
address = str(address)
try:
ipaddress.ip_network(address, strict)
return True
except ValueError:
return False
def distribute(included_targets_as_list_size, total_scan_engines_in_pool):
"""Distribute targets to scan engines as evenly as possible. Generates a list of targets per engine. For example,
if there are 13 targets and 3 scan engines, this function will return [5, 4 ,4] - 5 targets for engine1, 4
targets for engine2, and 4 targets for engine3.
https://stackanswers.net/questions/distribute-an-integer-amount-by-a-set-of-slots-as-evenly-as-possible
"""
base, extra = divmod(included_targets_as_list_size, total_scan_engines_in_pool)
return [base + (i < extra) for i in range(total_scan_engines_in_pool)]
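# Worked example (illustrative): distribute(13, 3) computes divmod(13, 3) ->
# base=4, extra=1, so engine index 0 gets base + 1 = 5 targets and the other
# two engines get base = 4 targets each, i.e. [5, 4, 4].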
def schedule_scan(scan_dict):
"""Given a scan dictionary, try and schedule the scan."""
# The ScheduledScan model acts as the sanitized endpoint for engines to determine scan jobs. We don't want to
# expose the other models, so we populate that ScheduledScan model instead. The actual exposed fields for the API
# are controlled using console/api/serializers.py.
# Ensure none of the values are empty. blank=False is only enforced through forms, which this method of creating a
# scheduled scan does not honor.
empty_scan_dict_value_detected = False
for key, value in scan_dict.items():
# Ignore fields that are allowed to be empty.
if key in ["excluded_targets", "pooled_scan_result_file_base_name", "scan_binary_process_id"]:
continue
if not value:
ROOT_LOGGER.error(f"scan_dict['{key}'] has an empty value.")
empty_scan_dict_value_detected = True
if empty_scan_dict_value_detected:
return
try:
# Add entry to ScheduledScan model. Convert dictionary to kwargs using **.
# https://stackoverflow.com/questions/5710391/converting-python-dict-to-kwargs
obj, created = django_connector.ScheduledScan.objects.get_or_create(**scan_dict)
if created:
ROOT_LOGGER.info(f"Adding to scheduled scans: {scan_dict}")
else:
ROOT_LOGGER.error(f"Scheduled scan not created: {scan_dict}")
except Exception as e:
ROOT_LOGGER.exception(f"Error adding scan: {scan_dict}. Exception: {e}")
def main():
# Set current date and time variables. Example datetime objects are provided throughout.
# Use Django's app timezone to determine current datetime.
# datetime.datetime(2021, 5, 3, 10, 21, 53, 197844, tzinfo=<DstTzInfo 'America/Chicago' CDT-1 day, 19:00:00 DST>)
now_datetime = localtime()
# datetime.time(10, 21, 53, 197844)
now_time = now_datetime.time()
# Filter on enabled scans only. We can't filter on occurrences using Django's .filter() method; it will have to
# be checked using logic below. Author's reason why .filter() can't be used:
# https://github.com/django-recurrence/django-recurrence/issues/91#issuecomment-286890133
scans = django_connector.Scan.objects.filter(enable_scan=True)
if not scans:
ROOT_LOGGER.info("No scans enabled")
return
# Loop through each scan to determine if it needs to be scheduled.
for scan in scans:
"""
Have fun understanding the documentation for django-recurrence!
https://django-recurrence.readthedocs.io/en/latest/
This is a challenging library to work with since the django-recurrence README states "The recurrence field only
deals with recurrences not with specific time information." That's why a separate Scan.start_time field is
required. A recurrence object has a granularity of a date, and does not include time, so some challenging logic
is required to determine a one-off scan (no recurring schedule) vs. a recurring scan (with a possible hourly
frequency). When using scan.recurrence.between(), the start and end values are Python datetime objects with a
date granularity, so time is completely ignored. Thus, a dtstart seed datetime object for recurrences is used.
The author has stated "I don't actually use this library now - so my support here is mostly just merging fixes
where I am comfortable with them, and pushing releases to PyPI. If someone else wants to take over ownership,
I'd be more than happy to hand it over."
(https://github.com/django-recurrence/django-recurrence/issues/163#issuecomment-604111964)
I've tried to provide verbose comments to explain my reasoning and logic, but every time I come back to this
code and library, it takes me a day to figure out what's going on.
"""
# Standardize the exdates. Just a note: https://github.com/django-recurrence/django-recurrence/issues/70
for index, exdate in enumerate(scan.recurrences.exdates):
updated_exdate = localtime(exdate).replace(hour=now_time.hour).replace(minute=now_time.minute)
# print(f"Old exdate: {exdate} -- new exdate {updated_exdate}")
scan.recurrences.exdates[index] = updated_exdate
# datetime.datetime(2021, 5, 3, 0, 0)
beginning_of_today = now_datetime.replace(hour=0).replace(minute=0).replace(second=0).replace(microsecond=0)
# datetime.datetime(2021, 5, 3, 23, 59, 59)
end_of_today = now_datetime.replace(hour=23).replace(minute=59).replace(second=59).replace(microsecond=0)
# dtstart is time zone aware since it's coming from Django.
# datetime.datetime(2021, 5, 3, 15, 24, tzinfo=<DstTzInfo 'America/Chicago' CDT-1 day, 19:00:00 DST>)
dtstart = localtime(scan.dtstart)
# Retrieve all the scan occurrences.
scan_occurrences = scan.recurrences.between(beginning_of_today, end_of_today, dtstart=dtstart, inc=True)
# If no scan occurrences exist given the datetime parameters, move on to the next potential scan.
if not scan_occurrences:
continue
# Pare down now_datetime (datetime.datetime(2021, 5, 3, 10, 21, 53, 197844)) to include just the date and time
# datetime.datetime(2021, 5, 3, 10, 21)
now_datetime_stripped = now_datetime.replace(second=0).replace(microsecond=0)
# Further pare down the datetime object to just include a date and no time datetime.datetime(2021, 5, 3, 0, 0),
# for single one-off scans. In these cases, there isn't a recurrence since it is a one-time event.
now_datetime_stripped_only_date = now_datetime_stripped.replace(hour=0).replace(minute=0)
# datetime.time(10, 21)
now_time_stripped = now_time.replace(second=0).replace(microsecond=0)
# Scans with an occurrence.
if now_datetime_stripped in scan_occurrences:
schedule_this_scan = True
# Single one-off scans with a start time that matches the current time and date in scan_occurrences.
elif (scan.start_time.replace(second=0) == now_time_stripped) and (
now_datetime_stripped_only_date in scan_occurrences
):
schedule_this_scan = True
# Scan scheduling criteria wasn't met.
else:
schedule_this_scan = False
# If the scheduled_scan bit was not set to True, move on.
if not schedule_this_scan:
continue
# Let's extract the remaining variables from existing database relationships. Note that the Scan model has the
# Site model as a foreign key, and in turn, the Site model has foreign keys for the Engine and ScanCommand
# models (see the scantron_model_graph.png for a visualization). Therefore, if a field from the Engine or
# ScanCommand models is updated, it will update the Site model, and cascade to the Scan model.
# Scan model.
# For the current scan_start_time, use now_time_stripped instead of scan.start_time in case an hourly recurrence
# frequency is used.
scan_start_time = now_time_stripped
# Site model.
site_name = scan.site.site_name
ROOT_LOGGER.info(f"Found scan for {site_name} at {scan_start_time}.")
# ScanCommand model.
scan_command = scan.site.scan_command.scan_command
scan_binary = scan.site.scan_command.scan_binary
# Generate timestamps
#####################
# Set start_datetime to now_datetime.
start_datetime = now_datetime
# Convert start_datetime datetime object to string for result_file_base_name.
timestamp = datetime.datetime.strftime(start_datetime, "%Y%m%d_%H%M")
# Excluded Targets.
###################
# Convert excluded_targets string to list to reduce duplicates later.
excluded_targets = scan.site.excluded_targets.split()
# Convert queryset to a list of strings (where each string may contain more than 1 target).
globally_excluded_targets_objects = list(
django_connector.GloballyExcludedTarget.objects.all().values_list("globally_excluded_targets", flat=True)
)
# Initialize empty list.
globally_excluded_targets = []
# globally_excluded_targets_objects may look like ["1.2.3.4 192.168.127.12", "www.example.com"], so we need to loop
# through each string in the list, split on space (if applicable), in order to build a new list.
for get in globally_excluded_targets_objects:
targets = get.split(" ")
for target in targets:
globally_excluded_targets.append(target)
# Combine both excluded lists, cast as set to reduce duplicates, re-cast as list, and sort targets.
all_excluded_targets = sorted(list(set(excluded_targets + globally_excluded_targets)))
# masscan --excludefile can only contain IP addresses. If the scan_binary is masscan, remove non-IP addresses
# from all_excluded_targets.
if scan_binary == "masscan":
# Create a temporary list of valid IP addresses.
all_excluded_targets_temp = []
for excluded_target in all_excluded_targets:
if is_ip_address(excluded_target) or is_ip_network(excluded_target):
all_excluded_targets_temp.append(excluded_target)
else:
ROOT_LOGGER.info(
f"masscan can only scan IPs. Removed target '{excluded_target}' from excluded targets."
)
all_excluded_targets = all_excluded_targets_temp
# Convert to a string. strip() removes any prepended or trailing spaces.
all_excluded_targets_string = " ".join(all_excluded_targets).strip()
# Included Targets
##################
# masscan -iL argument file can only contain IP addresses. If the scan_binary is masscan, remove non-IP
# addresses from included_targets.
if scan_binary == "masscan":
# Convert from string to list of targets.
included_targets_list = | |
<filename>src/napari_lf/lfa/lflib/calibration/models.py
import numpy as np
import cv2
# ------------------------------------------------------------------
# ISOMETRY TRANSFORM
#
# Allows for translation & rotation only. Computed by solving the
# orthogonal Procrustes problem:
#
# http://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem
#
# ------------------------------------------------------------------
class IsometryWarp(object):
def __init__(self, lenslet_pitch, pixel_size):
self.forwardCoefficients = np.identity(3, dtype=np.float32)
self.reverseCoefficients = np.identity(3, dtype=np.float32)
self.lenslet_pitch = lenslet_pitch
self.pixel_size = pixel_size
def fit(self, data):
n = data.shape[0]
X1 = data[:,0:2] * self.pixel_size # (Scaled) chief rays
X2 = data[:,2: ] * self.lenslet_pitch # (Scaled) lenslet centers
# Compute column mean & matrices with mean removed.
X1_mean = np.mean(X1, axis = 0)
X2_mean = np.mean(X2, axis = 0)
X1_hat = X1 - np.tile(X1_mean, (n, 1))
X2_hat = X2 - np.tile(X2_mean, (n, 1))
# Compute procrustes transform using the SVD.
# Note: np.linalg.svd returns (U, D, Vt) with M = U @ diag(D) @ Vt, so the
# Procrustes rotation is R = U @ Vt (not U @ Vt.T).
(U, D, Vt) = np.linalg.svd(np.dot(X1_hat.T, X2_hat))
R = np.dot(U, Vt)
mu = X2_mean - np.dot(X1_mean, R)
S1 = np.array( [[ self.pixel_size, 0.0 , 0.0],
[ 0.0 , self.pixel_size, 0.0],
[ 0.0 , 0.0 , 1.0]] )
S2 = np.array( [[ 1.0/self.lenslet_pitch, 0.0 , 0.0],
[ 0.0 , 1.0/self.lenslet_pitch, 0.0],
[ 0.0 , 0.0 , 1.0]] )
self.forwardCoefficients = np.array([[ R[0,0], R[1,0], mu[0] ],
[ R[0,1], R[1,1], mu[1] ],
[ 0.0 , 0.0 , 1.0 ]]);
self.forwardCoefficients = np.dot(self.forwardCoefficients, S1)
self.forwardCoefficients = np.dot(S2, self.forwardCoefficients)
self.reverseCoefficients = np.linalg.inv(self.forwardCoefficients)
def get_error(self, data):
X1 = np.hstack( (data[:,0:2], np.ones((data.shape[0], 1)) ) ) # (Scaled) chief rays
X2 = data[:,2: ] # (Scaled) lenslet centers
X1_prime = np.dot(self.forwardCoefficients, X1.T).T
err_per_point = np.sqrt(np.sum(( X2 - X1_prime[:,0:2] )**2, axis=1)) # sum squared error per row
return err_per_point
# OUTPUT AND DIAGNOSTIC ROUTINES
def __str__(self):
s = "\t\tTranslation: ["
tmp = self.reverseCoefficients[0:2,2]
for t in tmp:
s+= ('%0.2f, ' % t)
s+= "]\n\t\tRotation: ["
s+= str(self.reverseCoefficients[0,0]) + ' ' + str(self.reverseCoefficients[0,1]) + '; '
s+= str(self.reverseCoefficients[1,0]) + ' ' + str(self.reverseCoefficients[1,1]) + ']'
return s
def check_fit(self, data):
X1 = np.hstack( (data[:,0:2], np.ones((data.shape[0], 1)) ) ) # (Scaled) chief rays
X2 = data[:,2: ] # (Scaled) lenslet centers
X1_prime = np.dot(self.forwardCoefficients, X1.T).T
diff = X2 - X1_prime[:,0:2]
err = np.sqrt(np.power(diff[:,0],2) + np.power(diff[:,1],2))
print(('\t--> Fit Statistics - Mean: %0.2f Median: %0.2f Min: %0.2f Max: %0.2f (units: normalized lenslets)' %
(err.mean(), np.median(err, axis=0), err.min(), err.max() )))
def eval(self, data, direction='r'):
X1 = np.hstack( (data[:,0:2], np.ones((data.shape[0], 1)) ) ) # (Scaled) chief rays
if (direction == 'R' or direction == 'r'):
X1_prime = np.dot(self.reverseCoefficients, X1.T)
elif (direction == 'F' or direction == 'f'):
X1_prime = np.dot(self.forwardCoefficients, X1.T)
else:
raise ValueError("Unknown direction in IsometryWarp.eval()")
return X1_prime[0:2,:].T
def eval_point(self, data, direction='r'):
X1 = np.array([data[0], data[1], 1.0])
if (direction == 'R' or direction == 'r'):
X1_prime = np.dot(self.reverseCoefficients, X1.T)
elif (direction == 'F' or direction == 'f'):
X1_prime = np.dot(self.forwardCoefficients, X1.T)
else:
raise ValueError("Unknown direction in AffineWarp.eval()")
return X1_prime[0:2]
def forward(self, coords):
result = np.dot(self.forwardCoefficients, np.array([coords[0], coords[1], 1.0]))
return result[0:2]
def reverse(self, coords):
result = np.dot(self.reverseCoefficients, np.array([coords[0], coords[1], 1.0]))
return result[0:2]
# Warp an image using this isometry transform.
#
# This function expects input_image to be a numpy float32 array.
#
# the cropToInside option can be used to crop the output image to
# the inside of the valid lenslets. Note that cropping to the
# inside will (currently) shift the image output relative to the
# calibration matrix, so you should be cautious about employing
# this option where you hope to use the calibration matrix for
# further computations beyond this point.
def warp_image(self, input_image, output_pixels_per_lenslet, direction="R",
cropToInside = False, lenslet_offset = None, output_size = None):
im = cv2.fromarray(input_image)
ul = self.eval_point([0, 0], 'f').flatten()
ur = self.eval_point([0, im.cols], 'f').flatten()
ll = self.eval_point([im.rows, 0], 'f').flatten()
lr = self.eval_point([im.rows, im.cols], 'f').flatten()
leftbound = np.ceil(max(ul[1], ll[1]))
rightbound = np.floor(min(ur[1], lr[1]))
topbound = np.ceil(max(ul[0], ur[0]))
bottombound = np.floor(min(ll[0], lr[0]))
# Don't crop left of lenslets (0, y) or above (x, 0)
leftbound = max(leftbound, 0)
topbound = max(topbound,0)
if output_size is not None:
putative_output_size = output_size
else:
nt = int(np.floor(bottombound - topbound))
ns = int(np.floor(rightbound - leftbound))
putative_output_size = (nt*output_pixels_per_lenslet, ns*output_pixels_per_lenslet)
# Create the output image
output_image = cv2.CreateMat(putative_output_size[0], putative_output_size[1], im.type)
# Apply the transform.
if (direction == 'f' or direction == 'F'):
coeff = cv2.CreateMat(2,3,cv2.CV_32FC1)
coeff[0,0] = self.forwardCoefficients[0,0] * output_pixels_per_lenslet
coeff[0,1] = self.forwardCoefficients[0,1] * output_pixels_per_lenslet
coeff[0,2] = self.forwardCoefficients[0,2]
coeff[1,0] = self.forwardCoefficients[1,0] * output_pixels_per_lenslet
coeff[1,1] = self.forwardCoefficients[1,1] * output_pixels_per_lenslet
coeff[1,2] = self.forwardCoefficients[1,2]
cv2.WarpAffine(im, output_image, coeff,
flags=cv2.CV_INTER_LINEAR+cv2.CV_WARP_FILL_OUTLIERS+cv2.CV_WARP_INVERSE_MAP)
else:
coeff = cv2.CreateMat(2,3,cv2.CV_32FC1)
coeff[0,0] = self.reverseCoefficients[0,0] / output_pixels_per_lenslet
coeff[0,1] = self.reverseCoefficients[0,1] / output_pixels_per_lenslet
coeff[0,2] = self.reverseCoefficients[0,2]
coeff[1,0] = self.reverseCoefficients[1,0] / output_pixels_per_lenslet
coeff[1,1] = self.reverseCoefficients[1,1] / output_pixels_per_lenslet
coeff[1,2] = self.reverseCoefficients[1,2]
cv2.WarpAffine(im, output_image, coeff,
flags=cv2.CV_INTER_LINEAR+cv2.CV_WARP_FILL_OUTLIERS+cv2.CV_WARP_INVERSE_MAP)
result = np.asarray(output_image)
if cropToInside:
return result[topbound * output_pixels_per_lenslet:bottombound * output_pixels_per_lenslet,
leftbound * output_pixels_per_lenslet:rightbound * output_pixels_per_lenslet]
else:
return result
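# Illustrative usage sketch (not part of the original module): fit an
# IsometryWarp to synthetic chief-ray / lenslet-centre correspondences.
# The pitch, pixel size, offset and point count are made-up values; each row
# of `data` is [ray_x, ray_y, lenslet_x, lenslet_y], as consumed by fit().
#   rays = np.random.rand(200, 2) * 2000.0          # chief rays (pixels)
#   centres = rays * (4.55 / 125.0) + [3.0, -2.0]   # shifted lenslet coords
#   warp = IsometryWarp(lenslet_pitch=125.0, pixel_size=4.55)
#   warp.fit(np.hstack((rays, centres)))
#   warp.check_fit(np.hstack((rays, centres)))      # errors should be ~0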
# ------------------------------------------------------------------
# AFFINE WARP
#
# the model has 6 DOF capable of encoding rotation, anisotropic
# scaling, shearing, and translation.
#
# x' = a + cx + ey
# y' = b + dx + fy
#
# ------------------------------------------------------------------
class AffineWarp(object):
def __init__(self):
self.forwardCoefficients = np.zeros((2,3), dtype='float32')
self.reverseCoefficients = np.zeros((2,3), dtype='float32')
self.forwardCoefficients[0,1] = 1.0
self.forwardCoefficients[1,2] = 1.0
self.reverseCoefficients[0,1] = 1.0
self.reverseCoefficients[1,2] = 1.0
def eval(self, data, direction='r'):
x = data[:,0]
y = data[:,1]
composite = np.vstack( (np.ones(x.shape), x, y) )
if (direction == 'R' or direction == 'r'):
return np.dot(composite.transpose(), self.reverseCoefficients.transpose())
elif (direction == 'F' or direction == 'f'):
return np.dot(composite.transpose(), self.forwardCoefficients.transpose())
else:
raise ValueError("Unknown direction in AffineWarp.eval()")
def eval_point(self, data, direction='r'):
x = np.array([[data[0]]], dtype='float32')
y = np.array([[data[1]]], dtype='float32')
composite = np.vstack( (np.ones(x.shape), x, y) )
if (direction == 'R' or direction == 'r'):
return np.array(np.dot(composite.transpose(), self.reverseCoefficients.transpose()))[0]
elif (direction == 'F' or direction == 'f'):
return np.array(np.dot(composite.transpose(), self.forwardCoefficients.transpose()))[0]
else:
raise ValueError("Unknown direction in AffineWarp.eval()")
def fit(self, data):
datalen = data.shape[0]
A = np.zeros( (datalen,3) )
bx = np.zeros( (datalen) )
by = np.zeros( (datalen) )
Ar = np.zeros( (datalen,3) )
bxr = np.zeros( (datalen) )
byr = np.zeros( (datalen) )
for i in range(datalen):
px = float(data[i,0])
py = float(data[i,1])
lx = float(data[i,2])
ly = float(data[i,3])
A[i,0] = 1.0; Ar[i,0] = 1.0;
A[i,1] = px; Ar[i,1] = lx;
A[i,2] = py; Ar[i,2] = ly;
bx[i] = lx; bxr[i] = px
by[i] = ly; byr[i] = py
self.forwardCoefficients = np.zeros((2,3), dtype='float32')
self.reverseCoefficients = np.zeros((2,3), dtype='float32')
import numpy.linalg as linalg
self.forwardCoefficients[0,:] = linalg.lstsq(A,bx)[0]
self.forwardCoefficients[1,:] = linalg.lstsq(A,by)[0]
self.reverseCoefficients[0,:] = linalg.lstsq(Ar,bxr)[0]
self.reverseCoefficients[1,:] = linalg.lstsq(Ar,byr)[0]
def get_error(self, data, direction='f'):
x = data[:,0]
y = data[:,1]
composite = np.vstack( (np.ones(x.shape), x, y ) )
if (direction == 'R' or direction == 'r'):
B_fit = np.dot(composite.transpose(), self.reverseCoefficients.transpose())
elif (direction == 'F' or direction == 'f'):
B_fit = np.dot(composite.transpose(), self.forwardCoefficients.transpose())
else:
raise ValueError("Unkown direction in AffineWarp.get_error()")
B = data[:,2:4]
err_per_point = np.sqrt(np.sum((B-B_fit)**2,axis=1)) # sum squared error per row
return err_per_point
def forwardCoefficients(self):
return self.forwardCoefficients
def reverseCoefficients(self):
return self.reverseCoefficients
def forward(self, coords):
return np.dot(self.forwardCoefficients, np.array([1, coords[0], coords[1]]))
def reverse(self, coords):
return np.dot(self.reverseCoefficients, np.array([1, coords[0], coords[1]]))
def save(self, filename):
a = np.vstack((self.forwardCoefficients, self.reverseCoefficients))
np.savetxt(filename, a, fmt="%12.6G")
def load(self, filename):
a = np.loadtxt(filename)
self.forwardCoefficients = a[0:2,:].astype(np.float32)
self.reverseCoefficients = a[2:4,:].astype(np.float32)
# OUTPUT AND DIAGNOSTIC ROUTINES
def __str__(self):
s = "\t\tTranslation: ["
tmp = self.reverseCoefficients[:,0];
for t in tmp:
s+= ('%0.2f, ' % t)
s+= "]\n\t\tScaling and rotation: ["
s+= str(self.reverseCoefficients[0,1]) + ' ' + str(self.reverseCoefficients[0,2]) + '; '
s+= str(self.reverseCoefficients[1,1]) + ' ' + str(self.reverseCoefficients[1,2]) + ']'
return s
def check_fit(self, data):
projected_camlenses = self.eval(data[:,0:2], 'f')
diff = data[:,2:4] - projected_camlenses
err = np.sqrt(np.power(diff[:,0],2) + np.power(diff[:,1],2))
print(('\t--> Fit Statistics - Mean: %0.2f Median: %0.2f Min: %0.2f Max: %0.2f (units: normalized lenslets)' %
(err.mean(), np.median(err, axis=0), err.min(), err.max() )))
# Warp an image using this affine transform.
#
# This function expects input_image to be a numpy float32 array.
#
# the cropToInside option can be used to crop the output image to
# the inside of the valid lenslets. Note that cropping to the
# inside will (currently) shift the image output relative to the
# calibration | |
the app having
special entitlements with an explicit
application-identifier. Currently supports
testing aps-environment entitlement.
"""
tests_zip = proto.Field(
proto.MESSAGE,
number=1,
message='FileReference',
)
xctestrun = proto.Field(
proto.MESSAGE,
number=2,
message='FileReference',
)
xcode_version = proto.Field(
proto.STRING,
number=3,
)
app_bundle_id = proto.Field(
proto.STRING,
number=4,
)
test_special_entitlements = proto.Field(
proto.BOOL,
number=6,
)
class IosTestLoop(proto.Message):
r"""A test of an iOS application that implements one or more game
loop scenarios. This test type accepts an archived application
(.ipa file) and a list of integer scenarios that will be
executed on the app sequentially.
Attributes:
app_ipa (google.devtools.testing_v1.types.FileReference):
Required. The .ipa of the application to
test.
scenarios (Sequence[int]):
The list of scenarios that should be run
during the test. Defaults to the single scenario
0 if unspecified.
app_bundle_id (str):
Output only. The bundle id for the
application under test.
"""
app_ipa = proto.Field(
proto.MESSAGE,
number=1,
message='FileReference',
)
scenarios = proto.RepeatedField(
proto.INT32,
number=2,
)
app_bundle_id = proto.Field(
proto.STRING,
number=3,
)
class AndroidInstrumentationTest(proto.Message):
r"""A test of an Android application that can control an Android
component independently of its normal lifecycle. Android
instrumentation tests run an application APK and test APK inside the
same process on a virtual or physical AndroidDevice. They also
specify a test runner class, such as com.google.GoogleTestRunner,
which can vary on the specific instrumentation framework chosen.
See http://developer.android.com/tools/testing/testing_android.html
for more information on types of Android tests.
Attributes:
app_apk (google.devtools.testing_v1.types.FileReference):
The APK for the application under test.
app_bundle (google.devtools.testing_v1.types.AppBundle):
A multi-apk app bundle for the application
under test.
test_apk (google.devtools.testing_v1.types.FileReference):
Required. The APK containing the test code to
be executed.
app_package_id (str):
The java package for the application under
test. The default value is determined by
examining the application's manifest.
test_package_id (str):
The java package for the test to be executed.
The default value is determined by examining the
application's manifest.
test_runner_class (str):
The InstrumentationTestRunner class.
The default value is determined by examining the
application's manifest.
test_targets (Sequence[str]):
Each target must be fully qualified with the package name or
class name, in one of these formats:
- "package package_name"
- "class package_name.class_name"
- "class package_name.class_name#method_name"
If empty, all targets in the module will be run.
orchestrator_option (google.devtools.testing_v1.types.OrchestratorOption):
The option of whether running each test within its own
invocation of instrumentation with Android Test Orchestrator
or not. \*\* Orchestrator is only compatible with
AndroidJUnitRunner version 1.0 or higher! \*\* Orchestrator
offers the following benefits:
- No shared state
- Crashes are isolated
- Logs are scoped per test
See
https://developer.android.com/training/testing/junit-runner.html#using-android-test-orchestrator
for more information about Android Test Orchestrator.
If not set, the test will be run without the orchestrator.
sharding_option (google.devtools.testing_v1.types.ShardingOption):
The option to run tests in multiple shards in
parallel.
"""
app_apk = proto.Field(
proto.MESSAGE,
number=1,
oneof='app_under_test',
message='FileReference',
)
app_bundle = proto.Field(
proto.MESSAGE,
number=8,
oneof='app_under_test',
message='AppBundle',
)
test_apk = proto.Field(
proto.MESSAGE,
number=2,
message='FileReference',
)
app_package_id = proto.Field(
proto.STRING,
number=3,
)
test_package_id = proto.Field(
proto.STRING,
number=4,
)
test_runner_class = proto.Field(
proto.STRING,
number=5,
)
test_targets = proto.RepeatedField(
proto.STRING,
number=6,
)
orchestrator_option = proto.Field(
proto.ENUM,
number=7,
enum='OrchestratorOption',
)
sharding_option = proto.Field(
proto.MESSAGE,
number=9,
message='ShardingOption',
)
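# Hedged usage sketch for AndroidInstrumentationTest above. The bucket paths,
# runner class, target string, and the USE_ORCHESTRATOR enum member are
# illustrative assumptions used only to show the shape of the fields.
def _example_android_instrumentation_test():
    return AndroidInstrumentationTest(
        app_apk=FileReference(gcs_path='gs://my-bucket/app.apk'),
        test_apk=FileReference(gcs_path='gs://my-bucket/app-test.apk'),
        test_runner_class='androidx.test.runner.AndroidJUnitRunner',
        # One of the documented target formats: a single test class.
        test_targets=['class com.example.app.LoginTest'],
        orchestrator_option='USE_ORCHESTRATOR',
    )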
class AndroidRoboTest(proto.Message):
r"""A test of an android application that explores the
application on a virtual or physical Android Device, finding
culprits and crashes as it goes. Next tag: 30
Attributes:
app_apk (google.devtools.testing_v1.types.FileReference):
The APK for the application under test.
app_bundle (google.devtools.testing_v1.types.AppBundle):
A multi-apk app bundle for the application
under test.
app_package_id (str):
The java package for the application under
test. The default value is determined by
examining the application's manifest.
app_initial_activity (str):
The initial activity that should be used to
start the app.
max_depth (int):
The max depth of the traversal stack Robo can
explore. Needs to be at least 2 to make Robo
explore the app beyond the first activity.
Default is 50.
max_steps (int):
The max number of steps Robo can execute.
Default is no limit.
robo_directives (Sequence[google.devtools.testing_v1.types.RoboDirective]):
A set of directives Robo should apply during
the crawl. This allows users to customize the
crawl. For example, the username and password
for a test account can be provided.
robo_script (google.devtools.testing_v1.types.FileReference):
A JSON file with a sequence of actions Robo
should perform as a prologue for the crawl.
starting_intents (Sequence[google.devtools.testing_v1.types.RoboStartingIntent]):
The intents used to launch the app for the
crawl. If none are provided, then the main
launcher activity is launched. If some are
provided, then only those provided are launched
(the main launcher activity must be provided
explicitly).
"""
app_apk = proto.Field(
proto.MESSAGE,
number=1,
oneof='app_under_test',
message='FileReference',
)
app_bundle = proto.Field(
proto.MESSAGE,
number=16,
oneof='app_under_test',
message='AppBundle',
)
app_package_id = proto.Field(
proto.STRING,
number=2,
)
app_initial_activity = proto.Field(
proto.STRING,
number=3,
)
max_depth = proto.Field(
proto.INT32,
number=7,
)
max_steps = proto.Field(
proto.INT32,
number=8,
)
robo_directives = proto.RepeatedField(
proto.MESSAGE,
number=11,
message='RoboDirective',
)
robo_script = proto.Field(
proto.MESSAGE,
number=13,
message='FileReference',
)
starting_intents = proto.RepeatedField(
proto.MESSAGE,
number=15,
message='RoboStartingIntent',
)
class RoboDirective(proto.Message):
r"""Directs Robo to interact with a specific UI element if it is
encountered during the crawl. Currently, Robo can perform text
entry or element click.
Attributes:
resource_name (str):
Required. The android resource name of the
target UI element. For example,
in Java: R.string.foo
in xml: @string/foo
Only the "foo" part is needed.
Reference doc:
https://developer.android.com/guide/topics/resources/accessing-resources.html
input_text (str):
The text that Robo is directed to set. If left empty, the
directive will be treated as a CLICK on the element matching
the resource_name.
action_type (google.devtools.testing_v1.types.RoboActionType):
Required. The type of action that Robo should
perform on the specified element.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
input_text = proto.Field(
proto.STRING,
number=2,
)
action_type = proto.Field(
proto.ENUM,
number=3,
enum='RoboActionType',
)
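# Hedged usage sketch combining AndroidRoboTest and RoboDirective above. The
# resource name, credential, and the ENTER_TEXT action are illustrative
# assumptions; only the "foo" part of a resource id is expected, as documented.
def _example_android_robo_test():
    directive = RoboDirective(
        resource_name='username_field',
        input_text='test-user@example.com',
        action_type='ENTER_TEXT',
    )
    return AndroidRoboTest(
        app_apk=FileReference(gcs_path='gs://my-bucket/app.apk'),
        max_depth=10,  # must be at least 2 for Robo to go past the first activity
        robo_directives=[directive],
    )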
class RoboStartingIntent(proto.Message):
r"""Message for specifying the start activities to crawl.
Attributes:
launcher_activity (google.devtools.testing_v1.types.LauncherActivityIntent):
An intent that starts the main launcher
activity.
start_activity (google.devtools.testing_v1.types.StartActivityIntent):
An intent that starts an activity with
specific details.
timeout (google.protobuf.duration_pb2.Duration):
Timeout in seconds for each intent.
"""
launcher_activity = proto.Field(
proto.MESSAGE,
number=1,
oneof='starting_intent',
message='LauncherActivityIntent',
)
start_activity = proto.Field(
proto.MESSAGE,
number=2,
oneof='starting_intent',
message='StartActivityIntent',
)
timeout = proto.Field(
proto.MESSAGE,
number=3,
message=duration_pb2.Duration,
)
class LauncherActivityIntent(proto.Message):
r"""Specifies an intent that starts the main launcher activity.
"""
class StartActivityIntent(proto.Message):
r"""A starting intent specified by an action, uri, and
categories.
Attributes:
action (str):
Action name. Required for START_ACTIVITY.
uri (str):
URI for the action.
categories (Sequence[str]):
Intent categories to set on the intent.
"""
action = proto.Field(
proto.STRING,
number=2,
)
uri = proto.Field(
proto.STRING,
number=3,
)
categories = proto.RepeatedField(
proto.STRING,
number=4,
)
class EnvironmentMatrix(proto.Message):
r"""The matrix of environments in which the test is to be
executed.
Attributes:
android_matrix (google.devtools.testing_v1.types.AndroidMatrix):
A matrix of Android devices.
android_device_list (google.devtools.testing_v1.types.AndroidDeviceList):
A list of Android devices; the test will be
run only on the specified devices.
ios_device_list (google.devtools.testing_v1.types.IosDeviceList):
A list of iOS devices.
"""
android_matrix = proto.Field(
proto.MESSAGE,
number=1,
oneof='environment_matrix',
message='AndroidMatrix',
)
android_device_list = proto.Field(
proto.MESSAGE,
number=2,
oneof='environment_matrix',
message='AndroidDeviceList',
)
ios_device_list = proto.Field(
proto.MESSAGE,
number=3,
oneof='environment_matrix',
message='IosDeviceList',
)
class AndroidDeviceList(proto.Message):
r"""A list of Android device configurations in which the test is
to be executed.
Attributes:
android_devices (Sequence[google.devtools.testing_v1.types.AndroidDevice]):
Required. A list of Android devices.
"""
android_devices = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='AndroidDevice',
)
class IosDeviceList(proto.Message):
r"""A list of iOS device configurations in which the test is to
be executed.
Attributes:
ios_devices (Sequence[google.devtools.testing_v1.types.IosDevice]):
Required. A list of iOS devices.
"""
ios_devices = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='IosDevice',
)
class AndroidMatrix(proto.Message):
r"""A set of Android device configuration permutations is defined
by the cross-product of the given axes. Internally, the
given AndroidMatrix will be expanded into a set of
AndroidDevices.
Only supported permutations will be instantiated. Invalid
permutations (e.g., incompatible models/versions) are ignored.
Attributes:
android_model_ids (Sequence[str]):
Required. The ids of the set of Android
device to be used. Use the
TestEnvironmentDiscoveryService to get supported
options.
android_version_ids (Sequence[str]):
Required. The ids of the set of Android OS
version to be used. Use the
TestEnvironmentDiscoveryService to get supported
options.
locales (Sequence[str]):
Required. The set of locales the test device
will enable for testing. Use the
TestEnvironmentDiscoveryService to get supported
options.
orientations (Sequence[str]):
Required. The set of orientations to test
with. Use the TestEnvironmentDiscoveryService to
get supported options.
"""
android_model_ids = proto.RepeatedField(
from __future__ import annotations
import hashlib
import json
import logging
import re
from collections import namedtuple, defaultdict
from functools import reduce
from typing import Optional, Generator, Any, Dict, List, Set, Tuple
from networkx import DiGraph, MultiDiGraph, all_shortest_paths, is_directed_acyclic_graph
from resotocore.model.model import Model, Kind, AnyKind, ComplexKind, ArrayKind, DateTimeKind, DictionaryKind
from resotocore.model.resolve_in_graph import GraphResolver, NodePath, ResolveProp
from resotocore.types import Json
from resotocore.util import utc, utc_str, value_in_path, set_value_in_path, value_in_path_get
log = logging.getLogger(__name__)
# This version is used when the content hash of a node is computed.
# All computed hashes will be invalidated, by incrementing the version.
# This can be used, if computed values should be recomputed for all imported data.
ContentHashVersion = 3
class Section:
# The reported section contains the data gathered by the collector.
# This data is usually not changed by the user directly, but implicitly via changes on the
# infrastructure, so the next collect run will change this state.
reported = "reported"
# This section holds changes that should be reflected by the given node.
# The desired section can be queried the same way as the reported section
# and allows querying commands of the graph with a common desired state.
# For example, the clean flag is manifested in the desired section.
# The separate clean step would query all nodes that should be cleaned
# and can compute the correct order of action by walking the graph structure.
desired = "desired"
# This section holds information about this node that is gathered during the import process.
# Example: This section resolves common graph attributes like cloud, account, region, zone to make
# querying the graph easy.
metadata = "metadata"
# The following sections are used to look up special kinds in the graph hierarchy to simplify access.
# See GraphResolver for details.
# All resolved ancestors are written to this section.
ancestors = "ancestors"
# Resolved descendants would be written to this section.
# Only here for completeness - currently not used.
descendants = "descendants"
# The set of all content sections
content_ordered = [reported, desired, metadata]
content = set(content_ordered)
# The list of all lookup sections
lookup_sections_ordered = [ancestors, descendants]
# The list of all sections
all_ordered = [*content_ordered, *lookup_sections_ordered]
all = set(all_ordered)
# remove the section plus dot if it exists in the string: reported.foo => foo
__no_section = re.compile("^(" + "|".join(f"({s})" for s in content_ordered) + ")[.]")
@classmethod
def without_section(cls, path: str) -> str:
return cls.__no_section.sub("", path, 1)
class EdgeType:
# This edge type defines the default relationship between resources.
# It is the main edge type and is assumed, if no edge type is given.
# The related graph is also used as source of truth for graph updates.
default: str = "default"
# This edge type defines the order of delete operations.
# A resource can be deleted, if all outgoing resources are deleted.
delete: str = "delete"
# The set of all allowed edge types.
# Note: the database schema has to be adapted to support additional edge types.
all: Set[str] = {default, delete}
class Direction:
# Opposite direction as the edge direction.
inbound = "in"
# Same direction as the edge direction
outbound = "out"
# Ignore the direction of the edge and traverse in any direction.
any = "any"
# The set of all allowed directions.
all: Set[str] = {inbound, outbound, any}
EdgeKey = namedtuple("EdgeKey", ["from_node", "to_node", "edge_type"])
class GraphBuilder:
def __init__(self, model: Model):
self.model = model
self.graph = MultiDiGraph()
self.nodes = 0
self.edges = 0
def add_from_json(self, js: Json) -> None:
if "id" in js and Section.reported in js:
self.add_node(
js["id"],
js[Section.reported],
js.get(Section.desired, None),
js.get(Section.metadata, None),
js.get("search", None),
js.get("replace", False) is True,
)
elif "from" in js and "to" in js:
self.add_edge(js["from"], js["to"], js.get("edge_type", EdgeType.default))
else:
raise AttributeError(f"Format not understood! Got {json.dumps(js)} which is neither vertex nor edge.")
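# Illustrative payloads for add_from_json (values are made up): a node object
# carries "id" plus at least the "reported" section, while an edge object
# carries "from"/"to" and an optional "edge_type", e.g.
#   {"id": "n1", "reported": {"kind": "aws_account", "name": "sandbox"}}
#   {"from": "n1", "to": "n2", "edge_type": "default"}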
def add_node(
self,
node_id: str,
reported: Json,
desired: Optional[Json] = None,
metadata: Optional[Json] = None,
search: Optional[str] = None,
replace: bool = False,
) -> None:
self.nodes += 1
# validate kind of this reported json
coerced = self.model.check_valid(reported)
reported = reported if coerced is None else coerced
kind = self.model[reported]
# create content hash
sha = GraphBuilder.content_hash(reported, desired, metadata)
# flat all properties into a single string for search
flat = search if isinstance(search, str) else (GraphBuilder.flatten(reported, kind))
self.graph.add_node(
node_id,
id=node_id,
reported=reported,
desired=desired,
metadata=metadata,
hash=sha,
kind=kind,
kinds=list(kind.kind_hierarchy()),
kinds_set=kind.kind_hierarchy(),
flat=flat,
replace=replace | metadata.get("replace", False) is True if metadata else False,
)
def add_edge(self, from_node: str, to_node: str, edge_type: str) -> None:
self.edges += 1
key = GraphAccess.edge_key(from_node, to_node, edge_type)
self.graph.add_edge(from_node, to_node, key, edge_type=edge_type)
@staticmethod
def content_hash(js: Json, desired: Optional[Json] = None, metadata: Optional[Json] = None) -> str:
sha256 = hashlib.sha256()
# all content hashes will be different, when the version changes
sha256.update(ContentHashVersion.to_bytes(2, "big"))
sha256.update(json.dumps(js, sort_keys=True).encode("utf-8"))
if desired:
sha256.update(json.dumps(desired, sort_keys=True).encode("utf-8"))
if metadata:
sha256.update(json.dumps(metadata, sort_keys=True).encode("utf-8"))
return sha256.hexdigest()
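# Note on the helper above: json.dumps(..., sort_keys=True) canonicalises key
# order, so e.g. {"kind": "foo", "name": "a"} and {"name": "a", "kind": "foo"}
# produce the same hash, while bumping ContentHashVersion changes every hash.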
@staticmethod
def flatten(js: Json, kind: Kind) -> str:
result = ""
def dispatch(value: Any, k: Kind) -> None:
nonlocal result
if isinstance(value, dict):
for prop, elem in value.items():
sub = (
k.property_kind_of(prop, AnyKind())
if isinstance(k, ComplexKind)
else (k.value_kind if isinstance(k, DictionaryKind) else AnyKind())
)
dispatch(elem, sub)
elif isinstance(value, list):
sub = k.inner if isinstance(k, ArrayKind) else AnyKind()
for elem in value:
dispatch(elem, sub)
elif value is None or isinstance(value, bool):
pass
else:
# in case of date time: "2017-05-30T22:04:34Z" -> "2017-05-30 22:04:34"
if isinstance(k, DateTimeKind):
value = re.sub("[ZT]", " ", value)
if result:
result += " "
result += str(value).strip()
dispatch(js, kind)
return result
def check_complete(self) -> None:
# check that all vertices are given, that were defined in any edge definition
# note: DiGraph will create an empty vertex node automatically
for node_id, node in self.graph.nodes(data=True):
assert node.get(Section.reported), f"{node_id} was used in an edge definition but not provided as vertex!"
edge_types = {edge[2] for edge in self.graph.edges(data="edge_type")}
al = EdgeType.all
assert not edge_types.difference(al), f"Graph contains unknown edge types! Given: {edge_types}. Known: {al}"
# make sure there is only one root node
rid = GraphAccess.root_id(self.graph)
root_node = self.graph.nodes[rid]
# make sure the root node (kind graph_root) uses the canonical id "root"
if value_in_path(root_node, NodePath.reported_kind) == "graph_root" and rid != "root":
# re-add the node under the id "root", rewire its outgoing edges, then drop the old node
root_node = self.graph.nodes[rid]
root_node["id"] = "root"
self.graph.add_node("root", **root_node)
for succ in list(self.graph.successors(rid)):
for edge_type in EdgeType.all:
key = GraphAccess.edge_key(rid, succ, edge_type)
if self.graph.has_edge(rid, succ, key):
self.graph.remove_edge(rid, succ, key)
self.add_edge("root", succ, edge_type)
self.graph.remove_node(rid)
NodeData = Tuple[str, Json, Optional[Json], Optional[Json], Optional[Json], str, List[str], str]
class GraphAccess:
def __init__(
self,
sub: MultiDiGraph,
maybe_root_id: Optional[str] = None,
visited_nodes: Optional[Set[Any]] = None,
visited_edges: Optional[Set[EdgeKey]] = None,
):
super().__init__()
self.g = sub
self.nodes = sub.nodes()
self.visited_nodes: Set[object] = visited_nodes if visited_nodes else set()
self.visited_edges: Set[EdgeKey] = visited_edges if visited_edges else set()
self.at = utc()
self.at_json = utc_str(self.at)
self.maybe_root_id = maybe_root_id
self.resolved = False
def root(self) -> str:
return self.maybe_root_id if self.maybe_root_id else GraphAccess.root_id(self.g)
def node(self, node_id: str) -> Optional[Json]:
self.visited_nodes.add(node_id)
if self.g.has_node(node_id):
n = self.nodes[node_id]
return self.dump(node_id, n)
else:
return None
def has_edge(self, from_id: object, to_id: object, edge_type: str) -> bool:
key = self.edge_key(from_id, to_id, edge_type)
result: bool = self.g.has_edge(from_id, to_id, key)
if result:
self.visited_edges.add(key)
return result
def resolve(self) -> None:
if not self.resolved:
self.resolved = True
log.info("Resolve attributes in graph")
for node_id in self.nodes:
self.__resolve(node_id, self.nodes[node_id])
self.__resolve_count_descendants()
log.info("Resolve attributes finished.")
def __resolve_count_descendants(self) -> None:
visited: Set[str] = set()
def count_successors_by(node_id: str, edge_type: str, path: List[str]) -> Dict[str, int]:
result: Dict[str, int] = {}
to_visit = list(self.successors(node_id, edge_type))
while to_visit:
visit_next: List[str] = []
for elem_id in to_visit:
if elem_id not in visited:
visited.add(elem_id)
elem = self.nodes[elem_id]
if not value_in_path_get(elem, NodePath.is_phantom, False):
extracted = value_in_path(elem, path)
if isinstance(extracted, str):
result[extracted] = result.get(extracted, 0) + 1
# check if there is already a successor summary: stop the traversal and take the result.
existing = value_in_path(elem, NodePath.descendant_summary)
if existing and isinstance(existing, dict):
for summary_item, count in existing.items():
result[summary_item] = result.get(summary_item, 0) + count
else:
visit_next.extend(a for a in self.successors(elem_id, edge_type) if a not in visited)
to_visit = visit_next
return result
for on_kind, prop in GraphResolver.count_successors.items():
elif nodeName_ == 'CutOffTime':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'CutOffTime')
value_ = self.gds_validate_string(value_, node, 'CutOffTime')
self.CutOffTime = value_
self.CutOffTime_nsprefix_ = child_.prefix
elif nodeName_ == 'CutOffWindow' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'CutOffWindow')
ival_ = self.gds_validate_integer(ival_, node, 'CutOffWindow')
self.CutOffWindow = ival_
self.CutOffWindow_nsprefix_ = child_.prefix
elif nodeName_ == 'BulkMaxWeight' and child_.text:
sval_ = child_.text
fval_ = self.gds_parse_decimal(sval_, node, 'BulkMaxWeight')
fval_ = self.gds_validate_decimal(fval_, node, 'BulkMaxWeight')
self.BulkMaxWeight = fval_
self.BulkMaxWeight_nsprefix_ = child_.prefix
elif nodeName_ == 'BulkMaxPackages' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'BulkMaxPackages')
ival_ = self.gds_validate_integer(ival_, node, 'BulkMaxPackages')
self.BulkMaxPackages = ival_
self.BulkMaxPackages_nsprefix_ = child_.prefix
super(ValidatePickUpResponse, self).buildChildren(child_, node, nodeName_, True)
# end class ValidatePickUpResponse
class GetPickUpHistoryRequest(Request):
"""GetPickUpHistoryRequest"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = Request
def __init__(self, PickUpHistorySearchCriteria=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
super(GetPickUpHistoryRequest, self).__init__( **kwargs_)
self.PickUpHistorySearchCriteria = PickUpHistorySearchCriteria
self.PickUpHistorySearchCriteria_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, GetPickUpHistoryRequest)
if subclass is not None:
return subclass(*args_, **kwargs_)
if GetPickUpHistoryRequest.subclass:
return GetPickUpHistoryRequest.subclass(*args_, **kwargs_)
else:
return GetPickUpHistoryRequest(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_PickUpHistorySearchCriteria(self):
return self.PickUpHistorySearchCriteria
def set_PickUpHistorySearchCriteria(self, PickUpHistorySearchCriteria):
self.PickUpHistorySearchCriteria = PickUpHistorySearchCriteria
def hasContent_(self):
if (
self.PickUpHistorySearchCriteria is not None or
super(GetPickUpHistoryRequest, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='GetPickUpHistoryRequest', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('GetPickUpHistoryRequest')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'GetPickUpHistoryRequest':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GetPickUpHistoryRequest')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='GetPickUpHistoryRequest', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='GetPickUpHistoryRequest'):
super(GetPickUpHistoryRequest, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='GetPickUpHistoryRequest')
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='GetPickUpHistoryRequest', fromsubclass_=False, pretty_print=True):
super(GetPickUpHistoryRequest, self).exportChildren(outfile, level, namespaceprefix_, namespacedef_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.PickUpHistorySearchCriteria is not None:
namespaceprefix_ = self.PickUpHistorySearchCriteria_nsprefix_ + ':' if (UseCapturedNS_ and self.PickUpHistorySearchCriteria_nsprefix_) else ''
self.PickUpHistorySearchCriteria.export(outfile, level, namespaceprefix_, namespacedef_='', name_='PickUpHistorySearchCriteria', pretty_print=pretty_print)
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(GetPickUpHistoryRequest, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'PickUpHistorySearchCriteria':
obj_ = PickUpHistorySearchCriteria.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.PickUpHistorySearchCriteria = obj_
obj_.original_tagname_ = 'PickUpHistorySearchCriteria'
super(GetPickUpHistoryRequest, self).buildChildren(child_, node, nodeName_, True)
# end class GetPickUpHistoryRequest
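# Hedged usage sketch for the generated bindings above; PickUpHistorySearchCriteria
# is defined just below, the dates and limit are illustrative, and fields inherited
# from Request are left at their defaults.
def _example_pickup_history_request(outfile):
    criteria = PickUpHistorySearchCriteria(
        FromDate='2021-01-01',
        ToDate='2021-01-31',
        MaxNumOfRecords=10,
    )
    request = GetPickUpHistoryRequest(PickUpHistorySearchCriteria=criteria)
    # export() writes the element as XML to the supplied file object.
    request.export(outfile, 0, name_='GetPickUpHistoryRequest')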
class PickUpHistorySearchCriteria(GeneratedsSuper):
"""PickUpHistorySearchCriteria"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, FromDate=None, ToDate=None, ConfirmationNumber=None, AccountNumber=None, Status=None, MaxNumOfRecords=None, SortColumn=None, SortDirection=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.FromDate = FromDate
self.FromDate_nsprefix_ = None
self.ToDate = ToDate
self.ToDate_nsprefix_ = None
self.ConfirmationNumber = ConfirmationNumber
self.ConfirmationNumber_nsprefix_ = None
self.AccountNumber = AccountNumber
self.AccountNumber_nsprefix_ = None
self.Status = Status
self.Status_nsprefix_ = None
self.MaxNumOfRecords = MaxNumOfRecords
self.MaxNumOfRecords_nsprefix_ = None
self.SortColumn = SortColumn
self.SortColumn_nsprefix_ = None
self.SortDirection = SortDirection
self.SortDirection_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, PickUpHistorySearchCriteria)
if subclass is not None:
return subclass(*args_, **kwargs_)
if PickUpHistorySearchCriteria.subclass:
return PickUpHistorySearchCriteria.subclass(*args_, **kwargs_)
else:
return PickUpHistorySearchCriteria(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_FromDate(self):
return self.FromDate
def set_FromDate(self, FromDate):
self.FromDate = FromDate
def get_ToDate(self):
return self.ToDate
def set_ToDate(self, ToDate):
self.ToDate = ToDate
def get_ConfirmationNumber(self):
return self.ConfirmationNumber
def set_ConfirmationNumber(self, ConfirmationNumber):
self.ConfirmationNumber = ConfirmationNumber
def get_AccountNumber(self):
return self.AccountNumber
def set_AccountNumber(self, AccountNumber):
self.AccountNumber = AccountNumber
def get_Status(self):
return self.Status
def set_Status(self, Status):
self.Status = Status
def get_MaxNumOfRecords(self):
return self.MaxNumOfRecords
def set_MaxNumOfRecords(self, MaxNumOfRecords):
self.MaxNumOfRecords = MaxNumOfRecords
def get_SortColumn(self):
return self.SortColumn
def set_SortColumn(self, SortColumn):
self.SortColumn = SortColumn
def get_SortDirection(self):
return self.SortDirection
def set_SortDirection(self, SortDirection):
self.SortDirection = SortDirection
def hasContent_(self):
if (
self.FromDate is not None or
self.ToDate is not None or
self.ConfirmationNumber is not None or
self.AccountNumber is not None or
self.Status is not None or
self.MaxNumOfRecords is not None or
self.SortColumn is not None or
self.SortDirection is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='PickUpHistorySearchCriteria', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('PickUpHistorySearchCriteria')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'PickUpHistorySearchCriteria':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PickUpHistorySearchCriteria')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='PickUpHistorySearchCriteria', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PickUpHistorySearchCriteria'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='PickUpHistorySearchCriteria', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.FromDate is not None:
namespaceprefix_ = self.FromDate_nsprefix_ + ':' if (UseCapturedNS_ and self.FromDate_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sFromDate>%s</%sFromDate>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.FromDate), input_name='FromDate')), namespaceprefix_ , eol_))
if self.ToDate is not None:
namespaceprefix_ = self.ToDate_nsprefix_ + ':' if (UseCapturedNS_ and self.ToDate_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sToDate>%s</%sToDate>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.ToDate), input_name='ToDate')), namespaceprefix_ , eol_))
if self.ConfirmationNumber is not None:
namespaceprefix_ = self.ConfirmationNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.ConfirmationNumber_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sConfirmationNumber>%s</%sConfirmationNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.ConfirmationNumber), input_name='ConfirmationNumber')), namespaceprefix_ , eol_))
if self.AccountNumber is not None:
namespaceprefix_ = self.AccountNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.AccountNumber_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sAccountNumber>%s</%sAccountNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.AccountNumber), input_name='AccountNumber')), namespaceprefix_ , eol_))
if self.Status is not None:
namespaceprefix_ = self.Status_nsprefix_ + ':' if (UseCapturedNS_ and self.Status_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sStatus>%s</%sStatus>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Status), input_name='Status')), namespaceprefix_ , eol_))
if self.MaxNumOfRecords is not None:
namespaceprefix_ = self.MaxNumOfRecords_nsprefix_ + ':' if (UseCapturedNS_ and self.MaxNumOfRecords_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sMaxNumOfRecords>%s</%sMaxNumOfRecords>%s' % (namespaceprefix_ , self.gds_format_integer(self.MaxNumOfRecords, input_name='MaxNumOfRecords'), namespaceprefix_ , eol_))
if self.SortColumn is not None:
namespaceprefix_ = self.SortColumn_nsprefix_ + ':' if (UseCapturedNS_ and self.SortColumn_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSortColumn>%s</%sSortColumn>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.SortColumn), input_name='SortColumn')), namespaceprefix_ , eol_))
if self.SortDirection is not None:
namespaceprefix_ = self.SortDirection_nsprefix_ + ':' if (UseCapturedNS_ and self.SortDirection_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sSortDirection>%s</%sSortDirection>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.SortDirection), input_name='SortDirection')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'FromDate':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'FromDate')
value_ = self.gds_validate_string(value_, node, 'FromDate')
self.FromDate = value_
self.FromDate_nsprefix_ = child_.prefix
elif nodeName_ == 'ToDate':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'ToDate')
value_ = self.gds_validate_string(value_, node, 'ToDate')
self.ToDate = value_
self.ToDate_nsprefix_ = child_.prefix
elif nodeName_ == 'ConfirmationNumber':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'ConfirmationNumber')
value_ = self.gds_validate_string(value_, node, 'ConfirmationNumber')
self.ConfirmationNumber = value_
self.ConfirmationNumber_nsprefix_ = child_.prefix
elif nodeName_ == 'AccountNumber':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'AccountNumber')
value_ = self.gds_validate_string(value_, node, 'AccountNumber')
self.AccountNumber = value_
self.AccountNumber_nsprefix_ = child_.prefix
elif nodeName_ == 'Status':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Status')
value_ = self.gds_validate_string(value_, node, 'Status')
self.Status = value_
self.Status_nsprefix_ = child_.prefix
elif nodeName_ == 'MaxNumOfRecords' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'MaxNumOfRecords')
ival_ = self.gds_validate_integer(ival_, node, 'MaxNumOfRecords')
self.MaxNumOfRecords = ival_
self.MaxNumOfRecords_nsprefix_ = child_.prefix
elif nodeName_ == 'SortColumn':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'SortColumn')
value_ = self.gds_validate_string(value_, node, 'SortColumn')
self.SortColumn = value_
self.SortColumn_nsprefix_ = child_.prefix
elif nodeName_ == 'SortDirection':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'SortDirection')
"""
Utilities
"""
import os, sys, re, signal, subprocess, site
from importlib import import_module
###############################################################################
def expect(condition, error_msg, exc_type=SystemExit, error_prefix="ERROR:"):
###############################################################################
"""
Similar to assert, except it doesn't generate an ugly stacktrace. Useful for
checking user error, not programming error.
>>> expect(True, "error1")
>>> expect(False, "error2")
Traceback (most recent call last):
...
SystemExit: ERROR: error2
"""
if not condition:
msg = error_prefix + " " + error_msg
raise exc_type(msg)
###############################################################################
def run_cmd(cmd, input_str=None, from_dir=None, verbose=None, dry_run=False,
arg_stdout=subprocess.PIPE, arg_stderr=subprocess.PIPE, env=None, combine_output=False):
###############################################################################
"""
Wrapper around subprocess to make it much more convenient to run shell commands
>>> run_cmd('ls file_i_hope_doesnt_exist')[0] != 0
True
"""
arg_stderr = subprocess.STDOUT if combine_output else arg_stderr
from_dir = str(from_dir) if from_dir else from_dir
if verbose:
print("RUN: {}\nFROM: {}".format(cmd, os.getcwd() if from_dir is None else from_dir))
if dry_run:
return 0, "", ""
if input_str is not None:
stdin = subprocess.PIPE
input_str = input_str.encode('utf-8')
else:
stdin = None
proc = subprocess.Popen(cmd,
shell=True,
stdout=arg_stdout,
stderr=arg_stderr,
stdin=stdin,
cwd=from_dir,
env=env)
output, errput = proc.communicate(input_str)
if output is not None:
try:
output = output.decode('utf-8', errors='ignore')
output = output.strip()
except AttributeError:
pass
if errput is not None:
try:
errput = errput.decode('utf-8', errors='ignore')
errput = errput.strip()
except AttributeError:
pass
stat = proc.wait()
return stat, output, errput
###############################################################################
def run_cmd_no_fail(cmd, input_str=None, from_dir=None, verbose=None, dry_run=False,
arg_stdout=subprocess.PIPE, arg_stderr=subprocess.PIPE, env=None, combine_output=False, exc_type=SystemExit):
###############################################################################
"""
Wrapper around subprocess to make it much more convenient to run shell commands.
Expects command to work. Just returns output string.
>>> run_cmd_no_fail('echo foo') == 'foo'
True
>>> run_cmd_no_fail('echo THE ERROR >&2; false') # doctest:+ELLIPSIS
Traceback (most recent call last):
...
SystemExit: ERROR: Command: 'echo THE ERROR >&2; false' failed with error ...
>>> run_cmd_no_fail('grep foo', input_str='foo') == 'foo'
True
>>> run_cmd_no_fail('echo THE ERROR >&2', combine_output=True) == 'THE ERROR'
True
"""
stat, output, errput = run_cmd(cmd, input_str=input_str, from_dir=from_dir, verbose=verbose, dry_run=dry_run,
arg_stdout=arg_stdout, arg_stderr=arg_stderr, env=env, combine_output=combine_output)
if stat != 0:
# If command produced no errput, put output in the exception since we
# have nothing else to go on.
errput = output if not errput else errput
if errput is None:
errput = ""
expect(False, "Command: '{}' failed with error '{}' from dir '{}'".format(cmd, errput, os.getcwd() if from_dir is None else from_dir), exc_type=exc_type)
return output
###############################################################################
def check_minimum_python_version(major, minor):
###############################################################################
"""
Check your python version.
>>> check_minimum_python_version(sys.version_info[0], sys.version_info[1])
>>>
"""
msg = "Python " + str(major) + ", minor version " + str(minor) + " is required, you have " + str(sys.version_info[0]) + "." + str(sys.version_info[1])
expect(sys.version_info[0] > major or
(sys.version_info[0] == major and sys.version_info[1] >= minor), msg)
###############################################################################
def convert_to_seconds(time_str):
###############################################################################
"""
Convert time value in [[HH:]MM:]SS to seconds
>>> convert_to_seconds("42")
42
>>> convert_to_seconds("01:01:01")
3661
"""
components = time_str.split(":")
expect(len(components) < 4, "Unusual time string: '{}'".format(time_str))
components.reverse()
result = 0
for idx, component in enumerate(components):
result += int(component) * pow(60, idx)
return result
###############################################################################
def convert_to_babylonian_time(seconds):
###############################################################################
"""
Convert time value to seconds to HH:MM:SS
>>> convert_to_babylonian_time(3661)
'01:01:01'
"""
hours = int(seconds / 3600)
seconds %= 3600
minutes = int(seconds / 60)
seconds %= 60
return "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds)
###############################################################################
def format_time(time_format, input_format, input_time):
###############################################################################
"""
Converts the string input_time from input_format to time_format
Valid format specifiers are "%H", "%M", and "%S"
% signs must be followed by an H, M, or S and then a separator
Separators can be any string without digits or a % sign
Each specifier can occur more than once in the input_format,
but only the first occurrence will be used.
An example of a valid format: "%H:%M:%S"
Unlike strptime, this does support %H >= 24
>>> format_time("%H:%M:%S", "%H", "43")
'43:00:00'
>>> format_time("%H %M", "%M,%S", "59,59")
'0 59'
>>> format_time("%H, %S", "%H:%M:%S", "2:43:9")
'2, 09'
"""
input_fields = input_format.split("%")
expect(input_fields[0] == input_time[:len(input_fields[0])],
"Failed to parse the input time; does not match the header string")
input_time = input_time[len(input_fields[0]):]
timespec = {"H": None, "M": None, "S": None}
maxvals = {"M": 60, "S": 60}
DIGIT_CHECK = re.compile('[^0-9]*')
# Loop invariants given input follows the specs:
# field starts with H, M, or S
# input_time starts with a number corresponding with the start of field
for field in input_fields[1:]:
# Find all of the digits at the start of the string
spec = field[0]
value_re = re.match(r'\d*', input_time)
expect(value_re is not None,
"Failed to parse the input time for the '{}' specifier, expected an integer".format(spec))
value = value_re.group(0)
expect(spec in timespec, "Unknown time specifier '" + spec + "'")
# Don't do anything if the time field is already specified
if timespec[spec] is None:
# Verify we aren't exceeding the maximum value
if spec in maxvals:
expect(int(value) < maxvals[spec],
"Failed to parse the '{}' specifier: A value less than {:d} is expected".format(spec, maxvals[spec]))
timespec[spec] = value
input_time = input_time[len(value):]
# Check for the separator string
expect(len(re.match(DIGIT_CHECK, field).group(0)) == len(field),
"Numbers are not permissible in separator strings")
expect(input_time[:len(field) - 1] == field[1:],
"The separator string ({}) doesn't match '{}'".format(field[1:], input_time))
input_time = input_time[len(field) - 1:]
output_fields = time_format.split("%")
output_time = output_fields[0]
# Used when a value isn't given
min_len_spec = {"H": 1, "M": 2, "S": 2}
# Loop invariants given input follows the specs:
# field starts with H, M, or S
# output_time
for field in output_fields[1:]:
expect(field == output_fields[-1] or len(field) > 1,
"Separator strings are required to properly parse times")
spec = field[0]
expect(spec in timespec, "Unknown time specifier '" + spec + "'")
if timespec[spec] is not None:
output_time += "0" * (min_len_spec[spec] - len(timespec[spec]))
output_time += timespec[spec]
else:
output_time += "0" * min_len_spec[spec]
output_time += field[1:]
return output_time
###############################################################################
class SharedArea(object):
###############################################################################
"""
Enable 0002 umask within this manager
"""
def __init__(self, new_perms=0o002):
self._orig_umask = None
self._new_perms = new_perms
def __enter__(self):
self._orig_umask = os.umask(self._new_perms)
def __exit__(self, *_):
os.umask(self._orig_umask)
###############################################################################
class Timeout(object):
###############################################################################
"""
A context manager that implements a timeout. By default, it
will raise an exception, but a custom function call can be provided.
Passing None as seconds makes this class a no-op.
"""
def __init__(self, seconds, action=None):
self._seconds = seconds
self._action = action if action is not None else self._handle_timeout
def _handle_timeout(self, *_):
raise RuntimeError("Timeout expired")
def __enter__(self):
if self._seconds is not None:
signal.signal(signal.SIGALRM, self._action)
signal.alarm(self._seconds)
def __exit__(self, *_):
if self._seconds is not None:
signal.alarm(0)
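###############################################################################
# Example usage of the two context managers above (illustrative only):
#
#   with SharedArea():
#       ...                           # files created here get a 0002 umask
#
#   with Timeout(10):
#       run_cmd("some-long-command")  # raises RuntimeError after 10 seconds
#
#   with Timeout(None):
#       ...                           # no-op: no alarm is installed
###############################################################################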
###############################################################################
def median(items):
###############################################################################
"""
>>> items = [2.3]
>>> median(items)
2.3
>>> items = [2.3, 8.1, 3.4, 1.5, 11, 3.42321]
>>> median(items)
3.4116049999999998
>>> items = [2.3, 8.1, 3.4, 1.5, 11, 3.42321, -3.1]
>>> median(items)
3.4
"""
if not items:
return None
else:
quotient, remainder = divmod(len(items), 2)
return sorted(items)[quotient] if remainder else sum(sorted(items)[quotient - 1:quotient + 1]) / 2.
###############################################################################
def ensure_pip():
###############################################################################
"""
Ensures that pip is available. Notice that we cannot use the _ensure_pylib_impl
function below, since it would cause circular dependencies. This one has to
be done by hand.
"""
# Use ensurepip for installing pip
import ensurepip
ensurepip.bootstrap(user=True)
# needed to "rehash" available libs
site.main() # pylint: disable=no-member
_ = import_module("pip")
###############################################################################
def pip_install_lib(pip_libname):
###############################################################################
"""
Ask pip to install (or upgrade to) the latest available version of a package; callers check any minimum version separately.
"""
# Installs will use pip, so we need to ensure it is available
ensure_pip()
# Note: --trusted-host may not work for ancient versions of python
# --upgrade makes sure we get the latest version, even if one is already installed
stat, _, err = run_cmd("{} -m pip install --upgrade {} --trusted-host files.pythonhosted.org --user".format(sys.executable, pip_libname))
expect(stat == 0, "Failed to install {}, cannot continue:\n{}".format(pip_libname, err))
# needed to "rehash" available libs
site.main() # pylint: disable=no-member
###############################################################################
def package_version_ok(pkg, min_version=None):
###############################################################################
"""
Checks that the loaded package's version is >= the minimum required one.
If no minimum version is passed, then return True
"""
from pkg_resources import parse_version
return True if min_version is None else parse_version(pkg.__version__) >= parse_version(min_version)
###############################################################################
def _ensure_pylib_impl(libname, min_version=None, pip_libname=None):
###############################################################################
"""
Internal method, clients should not call this directly; please use one of the
public ensure_XXX methods. If one does not exist, we will need to evaluate
if we want to add a new outside dependency.
"""
install = False
try:
pkg = import_module(libname)
if not package_version_ok(pkg,min_version):
print("Detected version for package {} is too old: detected {}, required >= {}. Will attempt | |
flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(TForestFire self) -> TForestFire
__init__(TForestFire self, PNGraph const & GraphPt, double const & ForwBurnProb, double const & BackBurnProb,
double const & DecayProb=1.0, int const & RndSeed=1) -> TForestFire
Parameters:
GraphPt: PNGraph const &
ForwBurnProb: double const &
BackBurnProb: double const &
DecayProb: double const &
RndSeed: int const &
__init__(TForestFire self, PNGraph const & GraphPt, double const & ForwBurnProb, double const & BackBurnProb,
double const & DecayProb=1.0) -> TForestFire
Parameters:
GraphPt: PNGraph const &
ForwBurnProb: double const &
BackBurnProb: double const &
DecayProb: double const &
__init__(TForestFire self, PNGraph const & GraphPt, double const & ForwBurnProb, double const & BackBurnProb) -> TForestFire
Parameters:
GraphPt: PNGraph const &
ForwBurnProb: double const &
BackBurnProb: double const &
"""
_snap.TForestFire_swiginit(self,_snap.new_TForestFire(*args))
def SetGraph(self, *args):
"""
SetGraph(TForestFire self, PNGraph const & GraphPt)
Parameters:
GraphPt: PNGraph const &
"""
return _snap.TForestFire_SetGraph(self, *args)
def GetGraph(self):
"""
GetGraph(TForestFire self) -> PNGraph
Parameters:
self: TForestFire const *
"""
return _snap.TForestFire_GetGraph(self)
def SetBurnProb(self, *args):
"""
SetBurnProb(TForestFire self, double const & ForwBurnProb, double const & BackBurnProb)
Parameters:
ForwBurnProb: double const &
BackBurnProb: double const &
"""
return _snap.TForestFire_SetBurnProb(self, *args)
def SetProbDecay(self, *args):
"""
SetProbDecay(TForestFire self, double const & DecayProb)
Parameters:
DecayProb: double const &
"""
return _snap.TForestFire_SetProbDecay(self, *args)
def Infect(self, *args):
"""
Infect(TForestFire self, int const & NodeId)
Parameters:
NodeId: int const &
Infect(TForestFire self, TIntV InfectedNIdV)
Parameters:
InfectedNIdV: TIntV const &
"""
return _snap.TForestFire_Infect(self, *args)
def InfectAll(self):
"""
InfectAll(TForestFire self)
Parameters:
self: TForestFire *
"""
return _snap.TForestFire_InfectAll(self)
def InfectRnd(self, *args):
"""
InfectRnd(TForestFire self, int const & NInfect)
Parameters:
NInfect: int const &
"""
return _snap.TForestFire_InfectRnd(self, *args)
def BurnExpFire(self):
"""
BurnExpFire(TForestFire self)
Parameters:
self: TForestFire *
"""
return _snap.TForestFire_BurnExpFire(self)
def BurnGeoFire(self):
"""
BurnGeoFire(TForestFire self)
Parameters:
self: TForestFire *
"""
return _snap.TForestFire_BurnGeoFire(self)
def GetFireTm(self):
"""
GetFireTm(TForestFire self) -> int
Parameters:
self: TForestFire const *
"""
return _snap.TForestFire_GetFireTm(self)
def GetBurned(self):
"""
GetBurned(TForestFire self) -> int
Parameters:
self: TForestFire const *
"""
return _snap.TForestFire_GetBurned(self)
def GetBurnedNId(self, *args):
"""
GetBurnedNId(TForestFire self, int const & NIdN) -> int
Parameters:
NIdN: int const &
"""
return _snap.TForestFire_GetBurnedNId(self, *args)
def GetBurnedNIdV(self, *args):
"""
GetBurnedNIdV(TForestFire self) -> TIntV
GetBurnedNIdV(TForestFire self, TIntV NIdV)
Parameters:
NIdV: TIntV &
"""
return _snap.TForestFire_GetBurnedNIdV(self, *args)
def PlotFire(self, *args):
"""
PlotFire(TForestFire self, TStr FNmPref, TStr Desc, bool const & PlotAllBurned=False)
Parameters:
FNmPref: TStr const &
Desc: TStr const &
PlotAllBurned: bool const &
PlotFire(TForestFire self, TStr FNmPref, TStr Desc)
Parameters:
FNmPref: TStr const &
Desc: TStr const &
"""
return _snap.TForestFire_PlotFire(self, *args)
def GenGraph(*args):
"""
GenGraph(int const & Nodes, double const & FwdProb, double const & BckProb) -> PNGraph
Parameters:
Nodes: int const &
FwdProb: double const &
BckProb: double const &
"""
return _snap.TForestFire_GenGraph(*args)
GenGraph = staticmethod(GenGraph)
__swig_destroy__ = _snap.delete_TForestFire
TForestFire.SetGraph = new_instancemethod(_snap.TForestFire_SetGraph,None,TForestFire)
TForestFire.GetGraph = new_instancemethod(_snap.TForestFire_GetGraph,None,TForestFire)
TForestFire.SetBurnProb = new_instancemethod(_snap.TForestFire_SetBurnProb,None,TForestFire)
TForestFire.SetProbDecay = new_instancemethod(_snap.TForestFire_SetProbDecay,None,TForestFire)
TForestFire.Infect = new_instancemethod(_snap.TForestFire_Infect,None,TForestFire)
TForestFire.InfectAll = new_instancemethod(_snap.TForestFire_InfectAll,None,TForestFire)
TForestFire.InfectRnd = new_instancemethod(_snap.TForestFire_InfectRnd,None,TForestFire)
TForestFire.BurnExpFire = new_instancemethod(_snap.TForestFire_BurnExpFire,None,TForestFire)
TForestFire.BurnGeoFire = new_instancemethod(_snap.TForestFire_BurnGeoFire,None,TForestFire)
TForestFire.GetFireTm = new_instancemethod(_snap.TForestFire_GetFireTm,None,TForestFire)
TForestFire.GetBurned = new_instancemethod(_snap.TForestFire_GetBurned,None,TForestFire)
TForestFire.GetBurnedNId = new_instancemethod(_snap.TForestFire_GetBurnedNId,None,TForestFire)
TForestFire.GetBurnedNIdV = new_instancemethod(_snap.TForestFire_GetBurnedNIdV,None,TForestFire)
TForestFire.PlotFire = new_instancemethod(_snap.TForestFire_PlotFire,None,TForestFire)
TForestFire_swigregister = _snap.TForestFire_swigregister
TForestFire_swigregister(TForestFire)
def TForestFire_GenGraph(*args):
"""
TForestFire_GenGraph(int const & Nodes, double const & FwdProb, double const & BckProb) -> PNGraph
Parameters:
Nodes: int const &
FwdProb: double const &
BckProb: double const &
"""
return _snap.TForestFire_GenGraph(*args)
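# Hedged usage sketch of the static wrapper above; the node count and the
# forward/backward burn probabilities are illustrative values, and it assumes
# the usual SNAP convention that graph methods can be called directly on the
# returned PNGraph smart pointer.
def _example_forest_fire_graph():
    # Generate a directed graph with the requested number of nodes via the
    # forest-fire model documented on TForestFire.GenGraph.
    graph = TForestFire.GenGraph(1000, 0.35, 0.35)
    return graph.GetNodes(), graph.GetEdges()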
class TFfGGen(object):
"""Proxy of C++ TFfGGen class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
srUndef = _snap.TFfGGen_srUndef
srOk = _snap.TFfGGen_srOk
srFlood = _snap.TFfGGen_srFlood
srTimeLimit = _snap.TFfGGen_srTimeLimit
TimeLimitSec = _swig_property(_snap.TFfGGen_TimeLimitSec_get, _snap.TFfGGen_TimeLimitSec_set)
def __init__(self, *args):
"""
__init__(TFfGGen self, bool const & BurnExpFireP, int const & StartNNodes, double const & ForwBurnProb,
double const & BackBurnProb, double const & DecayProb, double const & Take2AmbasPrb,
double const & OrphanPrb) -> TFfGGen
Parameters:
BurnExpFireP: bool const &
StartNNodes: int const &
ForwBurnProb: double const &
BackBurnProb: double const &
DecayProb: double const &
Take2AmbasPrb: double const &
OrphanPrb: double const &
"""
_snap.TFfGGen_swiginit(self,_snap.new_TFfGGen(*args))
def GetGraph(self):
"""
GetGraph(TFfGGen self) -> PNGraph
Parameters:
self: TFfGGen const *
"""
return _snap.TFfGGen_GetGraph(self)
def SetGraph(self, *args):
"""
SetGraph(TFfGGen self, PNGraph const & NGraph)
Parameters:
NGraph: PNGraph const &
"""
return _snap.TFfGGen_SetGraph(self, *args)
def Clr(self):
"""
Clr(TFfGGen self)
Parameters:
self: TFfGGen *
"""
return _snap.TFfGGen_Clr(self)
def GetParamStr(self):
"""
GetParamStr(TFfGGen self) -> TStr
Parameters:
self: TFfGGen const *
"""
return _snap.TFfGGen_GetParamStr(self)
def AddNodes(self, *args):
"""
AddNodes(TFfGGen self, int const & GraphNodes, bool const & FloodStop=True) -> TFfGGen::TStopReason
Parameters:
GraphNodes: int const &
FloodStop: bool const &
AddNodes(TFfGGen self, int const & GraphNodes) -> TFfGGen::TStopReason
Parameters:
GraphNodes: int const &
"""
return _snap.TFfGGen_AddNodes(self, *args)
def GenGraph(self, *args):
"""
GenGraph(TFfGGen self, int const & GraphNodes, bool const & FloodStop=True) -> TFfGGen::TStopReason
Parameters:
GraphNodes: int const &
FloodStop: bool const &
GenGraph(TFfGGen self, int const & GraphNodes) -> TFfGGen::TStopReason
Parameters:
GraphNodes: int const &
GenGraph(TFfGGen self, int const & GraphNodes, PGStatVec & EvolStat, bool const & FloodStop=True) -> TFfGGen::TStopReason
Parameters:
GraphNodes: int const &
EvolStat: PGStatVec &
FloodStop: bool const &
GenGraph(TFfGGen self, int const & GraphNodes, PGStatVec & EvolStat) -> TFfGGen::TStopReason
Parameters:
GraphNodes: int const &
EvolStat: PGStatVec &
"""
return _snap.TFfGGen_GenGraph(self, *args)
def PlotFireSize(self, *args):
"""
PlotFireSize(TFfGGen self, TStr FNmPref, TStr DescStr)
Parameters:
FNmPref: TStr const &
DescStr: TStr const &
"""
return _snap.TFfGGen_PlotFireSize(self, *args)
def GenFFGraphs(*args):
"""
GenFFGraphs(double const & FProb, double const & BProb, TStr FNm)
Parameters:
FProb: double const &
BProb: double const &
FNm: TStr const &
"""
return _snap.TFfGGen_GenFFGraphs(*args)
GenFFGraphs = staticmethod(GenFFGraphs)
__swig_destroy__ = _snap.delete_TFfGGen
TFfGGen.GetGraph = new_instancemethod(_snap.TFfGGen_GetGraph,None,TFfGGen)
TFfGGen.SetGraph = new_instancemethod(_snap.TFfGGen_SetGraph,None,TFfGGen)
TFfGGen.Clr = new_instancemethod(_snap.TFfGGen_Clr,None,TFfGGen)
TFfGGen.GetParamStr = new_instancemethod(_snap.TFfGGen_GetParamStr,None,TFfGGen)
TFfGGen.AddNodes = new_instancemethod(_snap.TFfGGen_AddNodes,None,TFfGGen)
TFfGGen.GenGraph = new_instancemethod(_snap.TFfGGen_GenGraph,None,TFfGGen)
TFfGGen.PlotFireSize = new_instancemethod(_snap.TFfGGen_PlotFireSize,None,TFfGGen)
TFfGGen_swigregister = _snap.TFfGGen_swigregister
TFfGGen_swigregister(TFfGGen)
cvar = _snap.cvar
def TFfGGen_GenFFGraphs(*args):
"""
TFfGGen_GenFFGraphs(double const & FProb, double const & BProb, TStr FNm)
Parameters:
FProb: double const &
BProb: double const &
FNm: TStr const &
"""
return _snap.TFfGGen_GenFFGraphs(*args)
class TUndirFFire(object):
"""Proxy of C++ TUndirFFire class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, _BurnProb=0.3):
"""
__init__(TUndirFFire self, double const & _BurnProb=0.3) -> TUndirFFire
Parameters:
_BurnProb: double const &
__init__(TUndirFFire self) -> TUndirFFire
"""
_snap.TUndirFFire_swiginit(self,_snap.new_TUndirFFire(_BurnProb))
def SetGraph(self, *args):
"""
SetGraph(TUndirFFire self, PUNGraph const & GraphPt)
Parameters:
GraphPt: PUNGraph const &
"""
return _snap.TUndirFFire_SetGraph(self, *args)
def GetGraph(self):
"""
GetGraph(TUndirFFire self) -> PUNGraph
Parameters:
self: TUndirFFire const *
"""
return _snap.TUndirFFire_GetGraph(self)
def GetNBurned(self):
"""
GetNBurned(TUndirFFire self) -> int
Parameters:
self: TUndirFFire const *
"""
return _snap.TUndirFFire_GetNBurned(self)
def GetBurnedNId(self, *args):
"""
GetBurnedNId(TUndirFFire self, int const & n) -> int
Parameters:
n: int const &
"""
return _snap.TUndirFFire_GetBurnedNId(self, *args)
def BurnGeoFire(self, *args):
"""
BurnGeoFire(TUndirFFire self, int const & StartNId) -> int
Parameters:
StartNId: int const &
"""
return _snap.TUndirFFire_BurnGeoFire(self, *args)
def AddNodes(self, *args):
"""
AddNodes(TUndirFFire self, int const & GraphNodes, bool const & FloodStop=True) -> TFfGGen::TStopReason
Parameters:
GraphNodes: int const &
FloodStop: bool const &
AddNodes(TUndirFFire self, int const & GraphNodes) -> TFfGGen::TStopReason
Parameters:
GraphNodes: int const &
"""
return _snap.TUndirFFire_AddNodes(self, *args)
__swig_destroy__ = _snap.delete_TUndirFFire
TUndirFFire.SetGraph = new_instancemethod(_snap.TUndirFFire_SetGraph,None,TUndirFFire)
TUndirFFire.GetGraph = new_instancemethod(_snap.TUndirFFire_GetGraph,None,TUndirFFire)
TUndirFFire.GetNBurned = new_instancemethod(_snap.TUndirFFire_GetNBurned,None,TUndirFFire)
TUndirFFire.GetBurnedNId = new_instancemethod(_snap.TUndirFFire_GetBurnedNId,None,TUndirFFire)
TUndirFFire.BurnGeoFire = new_instancemethod(_snap.TUndirFFire_BurnGeoFire,None,TUndirFFire)
TUndirFFire.AddNodes = new_instancemethod(_snap.TUndirFFire_AddNodes,None,TUndirFFire)
TUndirFFire_swigregister = _snap.TUndirFFire_swigregister
TUndirFFire_swigregister(TUndirFFire)
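# Usage sketch for the undirected Forest Fire process wrapped above (GraphPt is a
# hypothetical PUNGraph built elsewhere; the docstring only says BurnGeoFire returns
# an int, presumably the burn size):
#   ff = TUndirFFire(0.3)
#   ff.SetGraph(GraphPt)
#   n_burned = ff.BurnGeoFire(0)                                    # start a fire at node 0
#   burned_ids = [ff.GetBurnedNId(i) for i in range(ff.GetNBurned())]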
class TCs(object):
"""Proxy of C++ TCs class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(TCs self) -> TCs
__init__(TCs self, TCs Cs) -> TCs
Parameters:
Cs: TCs const &
__init__(TCs self, int const & Int) -> TCs
Parameters:
Int: int const &
"""
_snap.TCs_swiginit(self,_snap.new_TCs(*args))
def __eq__(self, *args):
"""
__eq__(TCs self, TCs Cs) -> bool
Parameters:
Cs: TCs const &
"""
return _snap.TCs___eq__(self, *args)
def __iadd__(self, *args):
"""
__iadd__(TCs self, TCs Cs) -> TCs
Parameters:
Cs: TCs const &
__iadd__(TCs self, char const & Ch) -> TCs
Parameters:
Ch: char const &
__iadd__(TCs self, int const & Int) -> TCs
Parameters:
Int: int const &
"""
return _snap.TCs___iadd__(self, *args)
def Get(self):
"""
Get(TCs self) -> int
Parameters:
self: TCs const *
"""
return _snap.TCs_Get(self)
def GetCsFromBf(*args):
"""
GetCsFromBf(char * Bf, int const & BfL) -> TCs
Parameters:
Bf: char *
BfL: int const &
"""
return _snap.TCs_GetCsFromBf(*args)
GetCsFromBf = staticmethod(GetCsFromBf)
__swig_destroy__ = _snap.delete_TCs
TCs.__eq__ = new_instancemethod(_snap.TCs___eq__,None,TCs)
TCs.__iadd__ = new_instancemethod(_snap.TCs___iadd__,None,TCs)
TCs.Get = new_instancemethod(_snap.TCs_Get,None,TCs)
TCs_swigregister = _snap.TCs_swigregister
TCs_swigregister(TCs)
def TCs_GetCsFromBf(*args):
"""
TCs_GetCsFromBf(char * Bf, int const & BfL) -> TCs
Parameters:
Bf: char *
BfL: int const &
"""
return _snap.TCs_GetCsFromBf(*args)
from __future__ import print_function, division
from cProfile import label
from logging import raiseExceptions
from typing import Mapping, Union, Optional, Callable, Dict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
from tqdm import tqdm, trange
from torchsummary import summary
import utils_alg
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import platform
import models
import nn_utils
import copy
from timeit import default_timer as timer
from datetime import timedelta
import matplotlib.pyplot as plt
def clean():
plat = platform.system()
if plat == "Windows":
os.system("cls")
else:
os.system("clear")
class FedDevice():
def __init__(self, trainer:Union[nn_utils.Trainer, nn_utils.MFTrainer], state_dict, tag:str, pk:float, mask_weights:torch.Tensor=None, nk=1):
self.trainer = trainer
self.state_dict=copy.deepcopy(state_dict)
self.tag = tag # Device label
self.pk = pk # probability of being picked for a training round
self.nk = nk # device weight (e.g. number of examples/total, or some other weighting logic)
self.mask_weights = mask_weights
self.major_class = self.eval_major_class()
def __str__(self):
return f"Device {self.tag} | Rounds Completed: {self.trainer.rounds_completed}"
def round_fit(self, model):
acc, loss = self.trainer.round_fit_from_checkpoint(model, checkpoint=self.state_dict)
self.state_dict = copy.deepcopy(model.state_dict())
return acc, loss
def load_state_dict(self, state_dict):
self.state_dict = copy.deepcopy(state_dict)
def set_mu(self, mu):
self.trainer.mu=mu
def free(self):
self.state_dict=None
def eval_major_class(self):
if self.mask_weights is None or not (self.mask_weights-1).any():
return None # all weights are 1
else:
return torch.argmax(self.mask_weights)
class FedServer():
def __init__(self, model, trainer, tag:str="server", weights_generator:Union[Callable,str]=None):
self.model = model
self.state_dict=model.state_dict()
self.tag = tag
self.trainer = trainer
self.updates_cnt = 0
if weights_generator is None or weights_generator == "average":
self.gen_method = self.dicts_avg
self.weights_generator = "average"
elif weights_generator == "first":
self.gen_method = self.dicts_first
self.weights_generator = "first"
elif weights_generator == "top-k_avg":
self.gen_method = self.dicts_top_k_avg
self.weights_generator = "top-k_avg"
else:
self.weights_generator = "custom"
self.gen_method = weights_generator
def __str__(self):
return f"Device {self.tag} | Rounds Completed: {self.updates}"
def round_fit(self, model):
acc, loss = self.trainer.round_fit_from_checkpoint(model, checkpoint=self.state_dict)
self.state_dict = copy.deepcopy(model.state_dict())
return acc, loss
def update(self, *args):
result = self.gen_method(*args)
self.updates_cnt += 1
if self.weights_generator == "custom":
self.load_state_dict(result)
# Takes the average of the dicts as the new server state dict.
def dicts_avg(self, wk_list):
if wk_list is None or len(wk_list) == 0:
self.model.load_state_dict(self.state_dict)
return None
if len(wk_list) == 1:
self.state_dict = copy.deepcopy(wk_list[0])
self.model.load_state_dict(self.state_dict)
return self.state_dict
# cloning first element in state_dict
self.state_dict = copy.deepcopy(wk_list[0])
for key in wk_list[0]:
tot = wk_list[0][key]
for client_wk in wk_list[1:]:
tot = tot + client_wk[key]
self.state_dict[key] = tot/len(wk_list)
# cloning result in model_dict
self.model.load_state_dict(self.state_dict)
return self.state_dict
# Pick the first element of the list as the new server state dict.
# If the list is already ordered by accuracy/loss (or whatever metric),
# this amounts to picking the best-trained instance.
# ALERT: it is not advisable to use this when heterogeneous client data is
# involved or when the individual client trainings differ.
# dicts_avg can also be used this way by passing a singleton list that
# contains the best state_dict.
def dicts_first(self, wk_list):
if wk_list is None or len(wk_list) == 0:
self.model.load_state_dict(self.state_dict)
return None
else:
self.state_dict = copy.deepcopy(wk_list[0])
self.model.load_state_dict(self.state_dict)
return self.state_dict
def dicts_top_k_avg(self, wk_dict, perform, K):
if wk_dict is None or len(wk_dict) == 0:
self.model.load_state_dict(self.state_dict)
return None
elif len(wk_dict) == 1:
self.state_dict = copy.deepcopy(list(wk_dict.values())[0])
self.model.load_state_dict(self.state_dict)
return self.state_dict
else:
K = max(1,K)
top_devs = {k: v for k, v in sorted(perform.items(), key=lambda item: item[1])[::-1]}
top_devs = list(top_devs.keys())[:K]
top_k_weights = [ wk_dict[tag] for tag in top_devs]
self.state_dict = copy.deepcopy(top_k_weights[0])
for key in top_k_weights[0]:
tot = top_k_weights[0][key]
for client_wk in top_k_weights[1:]:
tot = tot + client_wk[key]
self.state_dict[key] = tot/len(top_k_weights) # len(..) can differ from K if top_devs is shorter
self.model.load_state_dict(self.state_dict)
return self.state_dict
def load_state_dict(self, state_dict):
self.model.load_state_dict(state_dict)
self.state_dict = copy.deepcopy(state_dict)
def test(self):
return self.trainer.test(self.model)
def set_mu(self, mu):
self.trainer.mu=mu
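# Usage sketch for the aggregation options above (hypothetical devices and values,
# not part of the run below). `update` forwards its arguments to the selected gen_method:
#   srv = FedServer(model, trainer, weights_generator="average")
#   srv.update([dev_a.state_dict, dev_b.state_dict])            # plain average of the state dicts
#   srv = FedServer(model, trainer, weights_generator="top-k_avg")
#   srv.update({"0": dev_a.state_dict, "1": dev_b.state_dict},  # state dicts keyed by tag,
#              {"0": 71.2, "1": 64.5}, 1)                       # per-tag accuracies, K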
def update_weights(devices_list, server_weights):
for dev in devices_list:
dev.load_state_dict(server_weights)
def update_mu(devices_list, mu):
for dev in devices_list:
dev.set_mu(mu)
def free_all(devices_list):
for dev in devices_list:
dev.free()
nn_utils.set_reproducibility()
clean()
##### Dataset
n_channels = 3
input_size_w = 32
input_size_h = 32
input_size = input_size_w*input_size_h
##### Model Hyper params
# Multi Layer Perceptron
# n_hidden = 9
#model = models.MLP(input_size, n_channels, n_hidden, models.CIFAR10_output_size)
# Convolutional Neural Network
n_features = 12
model = models.CNN(input_size, n_channels, n_features, models.CIFAR10_output_size)
##### Training Hyper params
device = torch.device("cpu") #torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
train_dict = {
"device": device,
"output_dim": models.CIFAR10_output_size, # 10
"epochs": 1,
"batch_size" : 128,
"batch_size_val" : 1000,
"data_transform" : "RGB",
"opt_name" : "Adam",
"lr": 0.003,
"momentum": 0.1,
"scheduler_bool": True,
"gamma": 0.9,
#"perm": nn_utils.permute_pixels,
"mu": 0.005 # if mu=0 => FedAvg
}
model.to(device)
test_trainer = False
if test_trainer:
#trainer = nn_utils.Trainer(model=model, train_dict=train_dict) # Model-based trainer
trainer = nn_utils.MFTrainer(train_dict=train_dict) # Model-free trainer
print(trainer.fit(model))
#### FedAVG/Prox Hyper params
emulated_devices = 200
rounds = 15
train_loner = True
pool = 20 # pool = emulated_devices => FedAvg
p_uniform = pool/emulated_devices # uniform probability of being chosen
adaptive_mu = False
adaptive_phase = 5
mu_inc = 0.1
# Synthetic Data Heterogeneity (alpha = beta = 0 homogeneous case)
# Imbalance follows this power law : clip(exp(vals*-alpha*numb_of_classes)+beta, min=0)
alpha = 0.09 # power factor
beta = 0 #0.2 # constant factor
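# Worked illustration of the power law above (an assumption about how `vals` is used:
# the class indices 0..9 for CIFAR-10). With alpha=0.09 and beta=0 the per-class
# weights decay as exp(-0.09*10*[0,1,2,...]) ~= [1.00, 0.41, 0.17, 0.07, ...],
# so each emulated device ends up dominated by a handful of classes:
#   vals = torch.arange(models.CIFAR10_output_size, dtype=torch.float)
#   class_w = torch.clamp(torch.exp(-alpha * vals * models.CIFAR10_output_size) + beta, min=0)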
devices_list = []
w_generators = ["average", "first", "top-k_avg"]
weights_generator = w_generators[0]
pick_top_k = pool # for top-k_avg
fn_list = ["uniform", "normal"]
sample_prob_fn = fn_list[0]
if sample_prob_fn == "uniform":
sample_prob = lambda : np.random.uniform(0,1)
# Note: this is not very useful in a uniform device-probability scenario
elif sample_prob_fn == "normal":
from scipy.stats import norm
norm_mean = 0.5
sigma = 0.3/emulated_devices
sample_prob = lambda : norm.cdf(np.random.uniform(-4,4)) #np.random.normal(norm_mean, sigma)
# Test Sampling
#utils_alg.test_sampling(pool, emulated_devices, sample_prob)
# Using a single pair of data loaders for the homogeneous case may improve
# performance, but it may not be ideal for some specific models:
# https://stackoverflow.com/questions/60311307/how-does-one-reset-the-dataloader-in-pytorch
train_loader, test_loader, train_dataset, test_dataset = models.get_CIFARloaders(train_dict["batch_size"],train_dict["batch_size_val"],train_dict["data_transform"], ret_datasets=True)
data_loaders = (train_loader, test_loader)
# Server Device Initialization
trainer = nn_utils.MFTrainer(data_loaders=data_loaders, train_dict=train_dict) # this is needed only for the testing phase
server = FedServer(model, trainer, tag="server", weights_generator=weights_generator)
if train_loner:
train_dict_loner = copy.deepcopy(train_dict)
train_dict_loner["mu"] = 0
# Loner Device used for comparison (weights do not update with server)
trainer = nn_utils.MFTrainer(data_loaders=data_loaders, train_dict=train_dict_loner)
loner = FedDevice(trainer=trainer, state_dict=server.state_dict, tag="loner", pk=1)
# Initializating devices to emulate
for i in range(emulated_devices):
# resetting state_dict is not necessary since the devices only train after a global model update from the server
# nn_utils.reset_model_params(model)
# initial_state_dict = model.state_dict()
# Note: hard_mask=True slows down device initialization but is more realistic (especially when simulating few devices)
train_loader, mask_weights = utils_alg.SIP(train_dataset, torch.arange(models.CIFAR10_output_size), train_dict["batch_size"], alpha=alpha, beta=beta, hard_mask=True)
data_loaders = (train_loader, data_loaders[1])
trainer = nn_utils.MFTrainer(data_loaders=data_loaders, train_dict=train_dict)
dev = FedDevice(trainer=trainer, state_dict=None, tag=str(i), pk=p_uniform, mask_weights=mask_weights)
devices_list.append(dev)
print(f"Building Federation Clients (devices): {i}/{emulated_devices}", end="\r")
# Test initial accuracy
test_out, test_string = devices_list[0].trainer.test(model)
init_loss = test_out["loss_averager"](None).detach().numpy()
print("\n\n"+test_string)
# Testing FedAvg
seq_runs = 0 # counts the number of sequential model trainings (including the loner device)
start_time = timer() # timer to get the total elapsed time
sampled = [] # store at each round the number of sampled devices (mean should be the pool value)
server_acc = [] # store at each round the server accuracy
mean_client_acc = [] # store at each round the mean of clients' accuracy
server_loss = [] # store at each round the server loss
best_dev = [] # store at each round the client device with best accuracy
tot_masks = torch.zeros(mask_weights.shape) # store the sum of the weights of the different masks
# Initializing accuracy of the untrained model
server_acc.append(test_out["accuracy"])
mean_client_acc.append(test_out["accuracy"])
if train_loner:
loner_loss = [] # store at each round the loner loss
loner_acc = [] # store at each round the loner device accuracy
# Initializing accuracy of the untrained model
loner_acc.append(test_out["accuracy"])
for round in range(1,rounds+1):
round_weights = {}
round_sampled_devices = []
# Sampling phase
for dev in devices_list:
if sample_prob() <= dev.pk:
round_sampled_devices.append(dev)
tot_masks += dev.mask_weights
sampled_len = len(round_sampled_devices)
sampled.append(sampled_len)
update_weights(round_sampled_devices, server.state_dict) # more efficient: we only update the devices working this round
print("\n##########################################\n")
sampled_len = len(round_sampled_devices)
print(f"\n\n## Round {round}/{rounds} | Selected: {sampled_len}\n")
# Training
sum_acc = 0
max_acc = 0
bdev = None # best device tag
client_perform = {}
for i, dev in enumerate(round_sampled_devices):
print(f"Training Client {i+1}/{sampled_len}:\n")
acc, _ = dev.round_fit(server.model)
client_perform[dev.tag] = acc
if acc > max_acc:
max_acc = acc
bdev = int(dev.tag)
sum_acc += acc
print(str(dev) + f"/{round} | Accuracy: {acc} % | Major class: {dev.major_class} | Device hash: {nn_utils.state_hash(dev.state_dict)}\n")
# print(f"\nDevice hash: {nn_utils.state_hash(dev.state_dict)}\n")
print("-----------------------------\n")
round_weights[dev.tag] = dev.state_dict
seq_runs += 1
if sampled_len != 0:
mean_acc = sum_acc/sampled_len
best_dev.append(bdev)
else:
if len(mean_client_acc)!=0:
mean_acc = mean_client_acc[-1]
else:
mean_acc = sum_acc
mean_client_acc.append(mean_acc)
if train_loner:
# Training the loner
print(f"Training Loner device:\n")
acc, lon_loss = loner.round_fit(server.model)
loner_acc.append(acc)
loner_loss.append(lon_loss.numpy())
print(str(loner) + f" | Accuracy: {acc} %\n")
#!python3
import os
import logging
import pandas as pd
import numpy as np
import json
import time
from BarSeqPy.translate_R_to_pandas import *
"""
All functions and subroutines:
analysis_2:
initialize_gene_fit_d
FitReadMetrics
FitQuality
CrudeOp
AdjacentPairs
paircor
FEBA_Exp_Status
normalize_per_strain_values
StrainClosestGenes
(py_unsplit) from translate_R...
create_strain_lrn
"""
def analysis_2(GeneFitResults, exps_df, all_df, genes_df,
strainsUsed_list, t0tot,
cfg=None,
meta_ix=7, debug=False):
"""
Args:
GeneFitResults:
setnameIndex -> ret_d
ret_d:
gene_fit: DataFrame, contains cols:
locusId <str>: The locusId to which this row is associated.
fit: fitRaw column normalized by Median
fitNaive (float): Median normalized log2 difference between tot0 and tot
fitRaw (float): Sum of weighted adjusted fitness scores divided by total weight.
n (int): Total number of strains in this locusId
nEff (float): The sum of the strain weights at these indices / max weight
pseudovar (float): [ (median(abs(fit1 - fit2))^2)/Constant ] * (sdNaive/(median(sdNaive[genesUsed12])^2))
sd (float): Standard Deviation computed fancy way
sumsq (float): [Sum of the weighted square of the difference between adjusted fitness
and fitRaw] divided by total weight.
sdNaive (float): Standard Deviation computed in Naive way
tot (int ): The sum of the experiment reads over the locusID
tot0 (int): The sum of the Time0s over the locusId
se (float) Standard Error
t: (float) t-statistic
fit1 (float): For every locusId found in genesUsed12, we give the fit value of first_half_df
fit2 (float): For every locusId found in genesUsed12, we give the fit value of second_half_df
fitnorm (float): Scaffold normalized fit scores (median and mode)
fitnorm1 (float): fit1 + fitnorm - fit
fitnorm2 (float): fit2 + fitnorm - fit
tot1 (int or nan): For every locusId found in genesUsed12, we give the tot value of first_half_df
tot0_1 (int or nan): For every locusId found in genesUsed12, we give the tot0 value of first_half_df
tot2 (int or nan): For every locusId found in genesUsed12, we give the tot value of second_half_df
tot0_2 (int or nan): For every locusId found in genesUsed12, we give the tot0 value of second_half_df
strain_fit: pandas Series (float) per-strain fitness (len(all_df))
strain_se: pandas Series (float) (len(all_df))
cfg (python dict):
minT0Strain: int
status_d (python dict):
min_gMed : (float)
max_mad12 : (float)
min_cor12 : (float)
max_gccor : (float)
max_adjcor : (float)
Returns:
gene_fit_d: (python dict)
g (pandas Series (str)): pandas Series of locusIds
lr (float): dataframe with one column per setindexname
lrNaive (float): dataframe with one column per setindexname
lr1 (float): dataframe with one column per setindexname
lr2 (float): dataframe with one column per setindexname
lrn (float): dataframe with one column per setindexname
lrn1 (float): dataframe with one column per setindexname
lrn2 (float): dataframe with one column per setindexname
fitRaw (float): dataframe with one column per setindexname
n (int): dataframe with one column per setindexname
nEff (float): dataframe with one column per setindexname
pseudovar (float): dataframe with one column per setindexname
q (pandas DataFrame): contains columns:
name (str),
short (str),
t0set (str),
num (int),
nMapped (int),
nPastEnd (int),
nGenic (int),
nUsed (int),
gMed (int),
gMedt0 (int),
gMean (float),
cor12 (float),
mad12 (float),
mad12c (float),
mad12c_t0 (float),
opcor (float),
adjcor (float),
gccor (float),
maxFit (float)
u (bool)
sumsq (float): dataframe with one column per setindexname
sd (float): dataframe with one column per setindexname
sdNaive (float): dataframe with one column per setindexname
se (float) Standard Error dataframe with one column per setindexname
t: (float) t-statistic dataframe with one column per setindexname
tot1 (int or nan) dataframe with one column per setindexname
tot0_1 (int or nan) dataframe with one column per setindexname
tot2 (int or nan) dataframe with one column per setindexname
tot0_2 (int or nan) dataframe with one column per setindexname
tot (int or nan) dataframe with one column per setindexname
tot0 (int or nan) dataframe with one column per setindexname
version (str)
Description:
First, we create 'gene_fit_d', a python dict.
GeneFitResults is a python dict whose keys are experiment names and
whose values are dataframes of per-gene categories (such as log ratios).
We convert it into a dict keyed by category, where each category points
to a dataframe with one column per experiment and one row per gene.
The genes themselves are stored as a pandas Series under the key 'g'
(this Series contains the locusIds, as strings).
While building gene_fit_d we also rename the key 'fitnorm' to 'lrn'
(normalized log ratios) and 'fit' to 'lr' (plain log ratios).
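A rough sketch of that reshaping step (a hypothetical helper; it assumes every
per-experiment 'gene_fit' dataframe lists its locusIds in the same order):
    def build_gene_fit_d(GeneFitResults):
        first = next(iter(GeneFitResults.values()))["gene_fit"]
        rename = {"fit": "lr", "fitnorm": "lrn", "fit1": "lr1", "fit2": "lr2",
                  "fitnorm1": "lrn1", "fitnorm2": "lrn2", "fitNaive": "lrNaive"}
        gene_fit_d = {"g": first["locusId"].copy()}
        for col in first.columns.drop("locusId"):
            gene_fit_d[rename.get(col, col)] = pd.DataFrame(
                {exp: ret_d["gene_fit"][col].values
                 for exp, ret_d in GeneFitResults.items()})
        return gene_fit_d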
Then we initialize the Quality DataFrame.
Out of the rows of exps_df, we only take the experiments
that are found in the columns of the dataframe of normalized
log ratios. Meaning we only take the experiments
that passed the thresholds necessary to have analysis
performed on them. We take these rows from the dataframe
'exps_df' but only the columns "name", "short", and "t0set",
all other columns we ignore. We find these rows through the column
"name".
Next we run FitReadMetrics and FitQuality in order to add
more columns to our Quality DataFrame.
FitReadMetrics:
We take the subset of all_df with the used experiment names
as the columns (all rows included), and we compute the sums
over all rows for each of the experiments for the following:
nMapped (the total number of reads mapped under that column).
nPastEnd (the total number of pastEnd reads under that column).
nGenic (the total number of reads with good gene insertions).
A good gene insertion is one whose insertion location 'f' lies between 0.1 and 0.9.
Then we create a dataframe whose index is the experiment
names and the columns are 'nMapped', 'nPastEnd' and 'nGenic',
so the number of rows of the dataframe is the same as the
number of good experiments, whereas the number of columns
is fixed to 3.
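A sketch of those per-experiment sums (the column names 'f' and 'scaffold', and
the 'pastEnd' marker, are assumptions about all_df, not taken from this file):
    used_names = list(gene_fit_d['lrn'].columns)
    genic = (all_df['f'] > 0.1) & (all_df['f'] < 0.9)
    frm_df = pd.DataFrame({
        'nMapped':  all_df[used_names].sum(axis=0),
        'nPastEnd': all_df.loc[all_df['scaffold'] == 'pastEnd', used_names].sum(axis=0),
        'nGenic':   all_df.loc[genic, used_names].sum(axis=0),
    })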
In FitQuality we create 12 metrics based on correlations
and finding adjacent pairs of genes or genes that are
expected to be in the same operon based on their distance.
First we get the dataframe crudeOpGenes, which is similar to the genes_df dataframe
in terms of its columns, but it is a dataframe with pairs of consecutive
genes from genes_df, with an extra column 'Sep' denoting the distance
between the genes, and a column 'bOp' that says whether this distance
is less than the median distance or not (True if less, False if greater).
We get another similar dataframe called adj which are the adjacent pairs
but without the Sep or bOp values. (Can we not use this dataframe?)
Then we get the matching indices between the gene_fit_d genes (under the
key 'g') and the locusIds in genes_df. Using these matching indices,
we compute the correlation between each column of the normalized log ratios
dataframe and the GC values of those genes in genes_df.
GC_corr is a pandas Series whose length is the number of experiments
(columns) in gene_fit_d['lrn']; its index labels are the experiment names,
and its values are the Pearson correlations.
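A sketch of that GC correlation (assuming genes_df carries 'locusId' and 'GC'
columns; 'GC' is an assumption about the column name):
    gc = genes_df.set_index('locusId')['GC'].reindex(gene_fit_d['g']).reset_index(drop=True)
    GC_corr = gene_fit_d['lrn'].corrwith(gc)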
Finally, we compute statistics regarding the experiments and return
a dataframe with the following labels and meanings:
"nUsed" (int): Sum over all locusIds of the sum over the locusId (Sum of sum) per experiment
"gMed" (int): Median of all locusIds over the sums per locusId per experiment
"gMedt0" (int): Median over the time0 totals per experiment
"gMean" (float): Mean of all locusIDs over the sums per locusId per experiment
"cor12" (float): Correlation of normalized log ratios between first and second half
gene insertion locations per experiment
"mad12" (float): Medians of the Absolute values of the differences between first and second half
normalized log ratios.
"mad12c" (float): Median of some statistic over all experiments
"mad12c_t0" (float): Same as above but over the time0s
"opcor" (float): Correlation |