#
# PySNMP MIB module Nortel-Magellan-Passport-MpaNetworkLinkMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-Magellan-Passport-MpaNetworkLinkMIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:18:31 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
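# All symbols below are resolved through the `mibBuilder` instance that
# pysnmp's MibBuilder injects into this module's namespace at load time;
# the file is not meant to be imported directly.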
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint")
RowPointer, PassportCounter64, Integer32, DisplayString, RowStatus, StorageType, Counter32, Gauge32, InterfaceIndex, Unsigned32 = mibBuilder.importSymbols("Nortel-Magellan-Passport-StandardTextualConventionsMIB", "RowPointer", "PassportCounter64", "Integer32", "DisplayString", "RowStatus", "StorageType", "Counter32", "Gauge32", "InterfaceIndex", "Unsigned32")
Hex, HexString, Unsigned64, DigitString, AsciiString, Link, EnterpriseDateAndTime, NonReplicated = mibBuilder.importSymbols("Nortel-Magellan-Passport-TextualConventionsMIB", "Hex", "HexString", "Unsigned64", "DigitString", "AsciiString", "Link", "EnterpriseDateAndTime", "NonReplicated")
passportMIBs, components = mibBuilder.importSymbols("Nortel-Magellan-Passport-UsefulDefinitionsMIB", "passportMIBs", "components")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Integer32, iso, Counter64, ObjectIdentity, Counter32, MibIdentifier, NotificationType, Gauge32, IpAddress, ModuleIdentity, Unsigned32, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Integer32", "iso", "Counter64", "ObjectIdentity", "Counter32", "MibIdentifier", "NotificationType", "Gauge32", "IpAddress", "ModuleIdentity", "Unsigned32", "Bits")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
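# Root identifiers: the MIB module (...119) and the mpanl (MPA network link)
# component (...123), both under the Nortel (Northern Telecom) enterprise
# arc 1.3.6.1.4.1.562.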
mpaNetworkLinkMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 119))
mpanl = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123))
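# Row-status (provisioning) table for the Mpanl component: one row per
# network link, indexed by mpanlIndex (1..65535).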
mpanlRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 1), )
if mibBuilder.loadTexts: mpanlRowStatusTable.setStatus('mandatory')
mpanlRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"))
if mibBuilder.loadTexts: mpanlRowStatusEntry.setStatus('mandatory')
mpanlRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlRowStatus.setStatus('mandatory')
mpanlComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlComponentName.setStatus('mandatory')
mpanlStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlStorageType.setStatus('mandatory')
mpanlIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)))
if mibBuilder.loadTexts: mpanlIndex.setStatus('mandatory')
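# Customer identifier (CID) data for the link.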
mpanlCidDataTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 10), )
if mibBuilder.loadTexts: mpanlCidDataTable.setStatus('mandatory')
mpanlCidDataEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"))
if mibBuilder.loadTexts: mpanlCidDataEntry.setStatus('mandatory')
mpanlCustomerIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 10, 1, 1), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 8191), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlCustomerIdentifier.setStatus('mandatory')
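# Provisionable attributes: a free-form comment string of up to 40 ASCII
# characters.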
mpanlProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 12), )
if mibBuilder.loadTexts: mpanlProvTable.setStatus('mandatory')
mpanlProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 12, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"))
if mibBuilder.loadTexts: mpanlProvEntry.setStatus('mandatory')
mpanlCommentText = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 12, 1, 1), AsciiString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlCommentText.setStatus('mandatory')
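# Emission priority queue configuration: the number of emission queues on
# the link (2 or 4, defaulting to 4).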
mpanlEmissionPriorityQsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 13), )
if mibBuilder.loadTexts: mpanlEmissionPriorityQsTable.setStatus('mandatory')
mpanlEmissionPriorityQsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 13, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"))
if mibBuilder.loadTexts: mpanlEmissionPriorityQsEntry.setStatus('mandatory')
mpanlNumberOfEmissionQs = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 13, 1, 1), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(2, 2), ValueRangeConstraint(4, 4), )).clone(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlNumberOfEmissionQs.setStatus('mandatory')
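# OSI-style state and status attributes of the link: administrative,
# operational and usage states plus availability, procedural, control,
# alarm, standby and unknown statuses.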
mpanlStateTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 14), )
if mibBuilder.loadTexts: mpanlStateTable.setStatus('mandatory')
mpanlStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 14, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"))
if mibBuilder.loadTexts: mpanlStateEntry.setStatus('mandatory')
mpanlAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 14, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("locked", 0), ("unlocked", 1), ("shuttingDown", 2))).clone('unlocked')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlAdminState.setStatus('mandatory')
mpanlOperationalState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 14, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('disabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlOperationalState.setStatus('mandatory')
mpanlUsageState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 14, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("idle", 0), ("active", 1), ("busy", 2))).clone('idle')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlUsageState.setStatus('mandatory')
mpanlAvailabilityStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 14, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlAvailabilityStatus.setStatus('mandatory')
mpanlProceduralStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 14, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlProceduralStatus.setStatus('mandatory')
mpanlControlStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 14, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlControlStatus.setStatus('mandatory')
mpanlAlarmStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 14, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlAlarmStatus.setStatus('mandatory')
mpanlStandbyStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 14, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 15))).clone(namedValues=NamedValues(("hotStandby", 0), ("coldStandby", 1), ("providingService", 2), ("notSet", 15))).clone('notSet')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlStandbyStatus.setStatus('mandatory')
mpanlUnknownStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 14, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("false", 0), ("true", 1))).clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlUnknownStatus.setStatus('mandatory')
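# Error statistics: the last unknown DLCI received and counters for frames
# arriving with unknown DLCIs or invalid headers.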
mpanlStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 16), )
if mibBuilder.loadTexts: mpanlStatsTable.setStatus('mandatory')
mpanlStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 16, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"))
if mibBuilder.loadTexts: mpanlStatsEntry.setStatus('mandatory')
mpanlLastUnknownDlci = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 16, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1023))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlLastUnknownDlci.setStatus('mandatory')
mpanlUnknownDlciFramesFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 16, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlUnknownDlciFramesFromIf.setStatus('mandatory')
mpanlInvalidHeaderFramesFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 16, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlInvalidHeaderFramesFromIf.setStatus('mandatory')
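# Aggregate traffic counters: frames and octets to and from the interface.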
mpanlTrafficStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 17), )
if mibBuilder.loadTexts: mpanlTrafficStatsTable.setStatus('mandatory')
mpanlTrafficStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 17, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"))
if mibBuilder.loadTexts: mpanlTrafficStatsEntry.setStatus('mandatory')
mpanlFrmToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 17, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFrmToIf.setStatus('mandatory')
mpanlOctetToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 17, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlOctetToIf.setStatus('mandatory')
mpanlFrmFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 17, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFrmFromIf.setStatus('mandatory')
mpanlOctetFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 17, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlOctetFromIf.setStatus('mandatory')
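# ifTable-style attributes: administrative status and the ifIndex assigned
# to this link.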
mpanlIfEntryTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 23), )
if mibBuilder.loadTexts: mpanlIfEntryTable.setStatus('mandatory')
mpanlIfEntryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 23, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"))
if mibBuilder.loadTexts: mpanlIfEntryEntry.setStatus('mandatory')
mpanlIfAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 23, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3))).clone('up')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlIfAdminStatus.setStatus('mandatory')
mpanlIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 23, 1, 2), InterfaceIndex().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlIfIndex.setStatus('mandatory')
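# SNMP operational status of the interface.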
mpanlOperStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 24), )
if mibBuilder.loadTexts: mpanlOperStatusTable.setStatus('mandatory')
mpanlOperStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 24, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"))
if mibBuilder.loadTexts: mpanlOperStatusEntry.setStatus('mandatory')
mpanlSnmpOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 24, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3))).clone('up')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSnmpOperStatus.setStatus('mandatory')
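# Operational measurement: round-trip delay across the link (range
# 0..10000; presumably milliseconds, though the DESCRIPTION text is not
# carried in this file).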
mpanlOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 25), )
if mibBuilder.loadTexts: mpanlOperTable.setStatus('mandatory')
mpanlOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 25, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"))
if mibBuilder.loadTexts: mpanlOperEntry.setStatus('mandatory')
mpanlRoundTripDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 25, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlRoundTripDelay.setStatus('mandatory')
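# Per-emission-queue counters of frames sent to the interface, indexed by
# link and queue number (0..3); the octet equivalent follows.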
mpanlFrmToIfByQueueTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 350), )
if mibBuilder.loadTexts: mpanlFrmToIfByQueueTable.setStatus('mandatory')
mpanlFrmToIfByQueueEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 350, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlFrmToIfByQueueIndex"))
if mibBuilder.loadTexts: mpanlFrmToIfByQueueEntry.setStatus('mandatory')
mpanlFrmToIfByQueueIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 350, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 3)))
if mibBuilder.loadTexts: mpanlFrmToIfByQueueIndex.setStatus('mandatory')
mpanlFrmToIfByQueueValue = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 350, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFrmToIfByQueueValue.setStatus('mandatory')
mpanlOctetToIfByQueueTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 351), )
if mibBuilder.loadTexts: mpanlOctetToIfByQueueTable.setStatus('mandatory')
mpanlOctetToIfByQueueEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 351, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlOctetToIfByQueueIndex"))
if mibBuilder.loadTexts: mpanlOctetToIfByQueueEntry.setStatus('mandatory')
mpanlOctetToIfByQueueIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 351, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 3)))
if mibBuilder.loadTexts: mpanlOctetToIfByQueueIndex.setStatus('mandatory')
mpanlOctetToIfByQueueValue = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 351, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlOctetToIfByQueueValue.setStatus('mandatory')
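# Dna subcomponent: the data network address (DNA) of the link, with
# row-status, outgoing-option (default transfer priority) and call-option
# (accounting) tables, indexed by mpanlIndex and mpanlDnaIndex.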
mpanlDna = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2))
mpanlDnaRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 1), )
if mibBuilder.loadTexts: mpanlDnaRowStatusTable.setStatus('mandatory')
mpanlDnaRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDnaIndex"))
if mibBuilder.loadTexts: mpanlDnaRowStatusEntry.setStatus('mandatory')
mpanlDnaRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDnaRowStatus.setStatus('mandatory')
mpanlDnaComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDnaComponentName.setStatus('mandatory')
mpanlDnaStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDnaStorageType.setStatus('mandatory')
mpanlDnaIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mpanlDnaIndex.setStatus('mandatory')
mpanlDnaOutgoingOptionsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 11), )
if mibBuilder.loadTexts: mpanlDnaOutgoingOptionsTable.setStatus('mandatory')
mpanlDnaOutgoingOptionsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDnaIndex"))
if mibBuilder.loadTexts: mpanlDnaOutgoingOptionsEntry.setStatus('mandatory')
mpanlDnaDefaultTransferPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 11, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))).clone(namedValues=NamedValues(("n0", 0), ("n1", 1), ("n2", 2), ("n3", 3), ("n4", 4), ("n5", 5), ("n6", 6), ("n7", 7), ("n8", 8), ("n9", 9), ("n10", 10), ("n11", 11), ("n12", 12), ("n13", 13), ("n14", 14), ("n15", 15))).clone('n0')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlDnaDefaultTransferPriority.setStatus('mandatory')
mpanlDnaCallOptionsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 13), )
if mibBuilder.loadTexts: mpanlDnaCallOptionsTable.setStatus('mandatory')
mpanlDnaCallOptionsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 13, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDnaIndex"))
if mibBuilder.loadTexts: mpanlDnaCallOptionsEntry.setStatus('mandatory')
mpanlDnaAccountClass = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 13, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlDnaAccountClass.setStatus('mandatory')
mpanlDnaAccountCollection = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 13, 1, 11), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="80")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlDnaAccountCollection.setStatus('mandatory')
mpanlDnaServiceExchange = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 13, 1, 12), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlDnaServiceExchange.setStatus('mandatory')
mpanlDnaEgressAccounting = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 2, 13, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('no')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlDnaEgressAccounting.setStatus('mandatory')
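# Framer subcomponent: binds the link to a port via the interfaceName Link
# attribute and carries framing provisioning, state, error statistics and
# utilization gauges.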
mpanlFramer = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3))
mpanlFramerRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 1), )
if mibBuilder.loadTexts: mpanlFramerRowStatusTable.setStatus('mandatory')
mpanlFramerRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlFramerIndex"))
if mibBuilder.loadTexts: mpanlFramerRowStatusEntry.setStatus('mandatory')
mpanlFramerRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlFramerRowStatus.setStatus('mandatory')
mpanlFramerComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerComponentName.setStatus('mandatory')
mpanlFramerStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerStorageType.setStatus('mandatory')
mpanlFramerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mpanlFramerIndex.setStatus('mandatory')
mpanlFramerProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 10), )
if mibBuilder.loadTexts: mpanlFramerProvTable.setStatus('mandatory')
mpanlFramerProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlFramerIndex"))
if mibBuilder.loadTexts: mpanlFramerProvEntry.setStatus('mandatory')
mpanlFramerInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 10, 1, 1), Link()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlFramerInterfaceName.setStatus('mandatory')
mpanlFramerLinkTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 11), )
if mibBuilder.loadTexts: mpanlFramerLinkTable.setStatus('mandatory')
mpanlFramerLinkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlFramerIndex"))
if mibBuilder.loadTexts: mpanlFramerLinkEntry.setStatus('mandatory')
mpanlFramerFlagsBetweenFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 11, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 16)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlFramerFlagsBetweenFrames.setStatus('mandatory')
mpanlFramerStateTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 12), )
if mibBuilder.loadTexts: mpanlFramerStateTable.setStatus('mandatory')
mpanlFramerStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 12, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlFramerIndex"))
if mibBuilder.loadTexts: mpanlFramerStateEntry.setStatus('mandatory')
mpanlFramerAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 12, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("locked", 0), ("unlocked", 1), ("shuttingDown", 2))).clone('unlocked')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerAdminState.setStatus('mandatory')
mpanlFramerOperationalState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 12, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('disabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerOperationalState.setStatus('mandatory')
mpanlFramerUsageState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 12, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("idle", 0), ("active", 1), ("busy", 2))).clone('idle')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerUsageState.setStatus('mandatory')
mpanlFramerStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 13), )
if mibBuilder.loadTexts: mpanlFramerStatsTable.setStatus('mandatory')
mpanlFramerStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 13, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlFramerIndex"))
if mibBuilder.loadTexts: mpanlFramerStatsEntry.setStatus('mandatory')
mpanlFramerFrmToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 13, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerFrmToIf.setStatus('mandatory')
mpanlFramerFrmFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 13, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerFrmFromIf.setStatus('mandatory')
mpanlFramerOctetFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 13, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerOctetFromIf.setStatus('mandatory')
mpanlFramerAborts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 13, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerAborts.setStatus('mandatory')
mpanlFramerCrcErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 13, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerCrcErrors.setStatus('mandatory')
mpanlFramerLrcErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 13, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerLrcErrors.setStatus('mandatory')
mpanlFramerNonOctetErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 13, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerNonOctetErrors.setStatus('mandatory')
mpanlFramerOverruns = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 13, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerOverruns.setStatus('mandatory')
mpanlFramerUnderruns = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 13, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerUnderruns.setStatus('mandatory')
mpanlFramerLargeFrmErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 13, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerLargeFrmErrors.setStatus('mandatory')
mpanlFramerFrmModeErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 13, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerFrmModeErrors.setStatus('mandatory')
mpanlFramerUtilTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 14), )
if mibBuilder.loadTexts: mpanlFramerUtilTable.setStatus('mandatory')
mpanlFramerUtilEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 14, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlFramerIndex"))
if mibBuilder.loadTexts: mpanlFramerUtilEntry.setStatus('mandatory')
mpanlFramerNormPrioLinkUtilToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 14, 1, 1), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerNormPrioLinkUtilToIf.setStatus('mandatory')
mpanlFramerNormPrioLinkUtilFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 3, 14, 1, 3), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFramerNormPrioLinkUtilFromIf.setStatus('mandatory')
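# PrefixDna subcomponent: DNA prefixes indexed by numbering plan indicator
# (X.121 or E.164) and the address digit string.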
mpanlPrefixDna = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 4))
mpanlPrefixDnaRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 4, 1), )
if mibBuilder.loadTexts: mpanlPrefixDnaRowStatusTable.setStatus('mandatory')
mpanlPrefixDnaRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 4, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlPrefixDnaNumberingPlanIndicatorIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlPrefixDnaDataNetworkAddressIndex"))
if mibBuilder.loadTexts: mpanlPrefixDnaRowStatusEntry.setStatus('mandatory')
mpanlPrefixDnaRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 4, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlPrefixDnaRowStatus.setStatus('mandatory')
mpanlPrefixDnaComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 4, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlPrefixDnaComponentName.setStatus('mandatory')
mpanlPrefixDnaStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 4, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlPrefixDnaStorageType.setStatus('mandatory')
mpanlPrefixDnaNumberingPlanIndicatorIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 4, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1))))
if mibBuilder.loadTexts: mpanlPrefixDnaNumberingPlanIndicatorIndex.setStatus('mandatory')
mpanlPrefixDnaDataNetworkAddressIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 4, 1, 1, 11), DigitString().subtype(subtypeSpec=ValueSizeConstraint(1, 15)))
if mibBuilder.loadTexts: mpanlPrefixDnaDataNetworkAddressIndex.setStatus('mandatory')
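# Dlci subcomponent: per-DLCI (Frame Relay data link connection identifier,
# 17..1007) state, Q.933 call data, negotiated service parameters,
# traffic/discard statistics, accounting-interval counters and A-bit status.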
mpanlDlci = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5))
mpanlDlciRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 1), )
if mibBuilder.loadTexts: mpanlDlciRowStatusTable.setStatus('mandatory')
mpanlDlciRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"))
if mibBuilder.loadTexts: mpanlDlciRowStatusEntry.setStatus('mandatory')
mpanlDlciRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciRowStatus.setStatus('mandatory')
mpanlDlciComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciComponentName.setStatus('mandatory')
mpanlDlciStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciStorageType.setStatus('mandatory')
mpanlDlciIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(17, 1007)))
if mibBuilder.loadTexts: mpanlDlciIndex.setStatus('mandatory')
mpanlDlciStateTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 10), )
if mibBuilder.loadTexts: mpanlDlciStateTable.setStatus('mandatory')
mpanlDlciStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"))
if mibBuilder.loadTexts: mpanlDlciStateEntry.setStatus('mandatory')
mpanlDlciAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("locked", 0), ("unlocked", 1), ("shuttingDown", 2))).clone('unlocked')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciAdminState.setStatus('mandatory')
mpanlDlciOperationalState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('disabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciOperationalState.setStatus('mandatory')
mpanlDlciUsageState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("idle", 0), ("active", 1), ("busy", 2))).clone('idle')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciUsageState.setStatus('mandatory')
mpanlDlciAvailabilityStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 10, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciAvailabilityStatus.setStatus('mandatory')
mpanlDlciProceduralStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 10, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciProceduralStatus.setStatus('mandatory')
mpanlDlciControlStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 10, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciControlStatus.setStatus('mandatory')
mpanlDlciAlarmStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 10, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciAlarmStatus.setStatus('mandatory')
mpanlDlciStandbyStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 10, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 15))).clone(namedValues=NamedValues(("hotStandby", 0), ("coldStandby", 1), ("providingService", 2), ("notSet", 15))).clone('notSet')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciStandbyStatus.setStatus('mandatory')
mpanlDlciUnknownStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 10, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("false", 0), ("true", 1))).clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciUnknownStatus.setStatus('mandatory')
mpanlDlciCalldTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 11), )
if mibBuilder.loadTexts: mpanlDlciCalldTable.setStatus('mandatory')
mpanlDlciCalldEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"))
if mibBuilder.loadTexts: mpanlDlciCalldEntry.setStatus('mandatory')
mpanlDlciQ933CallState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 11, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 3, 6, 9, 10, 11, 12, 19, 20))).clone(namedValues=NamedValues(("null", 0), ("callInitiated", 1), ("outgoingCallProceeding", 3), ("callPresent", 6), ("incomingCallProceeding", 9), ("active", 10), ("disconnectRequest", 11), ("disconnectIndication", 12), ("releaseRequest", 19), ("notApplicable", 20)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciQ933CallState.setStatus('mandatory')
mpanlDlciQ933CallReference = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 11, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciQ933CallReference.setStatus('mandatory')
mpanlDlciSpOpTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 12), )
if mibBuilder.loadTexts: mpanlDlciSpOpTable.setStatus('mandatory')
mpanlDlciSpOpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 12, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"))
if mibBuilder.loadTexts: mpanlDlciSpOpEntry.setStatus('mandatory')
mpanlDlciMaximumFrameSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 12, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciMaximumFrameSize.setStatus('mandatory')
mpanlDlciCommittedBurstSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 12, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 50000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciCommittedBurstSize.setStatus('mandatory')
mpanlDlciExcessBurstSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 12, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 50000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciExcessBurstSize.setStatus('mandatory')
mpanlDlciAccounting = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 12, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("off", 0), ("on", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciAccounting.setStatus('mandatory')
mpanlDlciEmissionPriorityToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 12, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 1), ValueRangeConstraint(2, 2), ValueRangeConstraint(3, 3), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciEmissionPriorityToIf.setStatus('mandatory')
mpanlDlciTransferPriToNwk = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 12, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciTransferPriToNwk.setStatus('mandatory')
mpanlDlciTransferPriFromNwk = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 12, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciTransferPriFromNwk.setStatus('mandatory')
mpanlDlciStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13), )
if mibBuilder.loadTexts: mpanlDlciStatsTable.setStatus('mandatory')
mpanlDlciStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"))
if mibBuilder.loadTexts: mpanlDlciStatsEntry.setStatus('mandatory')
mpanlDlciFrmToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciFrmToIf.setStatus('mandatory')
mpanlDlciFecnFrmToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciFecnFrmToIf.setStatus('mandatory')
mpanlDlciBecnFrmToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciBecnFrmToIf.setStatus('mandatory')
mpanlDlciBciToSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciBciToSubnet.setStatus('mandatory')
mpanlDlciDeFrmToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDeFrmToIf.setStatus('mandatory')
mpanlDlciDiscCongestedToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscCongestedToIf.setStatus('mandatory')
mpanlDlciDiscDeCongestedToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscDeCongestedToIf.setStatus('mandatory')
mpanlDlciFrmFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciFrmFromIf.setStatus('mandatory')
mpanlDlciFecnFrmFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciFecnFrmFromIf.setStatus('mandatory')
mpanlDlciBecnFrmFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciBecnFrmFromIf.setStatus('mandatory')
mpanlDlciFciFromSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciFciFromSubnet.setStatus('mandatory')
mpanlDlciBciFromSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 12), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciBciFromSubnet.setStatus('mandatory')
mpanlDlciDeFrmFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 13), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDeFrmFromIf.setStatus('mandatory')
mpanlDlciExcessFrmFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 14), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciExcessFrmFromIf.setStatus('mandatory')
mpanlDlciDiscExcessFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 15), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscExcessFromIf.setStatus('mandatory')
mpanlDlciDiscFrameAbit = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 16), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscFrameAbit.setStatus('mandatory')
mpanlDlciDiscCongestedFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 17), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscCongestedFromIf.setStatus('mandatory')
mpanlDlciDiscDeCongestedFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 18), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscDeCongestedFromIf.setStatus('mandatory')
mpanlDlciErrorShortFrmFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 19), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciErrorShortFrmFromIf.setStatus('mandatory')
mpanlDlciErrorLongFrmFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 20), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciErrorLongFrmFromIf.setStatus('mandatory')
mpanlDlciBecnFrmSetByService = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 21), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciBecnFrmSetByService.setStatus('mandatory')
mpanlDlciBytesToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 22), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciBytesToIf.setStatus('mandatory')
mpanlDlciDeBytesToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 23), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDeBytesToIf.setStatus('mandatory')
mpanlDlciDiscCongestedToIfBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 24), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscCongestedToIfBytes.setStatus('mandatory')
mpanlDlciDiscDeCongestedToIfBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 25), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscDeCongestedToIfBytes.setStatus('mandatory')
mpanlDlciBytesFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 26), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciBytesFromIf.setStatus('mandatory')
mpanlDlciDeBytesFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 27), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDeBytesFromIf.setStatus('mandatory')
mpanlDlciExcessBytesFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 28), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciExcessBytesFromIf.setStatus('mandatory')
mpanlDlciDiscExcessFromIfBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 29), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscExcessFromIfBytes.setStatus('mandatory')
mpanlDlciDiscByteAbit = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 30), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscByteAbit.setStatus('mandatory')
mpanlDlciDiscCongestedFromIfBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 31), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscCongestedFromIfBytes.setStatus('mandatory')
mpanlDlciDiscDeCongestedFromIfBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 32), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscDeCongestedFromIfBytes.setStatus('mandatory')
mpanlDlciErrorLongBytesFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 34), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciErrorLongBytesFromIf.setStatus('mandatory')
mpanlDlciTransferPriorityToNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 37), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))).clone(namedValues=NamedValues(("n0", 0), ("n1", 1), ("n2", 2), ("n3", 3), ("n4", 4), ("n5", 5), ("n6", 6), ("n7", 7), ("n8", 8), ("n9", 9), ("n10", 10), ("n11", 11), ("n12", 12), ("n13", 13), ("n14", 14), ("n15", 15)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciTransferPriorityToNetwork.setStatus('obsolete')
mpanlDlciTransferPriorityFromNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 13, 1, 38), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))).clone(namedValues=NamedValues(("n0", 0), ("n1", 1), ("n2", 2), ("n3", 3), ("n4", 4), ("n5", 5), ("n6", 6), ("n7", 7), ("n8", 8), ("n9", 9), ("n10", 10), ("n11", 11), ("n12", 12), ("n13", 13), ("n14", 14), ("n15", 15)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciTransferPriorityFromNetwork.setStatus('obsolete')
mpanlDlciIntTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14), )
if mibBuilder.loadTexts: mpanlDlciIntTable.setStatus('mandatory')
mpanlDlciIntEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"))
if mibBuilder.loadTexts: mpanlDlciIntEntry.setStatus('mandatory')
mpanlDlciStartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1, 1), EnterpriseDateAndTime().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(19, 19), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciStartTime.setStatus('mandatory')
mpanlDlciTotalIngressBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1, 2), Unsigned64().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciTotalIngressBytes.setStatus('mandatory')
mpanlDlciTotalEgressBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1, 3), Unsigned64().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciTotalEgressBytes.setStatus('mandatory')
mpanlDlciEirIngressBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1, 4), Unsigned64().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciEirIngressBytes.setStatus('mandatory')
mpanlDlciEirEgressBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1, 5), Unsigned64().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciEirEgressBytes.setStatus('mandatory')
mpanlDlciDiscardedBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1, 6), Unsigned64().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscardedBytes.setStatus('mandatory')
mpanlDlciTotalIngressSegFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciTotalIngressSegFrm.setStatus('mandatory')
mpanlDlciTotalEgressSegFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciTotalEgressSegFrm.setStatus('mandatory')
mpanlDlciEirIngressSegFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciEirIngressSegFrm.setStatus('mandatory')
mpanlDlciEirEgressSegFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciEirEgressSegFrm.setStatus('mandatory')
mpanlDlciDiscardedSegFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciDiscardedSegFrm.setStatus('mandatory')
mpanlDlciCallReferenceNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1, 17), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciCallReferenceNumber.setStatus('mandatory')
mpanlDlciElapsedDifference = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 14, 1, 18), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciElapsedDifference.setStatus('mandatory')
mpanlDlciAbitTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 15), )
if mibBuilder.loadTexts: mpanlDlciAbitTable.setStatus('mandatory')
mpanlDlciAbitEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 15, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"))
if mibBuilder.loadTexts: mpanlDlciAbitEntry.setStatus('mandatory')
mpanlDlciABitStatusToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 15, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("inactive", 0), ("active", 1), ("notApplicable", 2))).clone('inactive')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciABitStatusToIf.setStatus('mandatory')
mpanlDlciABitReasonToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 15, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 3, 5, 6))).clone(namedValues=NamedValues(("notApplicable", 0), ("remoteUserSignaled", 1), ("remoteLmiError", 3), ("remoteLinkDown", 5), ("vcDown", 6))).clone('vcDown')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciABitReasonToIf.setStatus('mandatory')
mpanlDlciABitStatusFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 15, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("inactive", 0), ("active", 1), ("notApplicable", 2))).clone('inactive')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciABitStatusFromIf.setStatus('mandatory')
mpanlDlciABitReasonFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 15, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 3, 5, 6))).clone(namedValues=NamedValues(("notApplicable", 0), ("remoteUserSignaled", 1), ("remoteLmiError", 3), ("remoteLinkDown", 5), ("vcDown", 6))).clone('vcDown')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciABitReasonFromIf.setStatus('mandatory')
mpanlDlciLoopbackState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 15, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("off", 0), ("on", 1))).clone('off')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLoopbackState.setStatus('mandatory')
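# Lb subcomponent of Dlci: local and remote loopback test counters.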
mpanlDlciLb = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2))
mpanlDlciLbRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 1), )
if mibBuilder.loadTexts: mpanlDlciLbRowStatusTable.setStatus('mandatory')
mpanlDlciLbRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciLbIndex"))
if mibBuilder.loadTexts: mpanlDlciLbRowStatusEntry.setStatus('mandatory')
mpanlDlciLbRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbRowStatus.setStatus('mandatory')
mpanlDlciLbComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbComponentName.setStatus('mandatory')
mpanlDlciLbStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbStorageType.setStatus('mandatory')
mpanlDlciLbIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mpanlDlciLbIndex.setStatus('mandatory')
mpanlDlciLbStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10), )
if mibBuilder.loadTexts: mpanlDlciLbStatsTable.setStatus('mandatory')
mpanlDlciLbStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciLbIndex"))
if mibBuilder.loadTexts: mpanlDlciLbStatsEntry.setStatus('mandatory')
mpanlDlciLbLocalTotalFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbLocalTotalFrm.setStatus('mandatory')
mpanlDlciLbLocalTotalBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbLocalTotalBytes.setStatus('mandatory')
mpanlDlciLbLocalFecnFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbLocalFecnFrm.setStatus('mandatory')
mpanlDlciLbLocalBecnFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbLocalBecnFrm.setStatus('mandatory')
mpanlDlciLbLocalDeFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbLocalDeFrm.setStatus('mandatory')
mpanlDlciLbLocalDeBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbLocalDeBytes.setStatus('mandatory')
mpanlDlciLbRemoteTotalFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbRemoteTotalFrm.setStatus('mandatory')
mpanlDlciLbRemoteTotalBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbRemoteTotalBytes.setStatus('mandatory')
mpanlDlciLbRemoteFecnFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbRemoteFecnFrm.setStatus('mandatory')
mpanlDlciLbRemoteBecnFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbRemoteBecnFrm.setStatus('mandatory')
mpanlDlciLbRemoteDeFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10, 1, 13), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbRemoteDeFrm.setStatus('mandatory')
mpanlDlciLbRemoteDeBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 2, 10, 1, 14), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLbRemoteDeBytes.setStatus('mandatory')
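# Dlci Vc subcomponent: the virtual circuit behind the DLCI. Besides
# row status it exposes call administrative data (Cad), interval data
# (Intd), data-path exception statistics (Frd) and the Dmep pointer
# table.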
mpanlDlciVc = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3))
mpanlDlciVcRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 1), )
if mibBuilder.loadTexts: mpanlDlciVcRowStatusTable.setStatus('mandatory')
mpanlDlciVcRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciVcIndex"))
if mibBuilder.loadTexts: mpanlDlciVcRowStatusEntry.setStatus('mandatory')
mpanlDlciVcRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcRowStatus.setStatus('mandatory')
mpanlDlciVcComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcComponentName.setStatus('mandatory')
mpanlDlciVcStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcStorageType.setStatus('mandatory')
mpanlDlciVcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mpanlDlciVcIndex.setStatus('mandatory')
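# Vc Cad table: administrative and state data for the call - VC type
# (svc/pvc/spvc), current and previous call states (P1-P7 style) with
# diagnostic codes, called/calling numbering plan (X.121 or E.164),
# DNA and LCN, plus accounting, priority, segmentation and routing
# class-of-service attributes.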
mpanlDlciVcCadTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10), )
if mibBuilder.loadTexts: mpanlDlciVcCadTable.setStatus('mandatory')
mpanlDlciVcCadEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciVcIndex"))
if mibBuilder.loadTexts: mpanlDlciVcCadEntry.setStatus('mandatory')
mpanlDlciVcType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("svc", 0), ("pvc", 1), ("spvc", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcType.setStatus('mandatory')
mpanlDlciVcState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("creating", 0), ("readyP1", 1), ("dteWaitingP2", 2), ("dceWaitingP3", 3), ("dataTransferP4", 4), ("unsupportedP5", 5), ("dteClearRequestP6", 6), ("dceClearIndicationP7", 7), ("termination", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcState.setStatus('mandatory')
mpanlDlciVcPreviousState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("creating", 0), ("readyP1", 1), ("dteWaitingP2", 2), ("dceWaitingP3", 3), ("dataTransferP4", 4), ("unsupportedP5", 5), ("dteClearRequestP6", 6), ("dceClearIndicationP7", 7), ("termination", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcPreviousState.setStatus('mandatory')
mpanlDlciVcDiagnosticCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcDiagnosticCode.setStatus('mandatory')
mpanlDlciVcPreviousDiagnosticCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcPreviousDiagnosticCode.setStatus('mandatory')
mpanlDlciVcCalledNpi = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcCalledNpi.setStatus('mandatory')
mpanlDlciVcCalledDna = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 7), DigitString().subtype(subtypeSpec=ValueSizeConstraint(1, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcCalledDna.setStatus('mandatory')
mpanlDlciVcCalledLcn = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcCalledLcn.setStatus('mandatory')
mpanlDlciVcCallingNpi = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcCallingNpi.setStatus('mandatory')
mpanlDlciVcCallingDna = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 10), DigitString().subtype(subtypeSpec=ValueSizeConstraint(1, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcCallingDna.setStatus('mandatory')
mpanlDlciVcCallingLcn = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcCallingLcn.setStatus('mandatory')
mpanlDlciVcAccountingEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("yes", 0), ("no", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcAccountingEnabled.setStatus('mandatory')
mpanlDlciVcFastSelectCall = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcFastSelectCall.setStatus('mandatory')
mpanlDlciVcPathReliability = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("high", 0), ("normal", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcPathReliability.setStatus('mandatory')
mpanlDlciVcAccountingEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("callingEnd", 0), ("calledEnd", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcAccountingEnd.setStatus('mandatory')
mpanlDlciVcPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("normal", 0), ("high", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcPriority.setStatus('mandatory')
mpanlDlciVcSegmentSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 22), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcSegmentSize.setStatus('mandatory')
mpanlDlciVcMaxSubnetPktSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 27), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcMaxSubnetPktSize.setStatus('mandatory')
mpanlDlciVcRcosToNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("throughput", 0), ("delay", 1), ("multimedia", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcRcosToNetwork.setStatus('mandatory')
mpanlDlciVcRcosFromNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("throughput", 0), ("delay", 1), ("multimedia", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcRcosFromNetwork.setStatus('mandatory')
mpanlDlciVcEmissionPriorityToNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("normal", 0), ("high", 1), ("interrupting", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcEmissionPriorityToNetwork.setStatus('mandatory')
mpanlDlciVcEmissionPriorityFromNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("normal", 0), ("high", 1), ("interrupting", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcEmissionPriorityFromNetwork.setStatus('mandatory')
mpanlDlciVcDataPath = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 10, 1, 32), AsciiString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcDataPath.setStatus('mandatory')
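# Vc Intd table: interval data for the call in progress - call
# reference number, elapsed time, segment counts in each direction
# and the call start time.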
mpanlDlciVcIntdTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 11), )
if mibBuilder.loadTexts: mpanlDlciVcIntdTable.setStatus('mandatory')
mpanlDlciVcIntdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciVcIndex"))
if mibBuilder.loadTexts: mpanlDlciVcIntdEntry.setStatus('mandatory')
mpanlDlciVcCallReferenceNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 11, 1, 1), Hex().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcCallReferenceNumber.setStatus('mandatory')
mpanlDlciVcElapsedTimeTillNow = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 11, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcElapsedTimeTillNow.setStatus('mandatory')
mpanlDlciVcSegmentsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 11, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcSegmentsRx.setStatus('mandatory')
mpanlDlciVcSegmentsSent = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 11, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcSegmentsSent.setStatus('mandatory')
mpanlDlciVcStartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 11, 1, 5), EnterpriseDateAndTime().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(19, 19)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcStartTime.setStatus('mandatory')
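# Vc Frd table: data-path exception counters - congestion and
# forwarding failures toward the subnet; out-of-range, combined-error
# and duplicate frames from the subnet; frame-loss and packet-retry
# timeouts; and out-of-sequence packet/byte accounting with peaks.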
mpanlDlciVcFrdTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12), )
if mibBuilder.loadTexts: mpanlDlciVcFrdTable.setStatus('mandatory')
mpanlDlciVcFrdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciVcIndex"))
if mibBuilder.loadTexts: mpanlDlciVcFrdEntry.setStatus('mandatory')
mpanlDlciVcFrmCongestedToSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcFrmCongestedToSubnet.setStatus('mandatory')
mpanlDlciVcCannotForwardToSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcCannotForwardToSubnet.setStatus('mandatory')
mpanlDlciVcNotDataXferToSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcNotDataXferToSubnet.setStatus('mandatory')
mpanlDlciVcOutOfRangeFrmFromSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcOutOfRangeFrmFromSubnet.setStatus('mandatory')
mpanlDlciVcCombErrorsFromSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcCombErrorsFromSubnet.setStatus('mandatory')
mpanlDlciVcDuplicatesFromSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcDuplicatesFromSubnet.setStatus('mandatory')
mpanlDlciVcNotDataXferFromSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcNotDataXferFromSubnet.setStatus('mandatory')
mpanlDlciVcFrmLossTimeouts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcFrmLossTimeouts.setStatus('mandatory')
mpanlDlciVcOoSeqByteCntExceeded = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcOoSeqByteCntExceeded.setStatus('mandatory')
mpanlDlciVcPeakOoSeqPktCount = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcPeakOoSeqPktCount.setStatus('mandatory')
mpanlDlciVcPeakOoSeqFrmForwarded = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 12), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcPeakOoSeqFrmForwarded.setStatus('mandatory')
mpanlDlciVcSendSequenceNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 13), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcSendSequenceNumber.setStatus('mandatory')
mpanlDlciVcPktRetryTimeouts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 15), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcPktRetryTimeouts.setStatus('mandatory')
mpanlDlciVcPeakRetryQueueSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 16), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcPeakRetryQueueSize.setStatus('mandatory')
mpanlDlciVcSubnetRecoveries = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 17), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcSubnetRecoveries.setStatus('mandatory')
mpanlDlciVcOoSeqPktCntExceeded = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 19), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcOoSeqPktCntExceeded.setStatus('mandatory')
mpanlDlciVcPeakOoSeqByteCount = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 12, 1, 20), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 50000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcPeakOoSeqByteCount.setStatus('mandatory')
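# Vc Dmep table: a list of RowPointer values (mpanlDlciVcDmepValue)
# in which each pointer doubles as the row index; the expansion of
# "Dmep" is not given in this module.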
mpanlDlciVcDmepTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 417), )
if mibBuilder.loadTexts: mpanlDlciVcDmepTable.setStatus('mandatory')
mpanlDlciVcDmepEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 417, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciVcIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciVcDmepValue"))
if mibBuilder.loadTexts: mpanlDlciVcDmepEntry.setStatus('mandatory')
mpanlDlciVcDmepValue = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 3, 417, 1, 1), RowPointer()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciVcDmepValue.setStatus('mandatory')
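# Dlci LCo subcomponent: the logical connection carrying the DLCI
# across the subnet. Its tables cover path state and routing
# attributes (PathData), 64-bit traffic counters (Stats), call data
# (CallData) and the legs of the selected route (Path).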
mpanlDlciLCo = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4))
mpanlDlciLCoRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 1), )
if mibBuilder.loadTexts: mpanlDlciLCoRowStatusTable.setStatus('mandatory')
mpanlDlciLCoRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciLCoIndex"))
if mibBuilder.loadTexts: mpanlDlciLCoRowStatusEntry.setStatus('mandatory')
mpanlDlciLCoRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoRowStatus.setStatus('mandatory')
mpanlDlciLCoComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoComponentName.setStatus('mandatory')
mpanlDlciLCoStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoStorageType.setStatus('mandatory')
mpanlDlciLCoIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mpanlDlciLCoIndex.setStatus('mandatory')
mpanlDlciLCoPathDataTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10), )
if mibBuilder.loadTexts: mpanlDlciLCoPathDataTable.setStatus('mandatory')
mpanlDlciLCoPathDataEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciLCoIndex"))
if mibBuilder.loadTexts: mpanlDlciLCoPathDataEntry.setStatus('mandatory')
mpanlDlciLCoState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("pathDown", 0), ("selectingRoute", 1), ("connecting", 2), ("pathUp", 3), ("pathDownRetrying", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoState.setStatus('mandatory')
mpanlDlciLCoEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("calling", 0), ("called", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoEnd.setStatus('mandatory')
mpanlDlciLCoCostMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoCostMetric.setStatus('mandatory')
mpanlDlciLCoDelayMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoDelayMetric.setStatus('mandatory')
mpanlDlciLCoRoundTripDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 200000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoRoundTripDelay.setStatus('mandatory')
mpanlDlciLCoSetupPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoSetupPriority.setStatus('mandatory')
mpanlDlciLCoHoldingPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoHoldingPriority.setStatus('mandatory')
mpanlDlciLCoRequiredTxBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 9), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 2048000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoRequiredTxBandwidth.setStatus('mandatory')
mpanlDlciLCoRequiredRxBandwidth = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 10), Gauge32().subtype(subtypeSpec=ValueRangeConstraint(0, 2048000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoRequiredRxBandwidth.setStatus('mandatory')
mpanlDlciLCoRequiredTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("voice", 0), ("data", 1), ("video", 2), ("trafficType1", 3), ("trafficType2", 4), ("trafficType3", 5), ("trafficType4", 6), ("trafficType5", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoRequiredTrafficType.setStatus('mandatory')
mpanlDlciLCoPermittedTrunkTypes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 12), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoPermittedTrunkTypes.setStatus('mandatory')
mpanlDlciLCoRequiredSecurity = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 13), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoRequiredSecurity.setStatus('mandatory')
mpanlDlciLCoRequiredCustomerParameter = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 14), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoRequiredCustomerParameter.setStatus('mandatory')
mpanlDlciLCoEmissionPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 15), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoEmissionPriority.setStatus('mandatory')
mpanlDlciLCoDiscardPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 16), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 3))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoDiscardPriority.setStatus('mandatory')
mpanlDlciLCoRetryCount = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 18), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoRetryCount.setStatus('mandatory')
mpanlDlciLCoPathFailureCount = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 19), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoPathFailureCount.setStatus('mandatory')
mpanlDlciLCoReasonForNoRoute = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))).clone(namedValues=NamedValues(("none", 0), ("destinationNameTooLong", 1), ("destinationNotSpecified", 2), ("unknownDestinationName", 3), ("incorrectDestination", 4), ("incorrectDestinationEndPoint", 5), ("unknownSource", 6), ("unknownDestination", 7), ("sameNode", 8), ("routeCostTooMuch", 9), ("routesDelayTooLong", 10), ("attributesNotMet", 11), ("anError", 12), ("attributeProfileProblem", 13), ("manualPathIndexProblem", 14))).clone('none')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoReasonForNoRoute.setStatus('mandatory')
mpanlDlciLCoLastTearDownReason = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22))).clone(namedValues=NamedValues(("none", 0), ("normalShutDown", 1), ("insufficientTxLcOrBandwidth", 2), ("insufficientRxLcOrBandwidth", 3), ("trunkFailure", 4), ("trunkCardFailure", 5), ("operatorForced", 6), ("lostLcnClash", 7), ("networkCongestion", 8), ("trunkNotFound", 9), ("farEndNotFound", 10), ("wrongModuleReached", 11), ("farEndBusy", 12), ("callLoopedBack", 13), ("unknownReason", 14), ("farEndNotReady", 15), ("remoteNameMismatch", 16), ("serviceTypeMismatch", 17), ("reconnectFromFarEnd", 18), ("bumped", 19), ("accessCardFailure", 20), ("optimized", 21), ("overrideRemoteName", 22))).clone('none')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoLastTearDownReason.setStatus('mandatory')
mpanlDlciLCoPathFailureAction = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disconnectConnection", 0), ("reRoutePath", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoPathFailureAction.setStatus('mandatory')
mpanlDlciLCoBumpPreference = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("bumpWhenNecessary", 0), ("bumpToObtainBestRoute", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoBumpPreference.setStatus('mandatory')
mpanlDlciLCoOptimization = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoOptimization.setStatus('mandatory')
mpanlDlciLCoPathUpDateTime = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 10, 1, 25), EnterpriseDateAndTime().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(19, 19)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoPathUpDateTime.setStatus('mandatory')
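# LCo Stats table: 64-bit packet and byte counters in each direction
# between this end point and the network (PassportCounter64).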
mpanlDlciLCoStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 11), )
if mibBuilder.loadTexts: mpanlDlciLCoStatsTable.setStatus('mandatory')
mpanlDlciLCoStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciLCoIndex"))
if mibBuilder.loadTexts: mpanlDlciLCoStatsEntry.setStatus('mandatory')
mpanlDlciLCoPktsToNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 11, 1, 1), PassportCounter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoPktsToNetwork.setStatus('mandatory')
mpanlDlciLCoBytesToNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 11, 1, 2), PassportCounter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoBytesToNetwork.setStatus('mandatory')
mpanlDlciLCoPktsFromNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 11, 1, 3), PassportCounter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoPktsFromNetwork.setStatus('mandatory')
mpanlDlciLCoBytesFromNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 11, 1, 4), PassportCounter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoBytesFromNetwork.setStatus('mandatory')
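# LCo CallData table: calling and called numbering plan and DNA, the
# call reference number and the elapsed time of the connection.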
mpanlDlciLCoCallDataTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 12), )
if mibBuilder.loadTexts: mpanlDlciLCoCallDataTable.setStatus('mandatory')
mpanlDlciLCoCallDataEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 12, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciLCoIndex"))
if mibBuilder.loadTexts: mpanlDlciLCoCallDataEntry.setStatus('mandatory')
mpanlDlciLCoCallingNpi = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 12, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1))).clone('x121')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoCallingNpi.setStatus('mandatory')
mpanlDlciLCoCallingDna = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 12, 1, 28), DigitString().subtype(subtypeSpec=ValueSizeConstraint(0, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoCallingDna.setStatus('mandatory')
mpanlDlciLCoElapsedTimeTillNow = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 12, 1, 30), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoElapsedTimeTillNow.setStatus('mandatory')
mpanlDlciLCoCallReferenceNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 12, 1, 31), Hex().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoCallReferenceNumber.setStatus('mandatory')
mpanlDlciLCoCalledNpi = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 12, 1, 33), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1))).clone('x121')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoCalledNpi.setStatus('mandatory')
mpanlDlciLCoCalledDna = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 12, 1, 34), DigitString().subtype(subtypeSpec=ValueSizeConstraint(0, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoCalledDna.setStatus('mandatory')
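# LCo Path table: one AsciiString value per leg of the route in use;
# the path value itself serves as the row index.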
mpanlDlciLCoPathTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 401), )
if mibBuilder.loadTexts: mpanlDlciLCoPathTable.setStatus('mandatory')
mpanlDlciLCoPathEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 401, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciLCoIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciLCoPathValue"))
if mibBuilder.loadTexts: mpanlDlciLCoPathEntry.setStatus('mandatory')
mpanlDlciLCoPathValue = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 4, 401, 1, 1), AsciiString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciLCoPathValue.setStatus('mandatory')
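# Dlci Jvc subcomponent (the expansion of "Jvc" is not given in this
# module): its Oper table tracks the call state machine and the
# calling/called addressing, and its Stat table counts packets
# to/from the subnet, discards and protocol errors.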
mpanlDlciJvc = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5))
mpanlDlciJvcRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 1), )
if mibBuilder.loadTexts: mpanlDlciJvcRowStatusTable.setStatus('mandatory')
mpanlDlciJvcRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciJvcIndex"))
if mibBuilder.loadTexts: mpanlDlciJvcRowStatusEntry.setStatus('mandatory')
mpanlDlciJvcRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcRowStatus.setStatus('mandatory')
mpanlDlciJvcComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcComponentName.setStatus('mandatory')
mpanlDlciJvcStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcStorageType.setStatus('mandatory')
mpanlDlciJvcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mpanlDlciJvcIndex.setStatus('mandatory')
mpanlDlciJvcOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 10), )
if mibBuilder.loadTexts: mpanlDlciJvcOperTable.setStatus('mandatory')
mpanlDlciJvcOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciJvcIndex"))
if mibBuilder.loadTexts: mpanlDlciJvcOperEntry.setStatus('mandatory')
mpanlDlciJvcCurrentState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("null", 0), ("callRequest", 1), ("callIndication", 2), ("callBlockPresent", 3), ("active", 4), ("discInitiated", 5), ("discPktPresent", 6), ("callDisconnected", 7), ("callTerminated", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcCurrentState.setStatus('mandatory')
mpanlDlciJvcPreviousState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("null", 0), ("callRequest", 1), ("callIndication", 2), ("callBlockPresent", 3), ("active", 4), ("discInitiated", 5), ("discPktPresent", 6), ("callDisconnected", 7), ("callTerminated", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcPreviousState.setStatus('mandatory')
mpanlDlciJvcCallingNpi = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 10, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcCallingNpi.setStatus('mandatory')
mpanlDlciJvcCallingAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 10, 1, 7), DigitString().subtype(subtypeSpec=ValueSizeConstraint(1, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcCallingAddress.setStatus('mandatory')
mpanlDlciJvcCallingLcn = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 10, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcCallingLcn.setStatus('mandatory')
mpanlDlciJvcCalledNpi = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 10, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcCalledNpi.setStatus('mandatory')
mpanlDlciJvcCalledAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 10, 1, 10), DigitString().subtype(subtypeSpec=ValueSizeConstraint(1, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcCalledAddress.setStatus('mandatory')
mpanlDlciJvcCalledLcn = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 10, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcCalledLcn.setStatus('mandatory')
mpanlDlciJvcStatTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 11), )
if mibBuilder.loadTexts: mpanlDlciJvcStatTable.setStatus('mandatory')
mpanlDlciJvcStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlDlciJvcIndex"))
if mibBuilder.loadTexts: mpanlDlciJvcStatEntry.setStatus('mandatory')
mpanlDlciJvcPacketsFromSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 11, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcPacketsFromSubnet.setStatus('mandatory')
mpanlDlciJvcPacketsToSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 11, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcPacketsToSubnet.setStatus('mandatory')
mpanlDlciJvcPacketsDiscarded = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 11, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcPacketsDiscarded.setStatus('mandatory')
mpanlDlciJvcProtocolErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 5, 5, 11, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlDlciJvcProtocolErrors.setStatus('mandatory')
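# MpanlSig component: the signalling channel of the MPANL link -
# Q.933-style call-control parameters and state, an underlying LAPF
# data link, SVC accounting defaults and call/link statistics.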
mpanlSig = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6))
mpanlSigRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 1), )
if mibBuilder.loadTexts: mpanlSigRowStatusTable.setStatus('mandatory')
mpanlSigRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigIndex"))
if mibBuilder.loadTexts: mpanlSigRowStatusEntry.setStatus('mandatory')
mpanlSigRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigRowStatus.setStatus('mandatory')
mpanlSigComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigComponentName.setStatus('mandatory')
mpanlSigStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigStorageType.setStatus('mandatory')
mpanlSigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mpanlSigIndex.setStatus('mandatory')
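# Sig SysParms table: provisionable call-control timers (call setup,
# disconnect, release, call proceeding; units are not stated here,
# presumably seconds) and the network type (private/public).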
mpanlSigSysParmsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 13), )
if mibBuilder.loadTexts: mpanlSigSysParmsTable.setStatus('mandatory')
mpanlSigSysParmsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 13, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigIndex"))
if mibBuilder.loadTexts: mpanlSigSysParmsEntry.setStatus('mandatory')
mpanlSigCallSetupTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 13, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlSigCallSetupTimer.setStatus('mandatory')
mpanlSigDisconnectTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 13, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(30)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlSigDisconnectTimer.setStatus('mandatory')
mpanlSigReleaseTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 13, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlSigReleaseTimer.setStatus('mandatory')
mpanlSigCallProceedingTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 13, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlSigCallProceedingTimer.setStatus('mandatory')
mpanlSigNetworkType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 13, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("private", 1), ("public", 2))).clone('private')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlSigNetworkType.setStatus('mandatory')
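# Example (manager-side sketch, not executed at import time): setting
# the read-write mpanlSigCallSetupTimer above with the pysnmp
# high-level API. The target address, community string and instance
# indices (1, 0) are illustrative assumptions, not values defined by
# this MIB.
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData,
#                             UdpTransportTarget, ContextData,
#                             ObjectType, ObjectIdentity, setCmd)
#
#   errInd, errStat, errIdx, varBinds = next(setCmd(
#       SnmpEngine(),
#       CommunityData('private'),
#       UdpTransportTarget(('192.0.2.1', 161)),
#       ContextData(),
#       ObjectType(ObjectIdentity(
#           'Nortel-Magellan-Passport-MpaNetworkLinkMIB',
#           'mpanlSigCallSetupTimer', 1, 0), 8)))
#
# Sig LapfSys table: provisionable LAPF link parameters - window size
# (k), retransmit limit (N200) and the acknowledgement, ack-delay and
# idle-probe timers (their ranges and defaults suggest milliseconds).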
mpanlSigLapfSysTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 14), )
if mibBuilder.loadTexts: mpanlSigLapfSysTable.setStatus('mandatory')
mpanlSigLapfSysEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 14, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigIndex"))
if mibBuilder.loadTexts: mpanlSigLapfSysEntry.setStatus('mandatory')
mpanlSigWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 14, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 127)).clone(7)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlSigWindowSize.setStatus('mandatory')
mpanlSigRetransmitLimit = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 14, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 20)).clone(3)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlSigRetransmitLimit.setStatus('mandatory')
mpanlSigAckTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 14, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1000, 10000)).clone(1500)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlSigAckTimer.setStatus('mandatory')
mpanlSigAckDelayTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 14, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlSigAckDelayTimer.setStatus('mandatory')
mpanlSigIdleProbeTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 14, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1000, 65535000)).clone(30000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlSigIdleProbeTimer.setStatus('mandatory')
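# Sig Svcacc table: whether accounting is generated by default for
# switched virtual calls signalled over this link.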
mpanlSigSvcaccTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 15), )
if mibBuilder.loadTexts: mpanlSigSvcaccTable.setStatus('mandatory')
mpanlSigSvcaccEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 15, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigIndex"))
if mibBuilder.loadTexts: mpanlSigSvcaccEntry.setStatus('mandatory')
mpanlSigDefaultAccounting = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 15, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("off", 0), ("on", 1))).clone('on')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlSigDefaultAccounting.setStatus('mandatory')
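# Sig State table: standard OSI status attributes of the signalling
# channel - administrative, operational and usage states (read-only).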
mpanlSigStateTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 16), )
if mibBuilder.loadTexts: mpanlSigStateTable.setStatus('mandatory')
mpanlSigStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 16, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigIndex"))
if mibBuilder.loadTexts: mpanlSigStateEntry.setStatus('mandatory')
mpanlSigAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 16, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("locked", 0), ("unlocked", 1), ("shuttingDown", 2))).clone('unlocked')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigAdminState.setStatus('mandatory')
mpanlSigOperationalState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 16, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('disabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigOperationalState.setStatus('mandatory')
mpanlSigUsageState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 16, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("idle", 0), ("active", 1), ("busy", 2))).clone('idle')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigUsageState.setStatus('mandatory')
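# Sig Stats table: call statistics - the current number of SVC calls,
# incoming/outgoing call and failure counters, protocol errors and
# QoS/timeout events, plus details of the last STATUS message
# received (cause, call state, DLCI) and message-block congestion.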
mpanlSigStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17), )
if mibBuilder.loadTexts: mpanlSigStatsTable.setStatus('mandatory')
mpanlSigStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigIndex"))
if mibBuilder.loadTexts: mpanlSigStatsEntry.setStatus('mandatory')
mpanlSigCurrentNumberOfSvcCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 991))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigCurrentNumberOfSvcCalls.setStatus('mandatory')
mpanlSigInCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigInCalls.setStatus('mandatory')
mpanlSigInCallsRefused = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigInCallsRefused.setStatus('mandatory')
mpanlSigOutCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigOutCalls.setStatus('mandatory')
mpanlSigOutCallsFailed = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigOutCallsFailed.setStatus('mandatory')
mpanlSigProtocolErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigProtocolErrors.setStatus('mandatory')
mpanlSigQualityOfServiceNotAvailable = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigQualityOfServiceNotAvailable.setStatus('mandatory')
mpanlSigSetupTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigSetupTimeout.setStatus('mandatory')
mpanlSigLastCauseInStatusMsgReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigLastCauseInStatusMsgReceived.setStatus('mandatory')
mpanlSigLastStateInStatusMsgReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63))).clone(namedValues=NamedValues(("null", 0), ("callInitiated", 1), ("n2", 2), ("outgoingCallProceeding", 3), ("n4", 4), ("n5", 5), ("callPresent", 6), ("n7", 7), ("n8", 8), ("incomingCallProceeding", 9), ("active", 10), ("disconnectRequest", 11), ("disconnectIndication", 12), ("n13", 13), ("n14", 14), ("n15", 15), ("n16", 16), ("n17", 17), ("n18", 18), ("releaseRequest", 19), ("notApplicable", 20), ("n21", 21), ("n22", 22), ("n23", 23), ("n24", 24), ("n25", 25), ("n26", 26), ("n27", 27), ("n28", 28), ("n29", 29), ("n30", 30), ("n31", 31), ("n32", 32), ("n33", 33), ("n34", 34), ("n35", 35), ("n36", 36), ("n37", 37), ("n38", 38), ("n39", 39), ("n40", 40), ("n41", 41), ("n42", 42), ("n43", 43), ("n44", 44), ("n45", 45), ("n46", 46), ("n47", 47), ("n48", 48), ("n49", 49), ("n50", 50), ("n51", 51), ("n52", 52), ("n53", 53), ("n54", 54), ("n55", 55), ("n56", 56), ("n57", 57), ("n58", 58), ("n59", 59), ("n60", 60), ("n61", 61), ("n62", 62), ("n63", 63)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigLastStateInStatusMsgReceived.setStatus('mandatory')
mpanlSigLastDlciReceivedStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 13), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(17, 1007)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigLastDlciReceivedStatus.setStatus('mandatory')
mpanlSigLastQ933StateReceivedStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 3, 6, 9, 10, 11, 12, 19, 20))).clone(namedValues=NamedValues(("null", 0), ("callInitiated", 1), ("outgoingCallProceeding", 3), ("callPresent", 6), ("incomingCallProceeding", 9), ("active", 10), ("disconnectRequest", 11), ("disconnectIndication", 12), ("releaseRequest", 19), ("notApplicable", 20)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigLastQ933StateReceivedStatus.setStatus('mandatory')
mpanlSigLastTimeMsgBlockCongested = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 15), EnterpriseDateAndTime().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(16, 16)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigLastTimeMsgBlockCongested.setStatus('mandatory')
mpanlSigLastDlciWithMsgBlockCongestion = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 17, 1, 16), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(16, 1007)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigLastDlciWithMsgBlockCongestion.setStatus('mandatory')
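# Example (manager-side sketch, not executed at import time): walking
# the incoming-call counter column above with the pysnmp high-level
# API. Host and community values are illustrative assumptions.
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData,
#                             UdpTransportTarget, ContextData,
#                             ObjectType, ObjectIdentity, nextCmd)
#
#   for errInd, errStat, errIdx, varBinds in nextCmd(
#           SnmpEngine(),
#           CommunityData('public'),
#           UdpTransportTarget(('192.0.2.1', 161)),
#           ContextData(),
#           ObjectType(ObjectIdentity(
#               'Nortel-Magellan-Passport-MpaNetworkLinkMIB',
#               'mpanlSigInCalls')),
#           lexicographicMode=False):
#       if errInd or errStat:
#           break
#       for varBind in varBinds:
#           print(varBind)
#
# Sig LapfStatus table: current LAPF link state, the reason for the
# last state change, the last FRMR received and the current queue
# size; the LapfStats table that follows counts state changes,
# remote-busy and reject events, ack timeouts and I-frame traffic.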
mpanlSigLapfStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 18), )
if mibBuilder.loadTexts: mpanlSigLapfStatusTable.setStatus('mandatory')
mpanlSigLapfStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 18, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigIndex"))
if mibBuilder.loadTexts: mpanlSigLapfStatusEntry.setStatus('mandatory')
mpanlSigCurrentState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 18, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 4, 5, 7))).clone(namedValues=NamedValues(("disconnected", 1), ("linkSetup", 2), ("disconnectRequest", 4), ("informationTransfer", 5), ("waitingAck", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigCurrentState.setStatus('mandatory')
mpanlSigLastStateChangeReason = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 18, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 5, 6, 7, 8, 9, 10, 12, 13))).clone(namedValues=NamedValues(("notStarted", 1), ("abmeEntered", 3), ("abmeReset", 5), ("dmReceived", 6), ("dmSent", 7), ("discReceived", 8), ("discSent", 9), ("frmrReceived", 10), ("n200RetranTimeOut", 12), ("other", 13)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigLastStateChangeReason.setStatus('mandatory')
mpanlSigFrmrReceive = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 18, 1, 3), HexString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigFrmrReceive.setStatus('mandatory')
mpanlSigCurrentQueueSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 18, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigCurrentQueueSize.setStatus('mandatory')
mpanlSigLapfStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 19), )
if mibBuilder.loadTexts: mpanlSigLapfStatsTable.setStatus('mandatory')
mpanlSigLapfStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 19, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigIndex"))
if mibBuilder.loadTexts: mpanlSigLapfStatsEntry.setStatus('mandatory')
mpanlSigStateChange = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 19, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigStateChange.setStatus('mandatory')
mpanlSigRemoteBusy = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 19, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigRemoteBusy.setStatus('mandatory')
mpanlSigReceiveRejectFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 19, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigReceiveRejectFrame.setStatus('mandatory')
mpanlSigAckTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 19, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigAckTimeout.setStatus('mandatory')
mpanlSigIFramesTransmitted = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 19, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigIFramesTransmitted.setStatus('mandatory')
mpanlSigIFramesTxDiscarded = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 19, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigIFramesTxDiscarded.setStatus('mandatory')
mpanlSigIFramesReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 19, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigIFramesReceived.setStatus('mandatory')
mpanlSigIFramesRcvdDiscarded = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 6, 19, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigIFramesRcvdDiscarded.setStatus('mandatory')
mpanlSigMpanl = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7))
mpanlSigMpanlRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 1), )
if mibBuilder.loadTexts: mpanlSigMpanlRowStatusTable.setStatus('mandatory')
mpanlSigMpanlRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigMpanlIndex"))
if mibBuilder.loadTexts: mpanlSigMpanlRowStatusEntry.setStatus('mandatory')
mpanlSigMpanlRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlRowStatus.setStatus('mandatory')
mpanlSigMpanlComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlComponentName.setStatus('mandatory')
mpanlSigMpanlStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlStorageType.setStatus('mandatory')
mpanlSigMpanlIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mpanlSigMpanlIndex.setStatus('mandatory')
mpanlSigMpanlStateTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 10), )
if mibBuilder.loadTexts: mpanlSigMpanlStateTable.setStatus('mandatory')
mpanlSigMpanlStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigMpanlIndex"))
if mibBuilder.loadTexts: mpanlSigMpanlStateEntry.setStatus('mandatory')
mpanlSigMpanlAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("locked", 0), ("unlocked", 1), ("shuttingDown", 2))).clone('unlocked')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlAdminState.setStatus('mandatory')
mpanlSigMpanlOperationalState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('disabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlOperationalState.setStatus('mandatory')
mpanlSigMpanlUsageState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("idle", 0), ("active", 1), ("busy", 2))).clone('idle')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlUsageState.setStatus('mandatory')
mpanlSigMpanlProfileTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 11), )
if mibBuilder.loadTexts: mpanlSigMpanlProfileTable.setStatus('mandatory')
mpanlSigMpanlProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigMpanlIndex"))
if mibBuilder.loadTexts: mpanlSigMpanlProfileEntry.setStatus('mandatory')
mpanlSigMpanlDteCustomerId = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 11, 1, 1), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 8191), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlDteCustomerId.setStatus('mandatory')
mpanlSigMpanlDteNodeId = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 11, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4095))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlDteNodeId.setStatus('mandatory')
mpanlSigMpanlDteComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 11, 1, 3), AsciiString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlDteComponentName.setStatus('mandatory')
mpanlSigMpanlHighestDlci = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 11, 1, 4), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(17, 1007), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlHighestDlci.setStatus('mandatory')
mpanlSigMpanlStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 12), )
if mibBuilder.loadTexts: mpanlSigMpanlStatsTable.setStatus('mandatory')
mpanlSigMpanlStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 12, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigMpanlIndex"))
if mibBuilder.loadTexts: mpanlSigMpanlStatsEntry.setStatus('mandatory')
mpanlSigMpanlProtocolErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 12, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlProtocolErrors.setStatus('mandatory')
mpanlSigMpanlSap0CommandsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 12, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlSap0CommandsRx.setStatus('mandatory')
mpanlSigMpanlSap0CommandsTx = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 12, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlSap0CommandsTx.setStatus('mandatory')
mpanlSigMpanlSapXCommandsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 12, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlSapXCommandsRx.setStatus('mandatory')
mpanlSigMpanlSapXCommandsTx = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 12, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlSapXCommandsTx.setStatus('mandatory')
mpanlSigMpanlLapfStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 13), )
if mibBuilder.loadTexts: mpanlSigMpanlLapfStatusTable.setStatus('mandatory')
mpanlSigMpanlLapfStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 13, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigMpanlIndex"))
if mibBuilder.loadTexts: mpanlSigMpanlLapfStatusEntry.setStatus('mandatory')
mpanlSigMpanlCurrentState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 13, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 4, 5, 7))).clone(namedValues=NamedValues(("disconnected", 1), ("linkSetup", 2), ("disconnectRequest", 4), ("informationTransfer", 5), ("waitingAck", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlCurrentState.setStatus('mandatory')
mpanlSigMpanlLastStateChangeReason = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 13, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 3, 5, 6, 7, 8, 9, 10, 12, 13))).clone(namedValues=NamedValues(("notStarted", 1), ("abmeEntered", 3), ("abmeReset", 5), ("dmReceived", 6), ("dmSent", 7), ("discReceived", 8), ("discSent", 9), ("frmrReceived", 10), ("n200RetranTimeOut", 12), ("other", 13)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlLastStateChangeReason.setStatus('mandatory')
mpanlSigMpanlFrmrReceive = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 13, 1, 3), HexString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlFrmrReceive.setStatus('mandatory')
mpanlSigMpanlCurrentQueueSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 13, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlCurrentQueueSize.setStatus('mandatory')
mpanlSigMpanlLapfStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 14), )
if mibBuilder.loadTexts: mpanlSigMpanlLapfStatsTable.setStatus('mandatory')
mpanlSigMpanlLapfStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 14, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlSigMpanlIndex"))
if mibBuilder.loadTexts: mpanlSigMpanlLapfStatsEntry.setStatus('mandatory')
mpanlSigMpanlStateChange = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 14, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlStateChange.setStatus('mandatory')
mpanlSigMpanlRemoteBusy = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 14, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlRemoteBusy.setStatus('mandatory')
mpanlSigMpanlReceiveRejectFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 14, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlReceiveRejectFrame.setStatus('mandatory')
mpanlSigMpanlAckTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 14, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlAckTimeout.setStatus('mandatory')
mpanlSigMpanlIFramesTransmitted = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 14, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlIFramesTransmitted.setStatus('mandatory')
mpanlSigMpanlIFramesTxDiscarded = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 14, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlIFramesTxDiscarded.setStatus('mandatory')
mpanlSigMpanlIFramesReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 14, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlIFramesReceived.setStatus('mandatory')
mpanlSigMpanlIFramesRcvdDiscarded = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 7, 14, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlSigMpanlIFramesRcvdDiscarded.setStatus('mandatory')
mpanlLmi = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8))
mpanlLmiRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 1), )
if mibBuilder.loadTexts: mpanlLmiRowStatusTable.setStatus('mandatory')
mpanlLmiRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlLmiIndex"))
if mibBuilder.loadTexts: mpanlLmiRowStatusEntry.setStatus('mandatory')
mpanlLmiRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlLmiRowStatus.setStatus('mandatory')
mpanlLmiComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlLmiComponentName.setStatus('mandatory')
mpanlLmiStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlLmiStorageType.setStatus('mandatory')
mpanlLmiIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mpanlLmiIndex.setStatus('mandatory')
mpanlLmiParmsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 10), )
if mibBuilder.loadTexts: mpanlLmiParmsTable.setStatus('mandatory')
mpanlLmiParmsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlLmiIndex"))
if mibBuilder.loadTexts: mpanlLmiParmsEntry.setStatus('mandatory')
mpanlLmiProcedures = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("none", 0), ("vendorForum", 1), ("ansi", 2), ("ccitt", 3))).clone('none')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlLmiProcedures.setStatus('mandatory')
mpanlLmiStateTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 12), )
if mibBuilder.loadTexts: mpanlLmiStateTable.setStatus('mandatory')
mpanlLmiStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 12, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlLmiIndex"))
if mibBuilder.loadTexts: mpanlLmiStateEntry.setStatus('mandatory')
mpanlLmiAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 12, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("locked", 0), ("unlocked", 1), ("shuttingDown", 2))).clone('unlocked')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlLmiAdminState.setStatus('mandatory')
mpanlLmiOperationalState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 12, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('disabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlLmiOperationalState.setStatus('mandatory')
mpanlLmiUsageState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 8, 12, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("idle", 0), ("active", 1), ("busy", 2))).clone('idle')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlLmiUsageState.setStatus('mandatory')
mpanlVoFr = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18))
mpanlVoFrRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 1), )
if mibBuilder.loadTexts: mpanlVoFrRowStatusTable.setStatus('mandatory')
mpanlVoFrRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlVoFrIndex"))
if mibBuilder.loadTexts: mpanlVoFrRowStatusEntry.setStatus('mandatory')
mpanlVoFrRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlVoFrRowStatus.setStatus('mandatory')
mpanlVoFrComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlVoFrComponentName.setStatus('mandatory')
mpanlVoFrStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlVoFrStorageType.setStatus('mandatory')
mpanlVoFrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mpanlVoFrIndex.setStatus('mandatory')
mpanlVoFrOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 10), )
if mibBuilder.loadTexts: mpanlVoFrOperTable.setStatus('mandatory')
mpanlVoFrOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlVoFrIndex"))
if mibBuilder.loadTexts: mpanlVoFrOperEntry.setStatus('mandatory')
mpanlVoFrMaximumFrameSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 10, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlVoFrMaximumFrameSize.setStatus('mandatory')
mpanlVoFrTransmitInformationRate = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 10, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlVoFrTransmitInformationRate.setStatus('mandatory')
mpanlVoFrStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 11), )
if mibBuilder.loadTexts: mpanlVoFrStatsTable.setStatus('mandatory')
mpanlVoFrStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlVoFrIndex"))
if mibBuilder.loadTexts: mpanlVoFrStatsEntry.setStatus('mandatory')
mpanlVoFrFragmentedHighestPriorityFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 11, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlVoFrFragmentedHighestPriorityFrames.setStatus('mandatory')
mpanlVoFrLostFragmentsFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 11, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlVoFrLostFragmentsFromIf.setStatus('mandatory')
mpanlVoFrProtocolViolationsFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 18, 11, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlVoFrProtocolViolationsFromIf.setStatus('mandatory')
mpanlFrMuxSetup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19))
mpanlFrMuxSetupRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 1), )
if mibBuilder.loadTexts: mpanlFrMuxSetupRowStatusTable.setStatus('mandatory')
mpanlFrMuxSetupRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlFrMuxSetupIndex"))
if mibBuilder.loadTexts: mpanlFrMuxSetupRowStatusEntry.setStatus('mandatory')
mpanlFrMuxSetupRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlFrMuxSetupRowStatus.setStatus('mandatory')
mpanlFrMuxSetupComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFrMuxSetupComponentName.setStatus('mandatory')
mpanlFrMuxSetupStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFrMuxSetupStorageType.setStatus('mandatory')
mpanlFrMuxSetupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mpanlFrMuxSetupIndex.setStatus('mandatory')
mpanlFrMuxSetupOpTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 11), )
if mibBuilder.loadTexts: mpanlFrMuxSetupOpTable.setStatus('mandatory')
mpanlFrMuxSetupOpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlFrMuxSetupIndex"))
if mibBuilder.loadTexts: mpanlFrMuxSetupOpEntry.setStatus('mandatory')
mpanlFrMuxSetupCommittedInformationRate = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 11, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(16000, 4294967295)).clone(16000)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFrMuxSetupCommittedInformationRate.setStatus('mandatory')
mpanlFrMuxSetupDlciCompName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 11, 1, 2), RowPointer()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFrMuxSetupDlciCompName.setStatus('mandatory')
mpanlFrMuxSetupPvcSetup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 2))
mpanlFrMuxSetupPvcSetupRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 2, 1), )
if mibBuilder.loadTexts: mpanlFrMuxSetupPvcSetupRowStatusTable.setStatus('mandatory')
mpanlFrMuxSetupPvcSetupRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 2, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlFrMuxSetupIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlFrMuxSetupPvcSetupIndex"))
if mibBuilder.loadTexts: mpanlFrMuxSetupPvcSetupRowStatusEntry.setStatus('mandatory')
mpanlFrMuxSetupPvcSetupRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 2, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFrMuxSetupPvcSetupRowStatus.setStatus('mandatory')
mpanlFrMuxSetupPvcSetupComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFrMuxSetupPvcSetupComponentName.setStatus('mandatory')
mpanlFrMuxSetupPvcSetupStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlFrMuxSetupPvcSetupStorageType.setStatus('mandatory')
mpanlFrMuxSetupPvcSetupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mpanlFrMuxSetupPvcSetupIndex.setStatus('mandatory')
mpanlFrMuxSetupPvcSetupProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 2, 10), )
if mibBuilder.loadTexts: mpanlFrMuxSetupPvcSetupProvTable.setStatus('mandatory')
mpanlFrMuxSetupPvcSetupProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 2, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlFrMuxSetupIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlFrMuxSetupPvcSetupIndex"))
if mibBuilder.loadTexts: mpanlFrMuxSetupPvcSetupProvEntry.setStatus('mandatory')
mpanlFrMuxSetupPvcSetupDlciName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 19, 2, 10, 1, 1), Link()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlFrMuxSetupPvcSetupDlciName.setStatus('mandatory')
mpanlIsdn = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22))
mpanlIsdnRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 1), )
if mibBuilder.loadTexts: mpanlIsdnRowStatusTable.setStatus('mandatory')
mpanlIsdnRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIsdnIndex"))
if mibBuilder.loadTexts: mpanlIsdnRowStatusEntry.setStatus('mandatory')
mpanlIsdnRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlIsdnRowStatus.setStatus('mandatory')
mpanlIsdnComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlIsdnComponentName.setStatus('mandatory')
mpanlIsdnStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlIsdnStorageType.setStatus('mandatory')
mpanlIsdnIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mpanlIsdnIndex.setStatus('mandatory')
mpanlIsdnProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 11), )
if mibBuilder.loadTexts: mpanlIsdnProvTable.setStatus('mandatory')
mpanlIsdnProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIsdnIndex"))
if mibBuilder.loadTexts: mpanlIsdnProvEntry.setStatus('mandatory')
mpanlIsdnT320 = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 11, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255)).clone(60)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlIsdnT320.setStatus('mandatory')
mpanlIsdnAddressSignalling = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 11, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("isdnDna", 0), ("normalBehavior", 1))).clone('normalBehavior')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mpanlIsdnAddressSignalling.setStatus('mandatory')
mpanlIsdnOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 12), )
if mibBuilder.loadTexts: mpanlIsdnOperTable.setStatus('mandatory')
mpanlIsdnOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 12, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIndex"), (0, "Nortel-Magellan-Passport-MpaNetworkLinkMIB", "mpanlIsdnIndex"))
if mibBuilder.loadTexts: mpanlIsdnOperEntry.setStatus('mandatory')
mpanlIsdnDataSigChan = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 12, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlIsdnDataSigChan.setStatus('mandatory')
mpanlIsdnBChannelState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 12, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("idle", 0), ("busy", 1), ("disabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlIsdnBChannelState.setStatus('mandatory')
mpanlIsdnLastUsedCgpn = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 12, 1, 3), DigitString().subtype(subtypeSpec=ValueSizeConstraint(0, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlIsdnLastUsedCgpn.setStatus('mandatory')
mpanlIsdnBChanIntState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 12, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("isdnInit", 0), ("waitAccEnable", 1), ("waitLnsResponse", 2), ("waitFramerData", 3), ("enabling", 4), ("waitAccRegAck", 5), ("up", 6), ("down", 7), ("releasing", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlIsdnBChanIntState.setStatus('mandatory')
mpanlIsdnActiveVirtualCircuitsCount = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 123, 22, 12, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mpanlIsdnActiveVirtualCircuitsCount.setStatus('mandatory')
mpaNetworkLinkGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 119, 1))
mpaNetworkLinkGroupBE = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 119, 1, 5))
mpaNetworkLinkGroupBE01 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 119, 1, 5, 2))
mpaNetworkLinkGroupBE01A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 119, 1, 5, 2, 2))
mpaNetworkLinkCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 119, 3))
mpaNetworkLinkCapabilitiesBE = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 119, 3, 5))
mpaNetworkLinkCapabilitiesBE01 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 119, 3, 5, 2))
mpaNetworkLinkCapabilitiesBE01A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 119, 3, 5, 2, 2))
mibBuilder.exportSymbols("Nortel-Magellan-Passport-MpaNetworkLinkMIB", mpanlVoFrRowStatusTable=mpanlVoFrRowStatusTable, mpanlSigStateEntry=mpanlSigStateEntry, mpanlDlciJvcPacketsFromSubnet=mpanlDlciJvcPacketsFromSubnet, mpanlSigMpanlDteNodeId=mpanlSigMpanlDteNodeId, mpanlAdminState=mpanlAdminState, mpanlSigMpanlAdminState=mpanlSigMpanlAdminState, mpanlDlciAvailabilityStatus=mpanlDlciAvailabilityStatus, mpanlProvEntry=mpanlProvEntry, mpanlDlciDeFrmFromIf=mpanlDlciDeFrmFromIf, mpanlDlciLCoCallDataEntry=mpanlDlciLCoCallDataEntry, mpanlVoFrComponentName=mpanlVoFrComponentName, mpanlVoFrIndex=mpanlVoFrIndex, mpanlFrmToIfByQueueIndex=mpanlFrmToIfByQueueIndex, mpanlSigMpanlIFramesReceived=mpanlSigMpanlIFramesReceived, mpanlDlciVcCombErrorsFromSubnet=mpanlDlciVcCombErrorsFromSubnet, mpanlSigOutCallsFailed=mpanlSigOutCallsFailed, mpanlFramerStateTable=mpanlFramerStateTable, mpanlDlciLCoEnd=mpanlDlciLCoEnd, mpanlUnknownDlciFramesFromIf=mpanlUnknownDlciFramesFromIf, mpanlDlciLbRemoteFecnFrm=mpanlDlciLbRemoteFecnFrm, mpanlDlciJvcRowStatus=mpanlDlciJvcRowStatus, mpanlDlciControlStatus=mpanlDlciControlStatus, mpanlSigWindowSize=mpanlSigWindowSize, mpanlSigCallProceedingTimer=mpanlSigCallProceedingTimer, mpanlSigMpanlStateChange=mpanlSigMpanlStateChange, mpanlLmiComponentName=mpanlLmiComponentName, mpanlDlciTransferPriorityToNetwork=mpanlDlciTransferPriorityToNetwork, mpanlVoFrTransmitInformationRate=mpanlVoFrTransmitInformationRate, mpanlFrmToIf=mpanlFrmToIf, mpanlSigMpanlDteCustomerId=mpanlSigMpanlDteCustomerId, mpanlProvTable=mpanlProvTable, mpanlLmiProcedures=mpanlLmiProcedures, mpanlDlciFrmFromIf=mpanlDlciFrmFromIf, mpanlDlciErrorShortFrmFromIf=mpanlDlciErrorShortFrmFromIf, mpanlDlciVc=mpanlDlciVc, mpanlVoFrProtocolViolationsFromIf=mpanlVoFrProtocolViolationsFromIf, mpanlDlciLCoCalledDna=mpanlDlciLCoCalledDna, mpanlDlciVcOoSeqPktCntExceeded=mpanlDlciVcOoSeqPktCntExceeded, mpanlDlciVcPeakOoSeqByteCount=mpanlDlciVcPeakOoSeqByteCount, mpanlFrMuxSetupPvcSetupDlciName=mpanlFrMuxSetupPvcSetupDlciName, mpanlSigStatsTable=mpanlSigStatsTable, mpanlPrefixDnaNumberingPlanIndicatorIndex=mpanlPrefixDnaNumberingPlanIndicatorIndex, mpanlDlciLbLocalTotalFrm=mpanlDlciLbLocalTotalFrm, mpanlDlciVcStorageType=mpanlDlciVcStorageType, mpanlFramerStatsEntry=mpanlFramerStatsEntry, mpanlFramerStatsTable=mpanlFramerStatsTable, mpanlDlciIndex=mpanlDlciIndex, mpanlSigRowStatusTable=mpanlSigRowStatusTable, mpanlLmiIndex=mpanlLmiIndex, mpanlDlciVcPreviousDiagnosticCode=mpanlDlciVcPreviousDiagnosticCode, mpanlControlStatus=mpanlControlStatus, mpanlIsdnStorageType=mpanlIsdnStorageType, mpanlIsdnBChannelState=mpanlIsdnBChannelState, mpanlSigMpanlSap0CommandsRx=mpanlSigMpanlSap0CommandsRx, mpanlSnmpOperStatus=mpanlSnmpOperStatus, mpanlFramerUtilEntry=mpanlFramerUtilEntry, mpanlDlciLbLocalDeBytes=mpanlDlciLbLocalDeBytes, mpanlFramerRowStatusTable=mpanlFramerRowStatusTable, mpanlDlci=mpanlDlci, mpanlNumberOfEmissionQs=mpanlNumberOfEmissionQs, mpanlDnaRowStatusTable=mpanlDnaRowStatusTable, mpanlDlciProceduralStatus=mpanlDlciProceduralStatus, mpanlDlciLCoPathUpDateTime=mpanlDlciLCoPathUpDateTime, mpanlFramerComponentName=mpanlFramerComponentName, mpanlDlciABitReasonFromIf=mpanlDlciABitReasonFromIf, mpanlDlciVcDmepTable=mpanlDlciVcDmepTable, mpanlFrMuxSetupDlciCompName=mpanlFrMuxSetupDlciCompName, mpanlDlciLCoPathFailureAction=mpanlDlciLCoPathFailureAction, mpanlFramerLrcErrors=mpanlFramerLrcErrors, mpanlDlciJvcStorageType=mpanlDlciJvcStorageType, mpanlSigMpanl=mpanlSigMpanl, mpanlDlciIntEntry=mpanlDlciIntEntry, 
mpanlDlciLCoPathDataTable=mpanlDlciLCoPathDataTable, mpanlSigIndex=mpanlSigIndex, mpanlDlciVcFrmCongestedToSubnet=mpanlDlciVcFrmCongestedToSubnet, mpanlOperStatusTable=mpanlOperStatusTable, mpanlFrMuxSetup=mpanlFrMuxSetup, mpanlStatsTable=mpanlStatsTable, mpanlFramerStorageType=mpanlFramerStorageType, mpanlDnaEgressAccounting=mpanlDnaEgressAccounting, mpanlDlciTransferPriorityFromNetwork=mpanlDlciTransferPriorityFromNetwork, mpanlDlciJvcCallingLcn=mpanlDlciJvcCallingLcn, mpanlRowStatusTable=mpanlRowStatusTable, mpanlFrMuxSetupRowStatusTable=mpanlFrMuxSetupRowStatusTable, mpanlDlciVcSendSequenceNumber=mpanlDlciVcSendSequenceNumber, mpanlSigMpanlAckTimeout=mpanlSigMpanlAckTimeout, mpanlDlciTotalEgressBytes=mpanlDlciTotalEgressBytes, mpanlDlciJvcComponentName=mpanlDlciJvcComponentName, mpanlSigMpanlFrmrReceive=mpanlSigMpanlFrmrReceive, mpanlDlciErrorLongFrmFromIf=mpanlDlciErrorLongFrmFromIf, mpanlSigSysParmsEntry=mpanlSigSysParmsEntry, mpanlDnaRowStatusEntry=mpanlDnaRowStatusEntry, mpanlDlciVcRowStatus=mpanlDlciVcRowStatus, mpanlSigMpanlRowStatusEntry=mpanlSigMpanlRowStatusEntry, mpanlSigMpanlStorageType=mpanlSigMpanlStorageType, mpanlFrmToIfByQueueValue=mpanlFrmToIfByQueueValue, mpanlDlciLoopbackState=mpanlDlciLoopbackState, mpanlEmissionPriorityQsTable=mpanlEmissionPriorityQsTable, mpanlDnaAccountCollection=mpanlDnaAccountCollection, mpanlDlciUnknownStatus=mpanlDlciUnknownStatus, mpanlDlciLCoRowStatusTable=mpanlDlciLCoRowStatusTable, mpanlCustomerIdentifier=mpanlCustomerIdentifier, mpanlSigProtocolErrors=mpanlSigProtocolErrors, mpanlSigOutCalls=mpanlSigOutCalls, mpanlDlciBciToSubnet=mpanlDlciBciToSubnet, mpaNetworkLinkCapabilitiesBE=mpaNetworkLinkCapabilitiesBE, mpanlIsdnRowStatusEntry=mpanlIsdnRowStatusEntry, mpanlStorageType=mpanlStorageType, mpanlSigAckTimer=mpanlSigAckTimer, mpanlDlciLCoBytesFromNetwork=mpanlDlciLCoBytesFromNetwork, mpanlDnaOutgoingOptionsEntry=mpanlDnaOutgoingOptionsEntry, mpanlSigDisconnectTimer=mpanlSigDisconnectTimer, mpanlSigCurrentQueueSize=mpanlSigCurrentQueueSize, mpanlDlciJvcStatEntry=mpanlDlciJvcStatEntry, mpanlFramerUtilTable=mpanlFramerUtilTable, mpanlIsdnIndex=mpanlIsdnIndex, mpanlFrMuxSetupPvcSetupComponentName=mpanlFrMuxSetupPvcSetupComponentName, mpanlIfEntryEntry=mpanlIfEntryEntry, mpanlLmiRowStatusTable=mpanlLmiRowStatusTable, mpanlFrMuxSetupPvcSetupProvEntry=mpanlFrMuxSetupPvcSetupProvEntry, mpanlSigMpanlIndex=mpanlSigMpanlIndex, mpanlUsageState=mpanlUsageState, mpanlLmiUsageState=mpanlLmiUsageState, mpanlDlciVcCannotForwardToSubnet=mpanlDlciVcCannotForwardToSubnet, mpanlDlciTotalIngressSegFrm=mpanlDlciTotalIngressSegFrm, mpanlSigOperationalState=mpanlSigOperationalState, mpanlFramerLinkEntry=mpanlFramerLinkEntry, mpanlDlciElapsedDifference=mpanlDlciElapsedDifference, mpanlDlciLCoComponentName=mpanlDlciLCoComponentName, mpanlDlciLbLocalFecnFrm=mpanlDlciLbLocalFecnFrm, mpanlFramerLargeFrmErrors=mpanlFramerLargeFrmErrors, mpanlAvailabilityStatus=mpanlAvailabilityStatus, mpanlDlciLCoPermittedTrunkTypes=mpanlDlciLCoPermittedTrunkTypes, mpanlSigStorageType=mpanlSigStorageType, mpanlDlciLCoOptimization=mpanlDlciLCoOptimization, mpanlSigRowStatusEntry=mpanlSigRowStatusEntry, mpanlOctetToIfByQueueIndex=mpanlOctetToIfByQueueIndex, mpanlDlciLCoEmissionPriority=mpanlDlciLCoEmissionPriority, mpanlDlciVcRcosFromNetwork=mpanlDlciVcRcosFromNetwork, mpanlSigLapfSysTable=mpanlSigLapfSysTable, mpanlDlciJvcCalledLcn=mpanlDlciJvcCalledLcn, mpanlDlciLCoIndex=mpanlDlciLCoIndex, mpanlDlciDiscardedBytes=mpanlDlciDiscardedBytes, mpanlSigComponentName=mpanlSigComponentName, 
mpanlInvalidHeaderFramesFromIf=mpanlInvalidHeaderFramesFromIf, mpanlPrefixDnaRowStatusEntry=mpanlPrefixDnaRowStatusEntry, mpanlLmiParmsTable=mpanlLmiParmsTable, mpanlSigLastStateInStatusMsgReceived=mpanlSigLastStateInStatusMsgReceived, mpanlDlciDiscDeCongestedFromIf=mpanlDlciDiscDeCongestedFromIf, mpanlDlciLCoPktsFromNetwork=mpanlDlciLCoPktsFromNetwork, mpanlSigUsageState=mpanlSigUsageState, mpanlDlciJvcPacketsDiscarded=mpanlDlciJvcPacketsDiscarded, mpanlIsdnAddressSignalling=mpanlIsdnAddressSignalling, mpanlDlciVcStartTime=mpanlDlciVcStartTime, mpaNetworkLinkCapabilities=mpaNetworkLinkCapabilities, mpanlFramerUnderruns=mpanlFramerUnderruns, mpanlRoundTripDelay=mpanlRoundTripDelay, mpanlDlciFrmToIf=mpanlDlciFrmToIf, mpanlDnaAccountClass=mpanlDnaAccountClass, mpanlDlciVcRcosToNetwork=mpanlDlciVcRcosToNetwork, mpanlDlciVcOutOfRangeFrmFromSubnet=mpanlDlciVcOutOfRangeFrmFromSubnet, mpanlDlciBytesFromIf=mpanlDlciBytesFromIf, mpanlSigMpanlRowStatus=mpanlSigMpanlRowStatus, mpanlSigIdleProbeTimer=mpanlSigIdleProbeTimer, mpanlFrMuxSetupPvcSetupRowStatusEntry=mpanlFrMuxSetupPvcSetupRowStatusEntry, mpanlDlciJvcOperEntry=mpanlDlciJvcOperEntry, mpanlDlciStateTable=mpanlDlciStateTable, mpanlDlciLCoRowStatus=mpanlDlciLCoRowStatus, mpanlSigMpanlCurrentState=mpanlSigMpanlCurrentState, mpanlFrMuxSetupPvcSetupIndex=mpanlFrMuxSetupPvcSetupIndex, mpanlDlciStateEntry=mpanlDlciStateEntry, mpanlDlciFciFromSubnet=mpanlDlciFciFromSubnet, mpanlDlciVcDiagnosticCode=mpanlDlciVcDiagnosticCode, mpanlFrmToIfByQueueEntry=mpanlFrmToIfByQueueEntry, mpanlDlciErrorLongBytesFromIf=mpanlDlciErrorLongBytesFromIf, mpanlIsdnComponentName=mpanlIsdnComponentName, mpanlSigQualityOfServiceNotAvailable=mpanlSigQualityOfServiceNotAvailable, mpanlIsdnLastUsedCgpn=mpanlIsdnLastUsedCgpn, mpanlDlciVcSegmentsRx=mpanlDlciVcSegmentsRx, mpanlDlciVcFrmLossTimeouts=mpanlDlciVcFrmLossTimeouts, mpanlDlciVcNotDataXferFromSubnet=mpanlDlciVcNotDataXferFromSubnet, mpanlSigMpanlSapXCommandsRx=mpanlSigMpanlSapXCommandsRx, mpanlDlciVcPreviousState=mpanlDlciVcPreviousState, mpanlDlciLCoBytesToNetwork=mpanlDlciLCoBytesToNetwork, mpanlDlciLCoPathEntry=mpanlDlciLCoPathEntry, mpanlDlciLbRowStatus=mpanlDlciLbRowStatus, mpanlDlciCommittedBurstSize=mpanlDlciCommittedBurstSize, mpanlSigMpanlSap0CommandsTx=mpanlSigMpanlSap0CommandsTx, mpanlDlciStartTime=mpanlDlciStartTime, mpanlTrafficStatsEntry=mpanlTrafficStatsEntry, mpanlIfAdminStatus=mpanlIfAdminStatus, mpanlDlciLCoState=mpanlDlciLCoState, mpanlFramerAborts=mpanlFramerAborts, mpanlSigReceiveRejectFrame=mpanlSigReceiveRejectFrame, mpanlFramerFrmModeErrors=mpanlFramerFrmModeErrors, mpanlDlciMaximumFrameSize=mpanlDlciMaximumFrameSize, mpaNetworkLinkGroup=mpaNetworkLinkGroup, mpanlDlciLbRowStatusTable=mpanlDlciLbRowStatusTable, mpanlDlciJvcCurrentState=mpanlDlciJvcCurrentState, mpanlSigMpanlLapfStatsTable=mpanlSigMpanlLapfStatsTable, mpanlStateEntry=mpanlStateEntry, mpanlDlciTransferPriToNwk=mpanlDlciTransferPriToNwk, mpanlDlciJvcProtocolErrors=mpanlDlciJvcProtocolErrors, mpanlDlciABitStatusToIf=mpanlDlciABitStatusToIf, mpanlVoFrStatsEntry=mpanlVoFrStatsEntry, mpaNetworkLinkCapabilitiesBE01=mpaNetworkLinkCapabilitiesBE01, mpanlOctetToIfByQueueEntry=mpanlOctetToIfByQueueEntry, mpanlDlciVcOoSeqByteCntExceeded=mpanlDlciVcOoSeqByteCntExceeded, mpanlDlciEmissionPriorityToIf=mpanlDlciEmissionPriorityToIf, mpanlDlciVcIndex=mpanlDlciVcIndex, mpanlPrefixDna=mpanlPrefixDna, mpanlDlciAbitTable=mpanlDlciAbitTable, mpanlDlciLbRemoteBecnFrm=mpanlDlciLbRemoteBecnFrm, 
mpanlDlciLCoRequiredTxBandwidth=mpanlDlciLCoRequiredTxBandwidth, mpanlDlciLCoHoldingPriority=mpanlDlciLCoHoldingPriority, mpanlDlciDeFrmToIf=mpanlDlciDeFrmToIf, mpanlSigLapfSysEntry=mpanlSigLapfSysEntry, mpanlLmiRowStatusEntry=mpanlLmiRowStatusEntry, mpanlDlciLbComponentName=mpanlDlciLbComponentName, mpanlFramerProvTable=mpanlFramerProvTable, mpanlDlciJvcCallingNpi=mpanlDlciJvcCallingNpi, mpanlDlciStatsTable=mpanlDlciStatsTable, mpanlDlciLCoCallDataTable=mpanlDlciLCoCallDataTable, mpanlVoFrOperTable=mpanlVoFrOperTable, mpanlDlciLCoPathValue=mpanlDlciLCoPathValue, mpanlDlciDiscFrameAbit=mpanlDlciDiscFrameAbit, mpanlSigSvcaccTable=mpanlSigSvcaccTable, mpanlDlciEirEgressBytes=mpanlDlciEirEgressBytes, mpanlDlciVcDataPath=mpanlDlciVcDataPath, mpanlSigAckDelayTimer=mpanlSigAckDelayTimer, mpanlDlciJvcCalledAddress=mpanlDlciJvcCalledAddress, mpanlOctetFromIf=mpanlOctetFromIf, mpanlSigLastDlciWithMsgBlockCongestion=mpanlSigLastDlciWithMsgBlockCongestion, mpanlDlciQ933CallState=mpanlDlciQ933CallState, mpanlCidDataEntry=mpanlCidDataEntry, mpanlDlciAlarmStatus=mpanlDlciAlarmStatus, mpanlDlciDiscCongestedFromIfBytes=mpanlDlciDiscCongestedFromIfBytes, mpanlDlciLCoStatsEntry=mpanlDlciLCoStatsEntry, mpanlDlciVcPeakOoSeqPktCount=mpanlDlciVcPeakOoSeqPktCount, mpanlDlciLCoElapsedTimeTillNow=mpanlDlciLCoElapsedTimeTillNow, mpanlFramerNormPrioLinkUtilFromIf=mpanlFramerNormPrioLinkUtilFromIf, mpanlDlciVcSegmentsSent=mpanlDlciVcSegmentsSent, mpanlDlciVcFrdTable=mpanlDlciVcFrdTable, mpanlSigLastTimeMsgBlockCongested=mpanlSigLastTimeMsgBlockCongested, mpanlOperEntry=mpanlOperEntry, mpanlPrefixDnaComponentName=mpanlPrefixDnaComponentName, mpanlDlciLCoCallingNpi=mpanlDlciLCoCallingNpi, mpanlFrMuxSetupPvcSetup=mpanlFrMuxSetupPvcSetup, mpanlDlciBciFromSubnet=mpanlDlciBciFromSubnet, mpanlSigMpanlProfileEntry=mpanlSigMpanlProfileEntry, mpanlDnaDefaultTransferPriority=mpanlDnaDefaultTransferPriority, mpanlDlciLCoPathDataEntry=mpanlDlciLCoPathDataEntry, mpanlDlciLCoBumpPreference=mpanlDlciLCoBumpPreference, mpanlDlciJvcCalledNpi=mpanlDlciJvcCalledNpi)
mibBuilder.exportSymbols("Nortel-Magellan-Passport-MpaNetworkLinkMIB", mpanlDnaIndex=mpanlDnaIndex, mpanlDlciExcessBytesFromIf=mpanlDlciExcessBytesFromIf, mpanlDlciLbStorageType=mpanlDlciLbStorageType, mpanlDlciTotalEgressSegFrm=mpanlDlciTotalEgressSegFrm, mpanlFrMuxSetupCommittedInformationRate=mpanlFrMuxSetupCommittedInformationRate, mpanlFramerProvEntry=mpanlFramerProvEntry, mpanlDlciLCo=mpanlDlciLCo, mpanlDnaCallOptionsEntry=mpanlDnaCallOptionsEntry, mpanlSigMpanlStateTable=mpanlSigMpanlStateTable, mpanlFramer=mpanlFramer, mpanlDlciTransferPriFromNwk=mpanlDlciTransferPriFromNwk, mpanlSigLapfStatusEntry=mpanlSigLapfStatusEntry, mpanlDlciVcRowStatusEntry=mpanlDlciVcRowStatusEntry, mpanlSigInCalls=mpanlSigInCalls, mpanlIsdnProvEntry=mpanlIsdnProvEntry, mpanlFramerAdminState=mpanlFramerAdminState, mpanlFramerCrcErrors=mpanlFramerCrcErrors, mpanlDnaServiceExchange=mpanlDnaServiceExchange, mpanlFramerStateEntry=mpanlFramerStateEntry, mpanlDlciVcPathReliability=mpanlDlciVcPathReliability, mpanlDlciVcSubnetRecoveries=mpanlDlciVcSubnetRecoveries, mpanlSigIFramesRcvdDiscarded=mpanlSigIFramesRcvdDiscarded, mpanlSigInCallsRefused=mpanlSigInCallsRefused, mpaNetworkLinkCapabilitiesBE01A=mpaNetworkLinkCapabilitiesBE01A, mpanlLmiRowStatus=mpanlLmiRowStatus, mpanlSigMpanlProtocolErrors=mpanlSigMpanlProtocolErrors, mpanlDlciJvcPacketsToSubnet=mpanlDlciJvcPacketsToSubnet, mpanlRowStatus=mpanlRowStatus, mpanlSigLastQ933StateReceivedStatus=mpanlSigLastQ933StateReceivedStatus, mpanlSigMpanlLapfStatusEntry=mpanlSigMpanlLapfStatusEntry, mpanlDlciLbStatsTable=mpanlDlciLbStatsTable, mpaNetworkLinkGroupBE01A=mpaNetworkLinkGroupBE01A, mpanlFrMuxSetupPvcSetupStorageType=mpanlFrMuxSetupPvcSetupStorageType, mpanlDlciVcDmepEntry=mpanlDlciVcDmepEntry, mpanlIsdnRowStatus=mpanlIsdnRowStatus, mpanlFramerFlagsBetweenFrames=mpanlFramerFlagsBetweenFrames, mpanlDlciStatsEntry=mpanlDlciStatsEntry, mpanlSigMpanlProfileTable=mpanlSigMpanlProfileTable, mpanlDlciFecnFrmFromIf=mpanlDlciFecnFrmFromIf, mpanlRowStatusEntry=mpanlRowStatusEntry, mpanlFramerNonOctetErrors=mpanlFramerNonOctetErrors, mpanlVoFrRowStatusEntry=mpanlVoFrRowStatusEntry, mpanlDlciVcPriority=mpanlDlciVcPriority, mpanlDlciDiscardedSegFrm=mpanlDlciDiscardedSegFrm, mpanlFrMuxSetupIndex=mpanlFrMuxSetupIndex, mpanlSigLapfStatsTable=mpanlSigLapfStatsTable, mpanlDlciLCoCallingDna=mpanlDlciLCoCallingDna, mpaNetworkLinkGroupBE01=mpaNetworkLinkGroupBE01, mpanlSigIFramesTxDiscarded=mpanlSigIFramesTxDiscarded, mpanlSigLastDlciReceivedStatus=mpanlSigLastDlciReceivedStatus, mpanlSigCallSetupTimer=mpanlSigCallSetupTimer, mpanlSigMpanlComponentName=mpanlSigMpanlComponentName, mpanlDlciVcCadTable=mpanlDlciVcCadTable, mpanlOctetToIfByQueueValue=mpanlOctetToIfByQueueValue, mpanlIsdnOperTable=mpanlIsdnOperTable, mpanlDlciVcPeakOoSeqFrmForwarded=mpanlDlciVcPeakOoSeqFrmForwarded, mpanlDlciEirIngressBytes=mpanlDlciEirIngressBytes, mpanlDlciDiscCongestedToIf=mpanlDlciDiscCongestedToIf, mpanlDnaOutgoingOptionsTable=mpanlDnaOutgoingOptionsTable, mpanlDlciLbRowStatusEntry=mpanlDlciLbRowStatusEntry, mpanlDlciLCoPathTable=mpanlDlciLCoPathTable, mpanlDnaComponentName=mpanlDnaComponentName, mpanlVoFrOperEntry=mpanlVoFrOperEntry, mpanlFrMuxSetupOpEntry=mpanlFrMuxSetupOpEntry, mpanlDlciDiscDeCongestedToIfBytes=mpanlDlciDiscDeCongestedToIfBytes, mpanlSigSysParmsTable=mpanlSigSysParmsTable, mpanlDlciLCoSetupPriority=mpanlDlciLCoSetupPriority, mpanlDlciLCoCostMetric=mpanlDlciLCoCostMetric, mpanlDlciLCoDiscardPriority=mpanlDlciLCoDiscardPriority, mpanlLastUnknownDlci=mpanlLastUnknownDlci, 
mpanlDlciDiscCongestedToIfBytes=mpanlDlciDiscCongestedToIfBytes, mpanlDnaStorageType=mpanlDnaStorageType, mpanlSigIFramesTransmitted=mpanlSigIFramesTransmitted, mpanlAlarmStatus=mpanlAlarmStatus, mpanlDlciRowStatusTable=mpanlDlciRowStatusTable, mpanlFramerOverruns=mpanlFramerOverruns, mpanlDlciVcCalledDna=mpanlDlciVcCalledDna, mpanlUnknownStatus=mpanlUnknownStatus, mpanlFramerUsageState=mpanlFramerUsageState, mpanlDlciJvcPreviousState=mpanlDlciJvcPreviousState, mpanlDlciBecnFrmToIf=mpanlDlciBecnFrmToIf, mpanlFrMuxSetupOpTable=mpanlFrMuxSetupOpTable, mpanlDlciLbRemoteTotalBytes=mpanlDlciLbRemoteTotalBytes, mpanlDlciAbitEntry=mpanlDlciAbitEntry, mpanlFrMuxSetupRowStatusEntry=mpanlFrMuxSetupRowStatusEntry, mpanlDlciTotalIngressBytes=mpanlDlciTotalIngressBytes, mpanlDlciLCoRetryCount=mpanlDlciLCoRetryCount, mpanlOctetToIf=mpanlOctetToIf, mpanlDlciLCoRequiredRxBandwidth=mpanlDlciLCoRequiredRxBandwidth, mpanlDlciStorageType=mpanlDlciStorageType, mpanlSigLapfStatusTable=mpanlSigLapfStatusTable, mpanlLmi=mpanlLmi, mpanlOperTable=mpanlOperTable, mpanlDlciVcCallReferenceNumber=mpanlDlciVcCallReferenceNumber, mpanlVoFrRowStatus=mpanlVoFrRowStatus, mpanlSigMpanlLastStateChangeReason=mpanlSigMpanlLastStateChangeReason, mpanlDlciDeBytesFromIf=mpanlDlciDeBytesFromIf, mpanlDlciLCoCallReferenceNumber=mpanlDlciLCoCallReferenceNumber, mpanlSigNetworkType=mpanlSigNetworkType, mpanlCidDataTable=mpanlCidDataTable, mpanlSigMpanlIFramesRcvdDiscarded=mpanlSigMpanlIFramesRcvdDiscarded, mpanlDlciLbRemoteDeFrm=mpanlDlciLbRemoteDeFrm, mpanlIndex=mpanlIndex, mpanlFramerFrmToIf=mpanlFramerFrmToIf, mpanlSigMpanlReceiveRejectFrame=mpanlSigMpanlReceiveRejectFrame, mpanlDna=mpanlDna, mpanlDlciLbStatsEntry=mpanlDlciLbStatsEntry, mpanlSigMpanlOperationalState=mpanlSigMpanlOperationalState, mpanlDlciComponentName=mpanlDlciComponentName, mpanlDlciDiscDeCongestedFromIfBytes=mpanlDlciDiscDeCongestedFromIfBytes, mpanlFrMuxSetupStorageType=mpanlFrMuxSetupStorageType, mpanlDlciVcCalledNpi=mpanlDlciVcCalledNpi, mpanlIsdnBChanIntState=mpanlIsdnBChanIntState, mpanlDlciQ933CallReference=mpanlDlciQ933CallReference, mpanlDlciVcIntdEntry=mpanlDlciVcIntdEntry, mpanlFrMuxSetupPvcSetupRowStatus=mpanlFrMuxSetupPvcSetupRowStatus, mpanlDlciVcDmepValue=mpanlDlciVcDmepValue, mpanlLmiStorageType=mpanlLmiStorageType, mpanlFramerIndex=mpanlFramerIndex, mpanlDlciSpOpEntry=mpanlDlciSpOpEntry, mpanlOperationalState=mpanlOperationalState, mpanlSigRetransmitLimit=mpanlSigRetransmitLimit, mpanlLmiAdminState=mpanlLmiAdminState, mpanlDlciLbRemoteTotalFrm=mpanlDlciLbRemoteTotalFrm, mpanlDlciJvcOperTable=mpanlDlciJvcOperTable, mpanlIsdnProvTable=mpanlIsdnProvTable, mpanlDlciVcSegmentSize=mpanlDlciVcSegmentSize, mpanlDlciStandbyStatus=mpanlDlciStandbyStatus, mpanlOperStatusEntry=mpanlOperStatusEntry, mpanlDlciVcEmissionPriorityToNetwork=mpanlDlciVcEmissionPriorityToNetwork, mpanlVoFrStorageType=mpanlVoFrStorageType, mpanlTrafficStatsTable=mpanlTrafficStatsTable, mpanlSigLastStateChangeReason=mpanlSigLastStateChangeReason, mpanlDlciLCoRoundTripDelay=mpanlDlciLCoRoundTripDelay, mpanlDlciLbLocalBecnFrm=mpanlDlciLbLocalBecnFrm, mpanlFrMuxSetupRowStatus=mpanlFrMuxSetupRowStatus, mpanlDlciJvcStatTable=mpanlDlciJvcStatTable, mpanlDlciJvcRowStatusTable=mpanlDlciJvcRowStatusTable, mpaNetworkLinkGroupBE=mpaNetworkLinkGroupBE, mpanlDlciVcElapsedTimeTillNow=mpanlDlciVcElapsedTimeTillNow, mpanlEmissionPriorityQsEntry=mpanlEmissionPriorityQsEntry, mpanlDlciJvcRowStatusEntry=mpanlDlciJvcRowStatusEntry, 
mpanlPrefixDnaDataNetworkAddressIndex=mpanlPrefixDnaDataNetworkAddressIndex, mpanlDlciLCoRequiredTrafficType=mpanlDlciLCoRequiredTrafficType, mpanlVoFrStatsTable=mpanlVoFrStatsTable, mpanlDlciDiscDeCongestedToIf=mpanlDlciDiscDeCongestedToIf, mpanlDlciLCoRequiredCustomerParameter=mpanlDlciLCoRequiredCustomerParameter, mpanlDlciJvcIndex=mpanlDlciJvcIndex, mpanlPrefixDnaRowStatusTable=mpanlPrefixDnaRowStatusTable, mpanlDlciABitReasonToIf=mpanlDlciABitReasonToIf, mpanlIsdn=mpanlIsdn, mpanlFramerNormPrioLinkUtilToIf=mpanlFramerNormPrioLinkUtilToIf, mpanlVoFrFragmentedHighestPriorityFrames=mpanlVoFrFragmentedHighestPriorityFrames, mpanlSigRemoteBusy=mpanlSigRemoteBusy, mpanlDlciDiscByteAbit=mpanlDlciDiscByteAbit, mpanlDlciVcAccountingEnabled=mpanlDlciVcAccountingEnabled, mpanlDlciBytesToIf=mpanlDlciBytesToIf, mpanlSigMpanlRemoteBusy=mpanlSigMpanlRemoteBusy, mpanlLmiOperationalState=mpanlLmiOperationalState, mpanlFrMuxSetupComponentName=mpanlFrMuxSetupComponentName, mpanlSigCurrentNumberOfSvcCalls=mpanlSigCurrentNumberOfSvcCalls, mpanlIsdnRowStatusTable=mpanlIsdnRowStatusTable, mpanlDlciVcCadEntry=mpanlDlciVcCadEntry, mpanlSigStatsEntry=mpanlSigStatsEntry, mpanlSigAckTimeout=mpanlSigAckTimeout, mpanlFramerRowStatus=mpanlFramerRowStatus, mpanlDlciVcPeakRetryQueueSize=mpanlDlciVcPeakRetryQueueSize, mpanlSigMpanlLapfStatsEntry=mpanlSigMpanlLapfStatsEntry, mpanlSigMpanlIFramesTransmitted=mpanlSigMpanlIFramesTransmitted, mpanlDlciCalldEntry=mpanlDlciCalldEntry, mpanlDlciExcessFrmFromIf=mpanlDlciExcessFrmFromIf, mpanlDnaCallOptionsTable=mpanlDnaCallOptionsTable, mpanlDlciVcCalledLcn=mpanlDlciVcCalledLcn, mpanlDlciEirEgressSegFrm=mpanlDlciEirEgressSegFrm, mpanlDlciLbRemoteDeBytes=mpanlDlciLbRemoteDeBytes, mpanlDlciLCoReasonForNoRoute=mpanlDlciLCoReasonForNoRoute, mpanlStandbyStatus=mpanlStandbyStatus, mpanlSigSvcaccEntry=mpanlSigSvcaccEntry, mpanlVoFr=mpanlVoFr, mpanlSigAdminState=mpanlSigAdminState, mpanlSigMpanlStatsEntry=mpanlSigMpanlStatsEntry, mpanlDlciVcNotDataXferToSubnet=mpanlDlciVcNotDataXferToSubnet, mpanlIsdnActiveVirtualCircuitsCount=mpanlIsdnActiveVirtualCircuitsCount, mpanlSigRowStatus=mpanlSigRowStatus, mpanlStateTable=mpanlStateTable, mpanlSigSetupTimeout=mpanlSigSetupTimeout, mpanlSigMpanlCurrentQueueSize=mpanlSigMpanlCurrentQueueSize, mpanlFramerOctetFromIf=mpanlFramerOctetFromIf, mpanlSigMpanlIFramesTxDiscarded=mpanlSigMpanlIFramesTxDiscarded, mpanlDlciLCoPathFailureCount=mpanlDlciLCoPathFailureCount, mpanlDlciSpOpTable=mpanlDlciSpOpTable, mpanlDlciLCoLastTearDownReason=mpanlDlciLCoLastTearDownReason, mpanlStatsEntry=mpanlStatsEntry, mpanlProceduralStatus=mpanlProceduralStatus, mpanlIfEntryTable=mpanlIfEntryTable, mpanlDlciLbIndex=mpanlDlciLbIndex, mpanlDlciExcessBurstSize=mpanlDlciExcessBurstSize, mpanlDlciVcCallingLcn=mpanlDlciVcCallingLcn, mpanlLmiStateEntry=mpanlLmiStateEntry, mpanlDlciLbLocalTotalBytes=mpanlDlciLbLocalTotalBytes, mpanlSigCurrentState=mpanlSigCurrentState, mpanlSigStateChange=mpanlSigStateChange, mpanlDlciLb=mpanlDlciLb, mpanlDlciLCoDelayMetric=mpanlDlciLCoDelayMetric, mpanlSig=mpanlSig, mpanlLmiParmsEntry=mpanlLmiParmsEntry, mpanlSigMpanlLapfStatusTable=mpanlSigMpanlLapfStatusTable, mpanlFrmFromIf=mpanlFrmFromIf, mpanlDlciBecnFrmFromIf=mpanlDlciBecnFrmFromIf, mpanlSigMpanlRowStatusTable=mpanlSigMpanlRowStatusTable, mpanlSigDefaultAccounting=mpanlSigDefaultAccounting, mpanlDlciDiscCongestedFromIf=mpanlDlciDiscCongestedFromIf, mpanlDlciVcCallingNpi=mpanlDlciVcCallingNpi, mpanlComponentName=mpanlComponentName, mpanlDnaRowStatus=mpanlDnaRowStatus, 
mpanlDlciLCoStorageType=mpanlDlciLCoStorageType, mpanlDlciVcState=mpanlDlciVcState, mpanlFramerInterfaceName=mpanlFramerInterfaceName, mpanlSigMpanlDteComponentName=mpanlSigMpanlDteComponentName, mpaNetworkLinkMIB=mpaNetworkLinkMIB, mpanlSigStateTable=mpanlSigStateTable, mpanlDlciVcRowStatusTable=mpanlDlciVcRowStatusTable, mpanlVoFrLostFragmentsFromIf=mpanlVoFrLostFragmentsFromIf, mpanlDlciAdminState=mpanlDlciAdminState, mpanlFramerRowStatusEntry=mpanlFramerRowStatusEntry, mpanlDlciVcAccountingEnd=mpanlDlciVcAccountingEnd, mpanlDlciLCoCalledNpi=mpanlDlciLCoCalledNpi, mpanlFramerOperationalState=mpanlFramerOperationalState, mpanlIsdnOperEntry=mpanlIsdnOperEntry, mpanlSigIFramesReceived=mpanlSigIFramesReceived, mpanlIfIndex=mpanlIfIndex, mpanlDlciVcCallingDna=mpanlDlciVcCallingDna, mpanlSigLapfStatsEntry=mpanlSigLapfStatsEntry, mpanlSigReleaseTimer=mpanlSigReleaseTimer, mpanlSigMpanlStateEntry=mpanlSigMpanlStateEntry, mpanlCommentText=mpanlCommentText, mpanlDlciRowStatusEntry=mpanlDlciRowStatusEntry, mpanlDlciOperationalState=mpanlDlciOperationalState, mpanlDlciVcFrdEntry=mpanlDlciVcFrdEntry, mpanlPrefixDnaRowStatus=mpanlPrefixDnaRowStatus, mpanlDlciJvcCallingAddress=mpanlDlciJvcCallingAddress, mpanlDlciFecnFrmToIf=mpanlDlciFecnFrmToIf, mpanlIsdnT320=mpanlIsdnT320, mpanlDlciEirIngressSegFrm=mpanlDlciEirIngressSegFrm, mpanlOctetToIfByQueueTable=mpanlOctetToIfByQueueTable, mpanlDlciDeBytesToIf=mpanlDlciDeBytesToIf, mpanlDlciVcComponentName=mpanlDlciVcComponentName, mpanl=mpanl, mpanlDlciVcFastSelectCall=mpanlDlciVcFastSelectCall, mpanlIsdnDataSigChan=mpanlIsdnDataSigChan, mpanlDlciCalldTable=mpanlDlciCalldTable, mpanlSigMpanlStatsTable=mpanlSigMpanlStatsTable, mpanlFrmToIfByQueueTable=mpanlFrmToIfByQueueTable, mpanlSigMpanlHighestDlci=mpanlSigMpanlHighestDlci)
mibBuilder.exportSymbols("Nortel-Magellan-Passport-MpaNetworkLinkMIB", mpanlLmiStateTable=mpanlLmiStateTable, mpanlDlciLCoRequiredSecurity=mpanlDlciLCoRequiredSecurity, mpanlVoFrMaximumFrameSize=mpanlVoFrMaximumFrameSize, mpanlDlciLCoRowStatusEntry=mpanlDlciLCoRowStatusEntry, mpanlFramerLinkTable=mpanlFramerLinkTable, mpanlDlciIntTable=mpanlDlciIntTable, mpanlDlciLCoPktsToNetwork=mpanlDlciLCoPktsToNetwork, mpanlDlciDiscExcessFromIf=mpanlDlciDiscExcessFromIf, mpanlDlciVcType=mpanlDlciVcType, mpanlDlciRowStatus=mpanlDlciRowStatus, mpanlDlciVcMaxSubnetPktSize=mpanlDlciVcMaxSubnetPktSize, mpanlSigMpanlUsageState=mpanlSigMpanlUsageState, mpanlDlciDiscExcessFromIfBytes=mpanlDlciDiscExcessFromIfBytes, mpanlFramerFrmFromIf=mpanlFramerFrmFromIf, mpanlDlciAccounting=mpanlDlciAccounting, mpanlDlciCallReferenceNumber=mpanlDlciCallReferenceNumber, mpanlDlciBecnFrmSetByService=mpanlDlciBecnFrmSetByService, mpanlDlciVcEmissionPriorityFromNetwork=mpanlDlciVcEmissionPriorityFromNetwork, mpanlDlciVcPktRetryTimeouts=mpanlDlciVcPktRetryTimeouts, mpanlSigFrmrReceive=mpanlSigFrmrReceive, mpanlPrefixDnaStorageType=mpanlPrefixDnaStorageType, mpanlFrMuxSetupPvcSetupProvTable=mpanlFrMuxSetupPvcSetupProvTable, mpanlDlciUsageState=mpanlDlciUsageState, mpanlFrMuxSetupPvcSetupRowStatusTable=mpanlFrMuxSetupPvcSetupRowStatusTable, mpanlDlciLCoStatsTable=mpanlDlciLCoStatsTable, mpanlSigMpanlSapXCommandsTx=mpanlSigMpanlSapXCommandsTx, mpanlDlciVcIntdTable=mpanlDlciVcIntdTable, mpanlDlciABitStatusFromIf=mpanlDlciABitStatusFromIf, mpanlDlciLbLocalDeFrm=mpanlDlciLbLocalDeFrm, mpanlDlciJvc=mpanlDlciJvc, mpanlSigLastCauseInStatusMsgReceived=mpanlSigLastCauseInStatusMsgReceived, mpanlDlciVcDuplicatesFromSubnet=mpanlDlciVcDuplicatesFromSubnet)
# --- next file: tools/com/formatting.py (repo AnthonyEdvalson/Machina, MIT license) ---
import datetime
import json
from enum import Enum
from tools.config.config import Config
def serialize(data, format):
assert format in formats
return formats[format].serialize(data)
def deserialize(data, format):
assert type(data) is bytes
return formats[format].deserialize(data)
class LooseJsonEncoder(json.JSONEncoder):
def default(self, o):
t = type(o)
if isinstance(o, Enum):
return o.value
if t == datetime.timedelta:
return o.total_seconds()
if t == datetime.datetime:
return o.timestamp()
if t == Config:
return o.as_dict()
return dict(filter(lambda a: a[0][0] != "_", o.__dict__.items()))
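# For any other type, default() falls back to the object's public
# attributes: e.g. a hypothetical object with x=1 and _cache={} would
# encode as {"x": 1}, since names starting with "_" are filtered out.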
class Json:
def __init__(self):
self.encoder = LooseJsonEncoder()
def serialize(self, data):
return self.encoder.encode(data).encode()
def deserialize(self, data):
return json.loads(data.decode())
class Text:
def serialize(self, data):
return str(data).encode()
def deserialize(self, data):
return data.decode()
class Bytes:
    def serialize(self, data):
        # None is normalised to an empty byte string; this check must run
        # before the bytes assertion, otherwise None could never reach it.
        if data is None:
            return b''
        assert type(data) is bytes
        return data
def deserialize(self, data):
return data
formats = {
"json": Json(),
"text": Text(),
"bytes": Bytes()
}
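
# A minimal usage sketch (the payload below is an illustrative assumption,
# not part of this module):
#
#     raw = serialize({"delta": datetime.timedelta(seconds=90)}, "json")
#     assert deserialize(raw, "json") == {"delta": 90.0}
#
# The round trip works because LooseJsonEncoder flattens timedeltas to
# their total seconds before the JSON encoding step.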
| 20.742857 | 74 | 0.575758 |
5f874ce6bd3b0b6d10926e48dc55b11589860566 | 22,151 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/aio/operations/_galleries_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/aio/operations/_galleries_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/aio/operations/_galleries_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GalleriesOperations:
"""GalleriesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery: "models.Gallery",
**kwargs
) -> "models.Gallery":
cls = kwargs.pop('cls', None) # type: ClsType["models.Gallery"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(gallery, 'Gallery')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Gallery', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Gallery', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Gallery', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
gallery_name: str,
gallery: "models.Gallery",
**kwargs
) -> AsyncLROPoller["models.Gallery"]:
"""Create or update a Shared Image Gallery.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery. The allowed characters are alphabets
and numbers with dots and periods allowed in the middle. The maximum length is 80 characters.
:type gallery_name: str
:param gallery: Parameters supplied to the create or update Shared Image Gallery operation.
:type gallery: ~azure.mgmt.compute.v2019_03_01.models.Gallery
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Gallery or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_03_01.models.Gallery]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.Gallery"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery=gallery,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Gallery', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}'} # type: ignore
async def get(
self,
resource_group_name: str,
gallery_name: str,
**kwargs
) -> "models.Gallery":
"""Retrieves information about a Shared Image Gallery.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery.
:type gallery_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Gallery, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2019_03_01.models.Gallery
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Gallery"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Gallery', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
gallery_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
gallery_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Delete a Shared Image Gallery.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery to be deleted.
:type gallery_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.GalleryList"]:
"""List galleries under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GalleryList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_03_01.models.GalleryList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.GalleryList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('GalleryList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["models.GalleryList"]:
"""List galleries under a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GalleryList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_03_01.models.GalleryList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.GalleryList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('GalleryList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/galleries'} # type: ignore
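
# A minimal usage sketch (hedged: the client class, credential and resource
# names below are placeholders for illustration, not values defined here):
#
#     client = ComputeManagementClient(credential, "<subscription-id>")
#     poller = await client.galleries.begin_create_or_update(
#         "my-resource-group", "myGallery", models.Gallery(location="westus"))
#     gallery = await poller.result()
#
# Each begin_* method returns an AsyncLROPoller; awaiting result() drives
# the long-running ARM operation to completion via the ARM polling strategy.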
| 47.432548 | 186 | 0.658435 |
03ea3a5960d4c6d4265c24f5daf069f6b0e98cda | 4,747 | py | Python | services/crawler/models/houses.py | Sirius207/HousesAPI | 6f1ef5fcb3fbd70f82b700b84be31daa9c58bda8 | [
"MIT"
] | null | null | null | services/crawler/models/houses.py | Sirius207/HousesAPI | 6f1ef5fcb3fbd70f82b700b84be31daa9c58bda8 | [
"MIT"
] | 6 | 2021-06-05T16:54:41.000Z | 2021-06-22T11:43:50.000Z | services/crawler/models/houses.py | Sirius207/HousesAPI | 6f1ef5fcb3fbd70f82b700b84be31daa9c58bda8 | [
"MIT"
] | 2 | 2021-06-05T16:51:46.000Z | 2021-06-06T05:04:21.000Z | """
Module for houses detailed data parsing
"""
import os
import re
import uuid
from typing import Optional, Tuple
from loguru import logger
from requests_html import HTMLSession
from bs4 import BeautifulSoup
logger.add("house_parse.log", level="DEBUG")
# pylint: disable= R0902
class House:
def __init__(self, url: str, title: str, data):
self.url = url
self.title = title
self.phone = data["linkInfo"]["mobile"]
        self.city = data["breadcrumb"][0]["name"].replace("租屋", "市")
        # The district and listing-status breadcrumbs only need the trailing
        # "租屋" (rental) marker stripped, not replaced with "市".
        self.district = data["breadcrumb"][1]["name"].replace("租屋", "")
        self.house_status = data["breadcrumb"][2]["name"].replace("租屋", "")
self.lessor, self.lessor_gender, self.lessor_identity = self._get_lessor_info(
data
)
self.sold = None
self.house_type = self._get_house_type(data)
self.gender_requirement = self._get_gender_requirement(data)
self.house_condition = self._get_house_condition(data)
@staticmethod
def _get_lessor_info(data) -> Tuple:
"""[summary]
Args:
html ([type]): [description]
Returns:
Tuple: [description]
"""
lessor_gender: Optional[str] = None
lessor_identity: Optional[str] = None
        full_name = data["linkInfo"]["name"]
        pattern_after_colon = r":\s*(.*)"
        # Guard against names without a colon so a malformed payload cannot
        # raise an IndexError here.
        matches = re.findall(pattern_after_colon, full_name)
        lessor = matches[0].strip() if matches else full_name.strip()
        lessor_identity = full_name.replace(f": {lessor}", "")
if lessor:
if "先生" in lessor:
lessor_gender = "男"
elif "小姐" in lessor:
lessor_gender = "女"
return lessor, lessor_gender, lessor_identity
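    # Illustrative parse with an assumed payload value:
    # "屋主: 王先生" -> ("王先生", "男", "屋主")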
@staticmethod
def _get_house_type(data) -> Optional[str]:
"""parse the "型態" value from house page
Args:
html (object): the html object generate by request_html
Returns:
Optional[str]: the "型態" field. e.g. "電梯大樓"
"""
for item in data["infoData"]["data"]:
if item["name"] == "型態":
return item["value"]
return None
@staticmethod
def _get_gender_requirement(data) -> Optional[str]:
"""parse the "性別要求" value from house page
Args:
html ([type]): the html object generate by request_html
Returns:
Optional[str]: the "性別要求" value. e.g. "男女生皆可"
"""
rule = data["service"]["rule"]
if "限男生" in rule:
return "男生"
if "限女生" in rule:
return "女生"
return "男女生皆可"
@staticmethod
def _get_house_condition(data) -> Optional[str]:
"""parse the "屋況說明" value from house page
Args:
html ([type]): the html object generate by request_html
Returns:
Optional[str]: the "屋況說明" value
"""
house_condition = data["remark"]["content"]
soup = BeautifulSoup(house_condition, features="html.parser")
return soup.get_text() if house_condition else None
def to_dict(self) -> dict:
return {
"url": self.url,
"title": self.title,
"city": self.city,
"district": self.district,
"lessor": self.lessor,
"lessor_gender": self.lessor_gender,
"lessor_identity": self.lessor_identity,
"house_type": self.house_type,
"house_status": self.house_status,
"sold": self.sold,
"phone": self.phone,
"gender_requirement": self.gender_requirement,
"house_condition": self.house_condition,
}
# pylint: enable= R0902
def parse_single_house(url, title, proxy=None) -> Optional[dict]:
"""[summary]
Args:
url ([type]): the url of this house
title ([type]): the title of this house
proxy ([type], optional): the proxy IP. Defaults to None.
Returns:
Optional[dict]: the house detailed data
"""
session_arg = {"browser_args": [f"--proxy-server={proxy}"]} if proxy else {}
headers = {
"device": "pc",
"deviceid": str(uuid.uuid4()),
}
house_id = url.replace(os.environ.get("WEB_URL_PREFIX"), "").replace(".html", "")
url = f"{os.environ.get('API_WEB_URL')}/tw/v1/house/rent/detail?id={house_id}&isOnline=1"
res = HTMLSession(**session_arg).get(url, headers=headers)
status = res.status_code
logger.info(f"Parse: {url} {status}")
if status != 200:
logger.error(status, res.text)
return None
try:
return House(url, title, res.json()["data"]).to_dict()
except AttributeError as error:
logger.warning(f"{url}\n{error}")
return None
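
# A minimal usage sketch (the URL is a made-up example, and WEB_URL_PREFIX /
# API_WEB_URL must be set in the environment first):
#
#     house = parse_single_house(
#         "https://rent.example.com/rent-detail-12345678.html", "some title")
#     if house:
#         print(house["city"], house["phone"])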
| 28.945122 | 93 | 0.577839 |
8ae2f789a39c7626073b5de77f488f00c416ba0e | 16,663 | py | Python | python/mxnet/gluon/nn/basic_layers.py | yinscapital/incubator-mxnet | 4c0df6249d03841f5eb30e1428aa25fc230fed30 | [
"Apache-2.0"
] | null | null | null | python/mxnet/gluon/nn/basic_layers.py | yinscapital/incubator-mxnet | 4c0df6249d03841f5eb30e1428aa25fc230fed30 | [
"Apache-2.0"
] | null | null | null | python/mxnet/gluon/nn/basic_layers.py | yinscapital/incubator-mxnet | 4c0df6249d03841f5eb30e1428aa25fc230fed30 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""Basic neural network layers."""
import warnings
from ..block import Block, HybridBlock
from ..utils import _indent
class Sequential(Block):
"""Stacks `Block`s sequentially.
Example::
net = nn.Sequential()
# use net's name_scope to give child Blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
"""
def __init__(self, prefix=None, params=None):
super(Sequential, self).__init__(prefix=prefix, params=params)
def add(self, *blocks):
"""Adds block on top of the stack."""
for block in blocks:
self.register_child(block)
def forward(self, x):
for block in self._children:
x = block(x)
return x
def __repr__(self):
s = '{name}(\n{modstr}\n)'
modstr = '\n'.join([' ({key}): {block}'.format(key=key,
block=_indent(block.__repr__(), 2))
for key, block in enumerate(self._children)
if isinstance(block, Block)])
return s.format(name=self.__class__.__name__,
modstr=modstr)
def __getitem__(self, key):
return self._children[key]
def __len__(self):
return len(self._children)
def hybridize(self, active=True):
"""Activates or deactivates `HybridBlock`s recursively. Has no effect on
non-hybrid children.
Parameters
----------
active : bool, default True
Whether to turn hybrid on or off.
"""
if self._children and all(isinstance(c, HybridBlock) for c in self._children):
warnings.warn('All children of this Sequential layer are HybridBlocks. Consider ' \
'using HybridSequential for the best performance.')
super(Sequential, self).hybridize(active)
class HybridSequential(HybridBlock):
"""Stacks `HybridBlock`s sequentially.
Example::
        net = nn.HybridSequential()
# use net's name_scope to give child Blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
"""
def __init__(self, prefix=None, params=None):
super(HybridSequential, self).__init__(prefix=prefix, params=params)
def add(self, *blocks):
"""Adds block on top of the stack."""
for block in blocks:
self.register_child(block)
def hybrid_forward(self, F, x):
for block in self._children:
x = block(x)
return x
def __repr__(self):
s = '{name}(\n{modstr}\n)'
modstr = '\n'.join([' ({key}): {block}'.format(key=key,
block=_indent(block.__repr__(), 2))
for key, block in enumerate(self._children)
if isinstance(block, Block)])
return s.format(name=self.__class__.__name__,
modstr=modstr)
def __getitem__(self, key):
return self._children[key]
def __len__(self):
return len(self._children)
class Dense(HybridBlock):
r"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, weight) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `weight` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: the input must be a tensor with rank 2. Use `flatten` to convert it
to rank 2 manually if necessary.
Parameters
----------
units : int
Dimensionality of the output space.
activation : str
Activation function to use. See help on `Activation` layer.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
flatten: bool
Whether the input tensor should be flattened.
If true, all but the first axis of input data are collapsed together.
If false, all but the last axis of input data are kept the same, and the transformation
applies on the last axis.
weight_initializer : str or `Initializer`
Initializer for the `kernel` weights matrix.
bias_initializer: str or `Initializer`
Initializer for the bias vector.
in_units : int, optional
Size of the input data. If not specified, initialization will be
deferred to the first time `forward` is called and `in_units`
will be inferred from the shape of input data.
prefix : str or None
See document of `Block`.
params : ParameterDict or None
See document of `Block`.
If ``flatten`` is set to be True, then the shapes are:
Input shape:
An N-D input with shape
`(batch_size, x1, x2, ..., xn) with x1 * x2 * ... * xn equal to in_units`.
Output shape:
The output would have shape `(batch_size, units)`.
If ``flatten`` is set to be false, then the shapes are:
Input shape:
An N-D input with shape
`(x1, x2, ..., xn, in_units)`.
Output shape:
The output would have shape `(x1, x2, ..., xn, units)`.
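
    A minimal usage sketch follows; the unit counts are illustrative
    assumptions rather than defaults of this layer.

    Example::

        net = nn.Dense(64, activation='relu', in_units=128)
        net.initialize()
        # input of shape (batch_size, 128) -> output of shape (batch_size, 64)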
"""
def __init__(self, units, activation=None, use_bias=True, flatten=True,
weight_initializer=None, bias_initializer='zeros',
in_units=0, **kwargs):
super(Dense, self).__init__(**kwargs)
self._flatten = flatten
with self.name_scope():
self._units = units
self._in_units = in_units
self.weight = self.params.get('weight', shape=(units, in_units),
init=weight_initializer,
allow_deferred_init=True)
if use_bias:
self.bias = self.params.get('bias', shape=(units,),
init=bias_initializer,
allow_deferred_init=True)
else:
self.bias = None
if activation is not None:
self.act = Activation(activation, prefix=activation+'_')
else:
self.act = None
def hybrid_forward(self, F, x, weight, bias=None):
act = F.FullyConnected(x, weight, bias, no_bias=bias is None, num_hidden=self._units,
flatten=self._flatten, name='fwd')
if self.act is not None:
act = self.act(act)
return act
def __repr__(self):
s = '{name}({layout}, {act})'
return s.format(name=self.__class__.__name__,
act=self.act if self.act else 'linear',
layout='{0} -> {1}'.format(self._in_units, self._units) if self._in_units
else self._units)
class Activation(HybridBlock):
"""Applies an activation function to input.
Parameters
----------
activation : str
Name of activation function to use.
See :func:`~mxnet.ndarray.Activation` for available choices.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
"""
def __init__(self, activation, **kwargs):
self._act_type = activation
super(Activation, self).__init__(**kwargs)
def _alias(self):
return self._act_type
def hybrid_forward(self, F, x):
return F.Activation(x, act_type=self._act_type, name='fwd')
def __repr__(self):
s = '{name}({_act_type})'
return s.format(name=self.__class__.__name__,
**self.__dict__)
class Dropout(HybridBlock):
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units
to 0 at each update during training time, which helps prevent overfitting.
Parameters
----------
rate : float
Fraction of the input units to drop. Must be a number between 0 and 1.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
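
    A minimal usage sketch follows; the drop rate is an illustrative
    assumption.

    Example::

        drop = nn.Dropout(0.5)
        # during training roughly half of the activations are zeroed;
        # at inference the input passes through unchanged
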
References
----------
`Dropout: A Simple Way to Prevent Neural Networks from Overfitting
<http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_
"""
def __init__(self, rate, **kwargs):
super(Dropout, self).__init__(**kwargs)
self._rate = rate
def hybrid_forward(self, F, x):
return F.Dropout(x, p=self._rate, name='fwd')
def __repr__(self):
s = '{name}(p = {_rate})'
return s.format(name=self.__class__.__name__,
**self.__dict__)
class BatchNorm(HybridBlock):
"""Batch normalization layer (Ioffe and Szegedy, 2014).
Normalizes the input at each batch, i.e. applies a transformation
that maintains the mean activation close to 0 and the activation
standard deviation close to 1.
Parameters
----------
axis : int, default 1
The axis that should be normalized. This is typically the channels
(C) axis. For instance, after a `Conv2D` layer with `layout='NCHW'`,
set `axis=1` in `BatchNorm`. If `layout='NHWC'`, then set `axis=3`.
momentum: float, default 0.9
Momentum for the moving average.
epsilon: float, default 1e-5
Small float added to variance to avoid dividing by zero.
center: bool, default True
If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: bool, default True
If True, multiply by `gamma`. If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
beta_initializer: str or `Initializer`, default 'zeros'
Initializer for the beta weight.
gamma_initializer: str or `Initializer`, default 'ones'
Initializer for the gamma weight.
moving_mean_initializer: str or `Initializer`, default 'zeros'
Initializer for the moving mean.
moving_variance_initializer: str or `Initializer`, default 'ones'
Initializer for the moving variance.
in_channels : int, default 0
Number of channels (feature maps) in input data. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
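
    A minimal usage sketch follows; the channel count and batch shape are
    illustrative assumptions.

    Example::

        net = nn.BatchNorm(in_channels=16)
        net.initialize()
        # normalizes axis 1 of an NCHW input, e.g. shape (8, 16, 32, 32)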
"""
def __init__(self, axis=1, momentum=0.9, epsilon=1e-5, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
running_mean_initializer='zeros', running_variance_initializer='ones',
in_channels=0, **kwargs):
super(BatchNorm, self).__init__(**kwargs)
self._kwargs = {'axis': axis, 'eps': epsilon, 'momentum': momentum,
'fix_gamma': not scale}
if in_channels != 0:
self.in_channels = in_channels
self.gamma = self.params.get('gamma', grad_req='write' if scale else 'null',
shape=(in_channels,), init=gamma_initializer,
allow_deferred_init=True,
differentiable=scale)
self.beta = self.params.get('beta', grad_req='write' if center else 'null',
shape=(in_channels,), init=beta_initializer,
allow_deferred_init=True,
differentiable=center)
self.running_mean = self.params.get('running_mean', grad_req='null',
shape=(in_channels,),
init=running_mean_initializer,
allow_deferred_init=True,
differentiable=False)
self.running_var = self.params.get('running_var', grad_req='null',
shape=(in_channels,),
init=running_variance_initializer,
allow_deferred_init=True,
differentiable=False)
def hybrid_forward(self, F, x, gamma, beta, running_mean, running_var):
return F.BatchNorm(x, gamma, beta, running_mean, running_var,
name='fwd', **self._kwargs)
def __repr__(self):
s = '{name}({content}'
if hasattr(self, 'in_channels'):
s += ', in_channels={0}'.format(self.in_channels)
s += ')'
return s.format(name=self.__class__.__name__,
content=', '.join(['='.join([k, v.__repr__()])
for k, v in self._kwargs.items()]))
class LeakyReLU(HybridBlock):
"""Leaky version of a Rectified Linear Unit.
It allows a small gradient when the unit is not active::
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`.
Parameters
----------
alpha : float
slope coefficient for the negative half axis. Must be >= 0.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
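
    A minimal usage sketch follows; the slope value is an illustrative
    assumption.

    Example::

        act = nn.LeakyReLU(0.1)
        # f(-2.0) = -0.2 and f(3.0) = 3.0 under this slope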
"""
def __init__(self, alpha, **kwargs):
super(LeakyReLU, self).__init__(**kwargs)
self._alpha = alpha
def hybrid_forward(self, F, x):
return F.LeakyReLU(x, act_type='leaky', slope=self._alpha, name='fwd')
def __repr__(self):
s = '{name}({alpha})'
return s.format(name=self.__class__.__name__,
alpha=self._alpha)
class Embedding(HybridBlock):
"""Turns non-negative integers (indexes/tokens) into dense vectors
of fixed size. eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
Parameters
----------
input_dim : int
Size of the vocabulary, i.e. maximum integer index + 1.
output_dim : int
Dimension of the dense embedding.
dtype : str or np.dtype, default 'float32'
Data type of output embeddings.
weight_initializer : Initializer
Initializer for the `embeddings` matrix.
Input shape:
2D tensor with shape: `(N, M)`.
Output shape:
3D tensor with shape: `(N, M, output_dim)`.
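
    A minimal usage sketch follows; the vocabulary and embedding sizes are
    illustrative assumptions.

    Example::

        embed = nn.Embedding(input_dim=1000, output_dim=16)
        embed.initialize()
        # indices of shape (N, M) -> embeddings of shape (N, M, 16)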
"""
def __init__(self, input_dim, output_dim, dtype='float32',
weight_initializer=None, **kwargs):
super(Embedding, self).__init__(**kwargs)
self._kwargs = {'input_dim': input_dim, 'output_dim': output_dim,
'dtype': dtype}
self.weight = self.params.get('weight', shape=(input_dim, output_dim),
init=weight_initializer,
allow_deferred_init=True)
def hybrid_forward(self, F, x, weight):
return F.Embedding(x, weight, name='fwd', **self._kwargs)
def __repr__(self):
s = '{block_name}({input_dim} -> {output_dim}, {dtype})'
return s.format(block_name=self.__class__.__name__,
**self._kwargs)
class Flatten(HybridBlock):
"""Flattens the input to two dimensional.
Input shape:
Arbitrary shape `(N, a, b, c, ...)`
Output shape:
2D tensor with shape: `(N, a*b*c...)`
"""
def __init__(self, **kwargs):
super(Flatten, self).__init__(**kwargs)
def hybrid_forward(self, F, x):
return x.reshape((0, -1))
def __repr__(self):
return self.__class__.__name__
| 35.989201 | 97 | 0.584709 |
dd82f9bdf963284c4698348df176946417ac0f24 | 312 | py | Python | GearBot/Util/Emoji.py | TheMelvin/Gearbot | 2fd0d6a1fb6750a1cb6c49828c4b9d35acaa544a | [
"MIT"
] | null | null | null | GearBot/Util/Emoji.py | TheMelvin/Gearbot | 2fd0d6a1fb6750a1cb6c49828c4b9d35acaa544a | [
"MIT"
] | null | null | null | GearBot/Util/Emoji.py | TheMelvin/Gearbot | 2fd0d6a1fb6750a1cb6c49828c4b9d35acaa544a | [
"MIT"
] | null | null | null | from discord import utils
from Util import Configuration
emojis = dict()
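# Resolve the emoji ids from the master config into usable Emoji objects
# once the bot's emoji cache is available.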
def on_ready(bot):
for name, eid in Configuration.getMasterConfigVar("EMOJI").items():
emojis[name] = utils.get(bot.emojis, id=eid)
def get_chat_emoji(name):
emoji = emojis[name]
return f"<:{emoji.name}:{emoji.id}>" | 24 | 71 | 0.695513 |
d20e2b854a15dc01d863f36935ffe6dcf1f5c2d1 | 23,304 | py | Python | contrib/for_review/ModifiedBasicPreset/python/IECore/BasicPreset.py | gcodebackups/cortex-vfx | 72fa6c6eb3327fce4faf01361c8fcc2e1e892672 | [
"BSD-3-Clause"
] | 5 | 2016-07-26T06:09:28.000Z | 2022-03-07T03:58:51.000Z | contrib/for_review/ModifiedBasicPreset/python/IECore/BasicPreset.py | turbosun/cortex | 4bdc01a692652cd562f3bfa85f3dae99d07c0b15 | [
"BSD-3-Clause"
] | null | null | null | contrib/for_review/ModifiedBasicPreset/python/IECore/BasicPreset.py | turbosun/cortex | 4bdc01a692652cd562f3bfa85f3dae99d07c0b15 | [
"BSD-3-Clause"
] | 3 | 2015-03-25T18:45:24.000Z | 2020-02-15T15:37:18.000Z | ##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import os
import re
## Implements a Preset to permit values to be saved and restored
## from a Parameterised object. BasicPresets can be created either
## as in-memory representations of the parameters, or saved to disk.
class BasicPreset( IECore.Preset ) :
## The constructor is essentially in two forms:
##
## IECore.BasicPreset( parameterised, rootParameter=None, parameters=(), referenceData=False )
##
## This is the most common form, and should be used to create a new preset from the
## given parameterised holding object.
##
## IECore.BasicPreset( pathOrData )
##
## This form is used to restore data into a preset for application, and should rarely
## be used directly.
##
## \param pathOrData, this should be an absolute path to a CompoundObject on disk or a
## CompoundObject pointer itself. This object should contain the data structure for the preset.
## \param parameterised, The Parameterised object holding the parameters to be saved.
	## \param rootParameter, IECore.Parameter, Where to start in the parameter hierarchy.
	## \param parameters, ( IECore.Parameter, ... ), A list of Parameters to include in
	## the preset. This allows certain values not to be included in the preset.
## \param referenceData, bool, When enabled, this stops the preset mechanism from
## copying the value data from the parameters it encapsulates. This can save memory
## when the preset is to be written straight to disk. The default behaviour
## copies any parameter values so the preset is not dependent on the source
## parameters state at the time of application.
def __init__( self, pathOrDataOrParameterised, rootParameter=None, parameters=(), referenceData=False ) :
self._header = None
self._data = None
self._cob = None
IECore.Preset.__init__( self )
self.parameters().addParameters(
[
# \todo Remove this parameter in next Cortex major version.
IECore.BoolParameter(
name = "overwriteMatchingComponents",
description = "When off, the preset will always append items to a " + \
"ClassVectorParameter, otherwise, it will replace the existing " + \
"classes with the same names, if they don't match the preset. " + \
"This does not affect and parameter values, these are always set " + \
"to match the preset. This parameter is deprecated. Use the other parameters " + \
"in this Preset to better specify the operation you want.",
defaultValue = False
),
IECore.BoolParameter(
name = "modifyValues",
description = "If On, it will apply value changes on non-compound parameters. This is usually the case.",
defaultValue = True,
),
IECore.BoolParameter(
name = "replaceClasses",
description = "If On, it will apply changes on the class name found on ClassParameters or ClassVectorParameters. If False, then it will skip processing these parameters if the current class name does not match what's stored in this preset.",
defaultValue = True,
),
IECore.BoolParameter(
name = "forceAddingClasses",
description = "If On, then it will force creating new classes in ClassVectorParameters for every item in this preset. If False, it will match the parameters by name and modify them in place.",
defaultValue = True,
),
IECore.BoolParameter(
name = "removePreviousClasses",
description = "If On, then it will remove from the ClassVectorParameters any class that does not exist in this preset.",
defaultValue = False,
),
IECore.BoolParameter(
name = "enforceClassesOrder",
description = "If On, then it will try to enforce the same ClassVectorParameters item order stored in this preset.",
defaultValue = False,
),
]
)
if isinstance( pathOrDataOrParameterised, str ) or isinstance( pathOrDataOrParameterised, unicode ) :
self._cob = pathOrDataOrParameterised
elif isinstance( pathOrDataOrParameterised, IECore.CompoundObject ) :
self._data = pathOrDataOrParameterised
elif hasattr( pathOrDataOrParameterised, "parameters" ):
data = IECore.CompoundObject()
if rootParameter is None:
rootParameter = pathOrDataOrParameterised.parameters()
BasicPreset._grabHierarchy( data, rootParameter, parameters )
# We need to prune any class entries without parameters, so that
# we don't meddle with classes the user asked us not to copy parameters for.
BasicPreset._pruneHierarchy( data )
if referenceData:
self._data = data
else:
self._data = data.copy()
else :
raise ValueError, "IECore.BasicPreset.__init__: Unsupported object passed: %s." % pathOrDataOrParameterised
	## \return a dictionary of metadata about the preset. BasicPresets
## provide the following keys, when a preset has been saved to disk.
## NOTE: Presets created by the 'Copy' method will not contain any
	## pertinent information in these fields:
##
## "title" : string, The user supplied name the preset.
## "description" : string, A multi-line string of arbitrary descriptive text.
## "categories" : ( string, .. ), A list of strings, one for each category
## the preset is considered part of.
def metadata( self ) :
self._ensureHeader()
h = self._header
return {
"title" : h["title"].value if "title" in h else self.__class__,
"description" : h["description"].value if "description" in h else "",
"categories" : list( h["categories"] ) if "categories" in h else (),
}
## \see IECore.Preset.applicableTo
def applicableTo( self, parameterised, rootParameter ) :
self._ensureData()
return self._applicableTo( parameterised, rootParameter, self._data )
## \see IECore.Preset.__call__
# \param parameterList A list of Parameter pointers that the preset should apply to.
# \param parameterListExcludes A bool, which when True, will treat the parameterList as a
# 'skip' list, rather than an 'application' list.
# NOTE: When parameterListExcludes is False, all parent parameters of a desired leaf parameter
# must be in this list. Otherwise the preset will not consider the parent so will never
# reach the child.
def __call__( self, parameterised, rootParameter, parameterList=[], parameterListExcludes=False ) :
if self["overwriteMatchingComponents"].getTypedValue() :
# tell the user about the deprecated parameter and set the new parameters to represent the previous behavior.
IECore.warning( "Deprecated parameter 'overwriteMatchingComponents' being used for BasicPreset object." )
self["forceAddingClasses"] = False
self["replaceClasses"] = True
self["modifyValues"] = True
self["removePreviousClasses"] = False
self["enforceClassesOrder"] = False
self._ensureData()
if not self.applicableTo( parameterised, rootParameter ) :
raise RuntimeError, "IECore.BasicPreset: Sorry, this preset is not applicable to that parameter."
if parameterList and not parameterListExcludes :
# Not much point getting out of bed if the root isn't in there...
if rootParameter not in parameterList:
# Copy the list so we don't modify the one we were given.
parameterList = parameterList[:]
parameterList.append( rootParameter )
self._applyHierarchy( parameterised, rootParameter, self._data, parameterList, parameterListExcludes )
	## This method will save the specified parameters to disk in such a way
## as can be loaded by the IECore.ClassLoader
## \param path, string, The file system location the preset should be saved to
## note: this should be a directory name, not the desired preset name.
## \param name, string, The name of the preset, the preset will be saved under this
## name inside of 'path'. This name is not sanitised, and it is the
## responsibility of the caller to ensure that it is a legal file system name.
## \param title, string, The title of the preset, no character restrictions.
## \param description, string, A description of the preset, no character restrictions.
## \param categories, ( string, ... ) A list of categories the preset should be tagged with
## \param version, int, the version of the preset, this will default to 1, used when saving
## for the ClassLoader.
## \param classLoadable, bool, if True (default) then the preset will be saved in a way that
## can be loaded by the ClassLoader, otherwise, just a cob file is written containing the
## presets data.
def save( self, path, name, title="", description="", categories=(), version=1, classLoadable=True ) :
if not self._data:
raise RuntimeError, "IECore.BasicPreset.save: Unable to save, preset has no data."
baseDir = path
cobName = "%s.cob" % ( name, )
pyFile = None
if classLoadable :
baseDir = "%s/%s" % ( path, name )
cobName = "%s-%i.cob" % ( name, version )
pyFile = "%s/%s-%i.py" % ( baseDir, name, version )
cobFile = "%s/%s" % ( baseDir, cobName )
if not os.path.isdir( baseDir ) :
os.makedirs( baseDir )
if not os.path.isdir( baseDir ) :
raise RuntimeError, "IECore.BasicPreset.save: Unable to create the directory '%s'" % baseDir
w = IECore.Writer.create( self._data, cobFile )
w["header"].getValue()["title"] = IECore.StringData( title if title else name )
w["header"].getValue()["description"] = IECore.StringData( description )
w["header"].getValue()["categories"] = IECore.StringVectorData( categories )
w["header"].getValue()["dataVersion"] = IECore.IntData( 1 )
w.write()
if pyFile :
BasicPreset._writePy( pyFile, cobName, name )
def _ensureData( self ) :
if self._data != None:
return
if self._cob is not None:
data = IECore.Reader.create( self._cob ).read()
if not isinstance( data, IECore.CompoundObject ) :
raise RuntimeError, "IECore.BasicPreset: Unable to retrieve data from '%s'." % self._cob
self._data = data
if not self._data:
raise RuntimeError, "IECore.BasicPreset: No data in preset."
def _ensureHeader( self ) :
if self._cob != None:
self._header = IECore.Reader.create( self._cob ).readHeader()
else:
self._header = {}
@staticmethod
def _writePy( fileName, cob, className ) :
f = open( fileName, "w" )
f.write(
"""import IECore
import os.path
class %s( IECore.BasicPreset ):
def __init__( self ):
dir = os.path.dirname( __file__ )
IECore.BasicPreset.__init__( self, dir+"/%s" )
IECore.registerRunTimeTyped( %s )
""" % ( className, cob, className )
)
@staticmethod
def _grabHierarchy( data, parameter, parameterList=() ) :
if parameter.staticTypeId() == IECore.TypeId.CompoundParameter :
for p in parameter.keys() :
data[p] = IECore.CompoundObject()
BasicPreset._grabHierarchy(
data[p],
parameter[p],
parameterList,
)
else :
if isinstance( parameter, IECore.ClassParameter ) :
BasicPreset._grabClassParameter( parameter, data, parameterList )
elif isinstance( parameter, IECore.ClassVectorParameter ) :
BasicPreset._grabClassVectorParameter( parameter, data, parameterList )
else :
# Some parameter types end up with different python instance
# due to a boost bug, so 'if p in parameterList' fails.
if parameterList:
for p in parameterList:
if parameter.isSame( p ) :
BasicPreset._grabParameter( parameter, data )
break
else :
BasicPreset._grabParameter( parameter, data )
@staticmethod
def _grabParameter( parameter, data ) :
data["_value_"] = parameter.getValue()
@staticmethod
def _grabClassParameter( parameter, data, parameterList ) :
c = parameter.getClass( True )
data["_className_"] = IECore.StringData( c[1] )
data["_classVersion_"] = IECore.IntData( c[2] )
data["_classSearchPaths_"] = IECore.StringData( c[3] )
classNameFilter = "*"
try :
classNameFilter = parameter.userData()["UI"]["classNameFilter"].value
except :
pass
data["_classNameFilter_"] = IECore.StringData( classNameFilter )
data["_classValue_"] = IECore.CompoundObject()
if c[0] :
# Some classes may have no parameters, if they have been
# specifically included in the parameter list, then we
# want to save their instance specification anyway.
if len( c[0].parameters() ) :
BasicPreset._grabHierarchy(
data["_classValue_"],
c[0].parameters(),
parameterList,
)
elif parameterList :
for p in parameterList:
if parameter.isSame( p ) :
data["_noPrune_"] = IECore.BoolData( True )
else :
data["_noPrune_"] = IECore.BoolData( True )
@staticmethod
def _grabClassVectorParameter( parameter, data, parameterList ) :
classes = parameter.getClasses( True )
data["_classSearchPaths_"] = IECore.StringData( parameter.searchPathEnvVar() )
classNameFilter = "*"
try :
classNameFilter = parameter.userData()["UI"]["classNameFilter"].value
except :
pass
data["_classNameFilter_" ] = IECore.StringData( classNameFilter )
data["_classNames_"] = IECore.StringVectorData()
data["_classVersions_"] = IECore.IntVectorData()
data["_classOrder_"] = IECore.StringVectorData()
data["_values_"] = IECore.CompoundObject()
for c in classes:
data["_classOrder_"].append( c[1] )
data["_classNames_"].append( c[2] )
data["_classVersions_"].append( c[3] )
v = IECore.CompoundObject()
BasicPreset._grabHierarchy(
v,
c[0].parameters(),
parameterList,
)
data["_values_"][c[1]] = v
def _applyHierarchy( self, parameterised, parameter, data, parameterList=[], invertList=False ) :
if parameterList :
if invertList : # its a 'skipList'
if parameter in parameterList :
return
else :
if parameter not in parameterList :
return
if "_className_" in data :
self._applyClassParameter( parameterised, parameter, data, parameterList, invertList )
elif "_classNames_" in data :
self._applyClassVector( parameterised, parameter, data, parameterList, invertList )
elif "_value_" in data :
self._applyParameter( parameterised, parameter, data )
else : # CompoundParameter
for p in data.keys() :
if p not in parameter :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.BasicPreset",
"'%s' is missing from '%s' (%s)" % ( p, parameter.name, parameter )
)
continue
self._applyHierarchy( parameterised, parameter[p], data[p], parameterList, invertList )
def _applyParameter( self, parameterised, parameter, data ) :
if not self.parameters()["modifyValues"].getTypedValue() :
return
try:
parameter.setValue( data["_value_"] )
except Exception, e:
IECore.msg( IECore.Msg.Level.Warning, "IECore.BasicPreset", str(e) )
def _applyClassParameter( self, parameterised, parameter, data, parameterList=[], invertList=False ) :
if not isinstance( parameter, IECore.ClassParameter ) :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.BasicPreset",
"Unable to restore to '%s' (%s) as it isnt a ClassParameter"
% ( parameter.name, parameter )
)
return
c = parameter.getClass( True )
className = data["_className_"].value
classVersion = data["_classVersion_"].value
classPaths = data["_classSearchPaths_"].value
if self.parameters()["replaceClasses"].getTypedValue() :
if c[1] != className or c[2] != classVersion or c[3] != classPaths:
parameter.setClass( className, classVersion, classPaths )
else :
if c[1] != className :
# class name is different, we don't change it and we don't process it's
# parameters since they will differ anyways...
return
c = parameter.getClass( False )
if c and "_classValue_" in data :
self._applyHierarchy( parameterised, c.parameters(), data["_classValue_"], parameterList, invertList )
def _applyClassVector( self, parameterised, parameter, data, parameterList=[], invertList=False ) :
if not isinstance( parameter, IECore.ClassVectorParameter ) :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.BasicPreset",
"Unable to restore to '%s' (%s) as it isnt a ClassVectorParameter"
% ( parameter.name, parameter )
)
return
replaceClasses = self.parameters()["replaceClasses"].getTypedValue()
forceAddingClasses = self.parameters()["forceAddingClasses"].getTypedValue()
names = data["_classNames_"]
versions = data["_classVersions_"]
paramNames = data["_classOrder_"]
if self.parameters()["removePreviousClasses"].getTypedValue() :
# remove classes that are not in the Preset....
currClasses = map( lambda c: c[1:], parameter.getClasses( True ) )
if forceAddingClasses :
# if the user if forcing to add as new classes, we should remove all of the existent ones..
newClasses = []
else :
newClasses = filter( lambda c: c[0] in paramNames, currClasses )
if newClasses != currClasses :
parameter.setClasses( newClasses )
actualParamNames = []
for i in range( len( data["_classNames_"] ) ) :
paramName = paramNames[i]
# We still have the class information, even if
# there were no parameter values saved.
if paramName not in data["_values_"] :
actualParamNames.append( paramName )
continue
if forceAddingClasses:
( paramName, c ) = self._addClassToVector(
parameter,
paramName,
names[i],
versions[i],
)
else :
if replaceClasses :
c = None
if paramName in parameter:
c = parameter.getClass( paramName, True )
if not c or c[1:] != ( paramName, names[i], versions[i] ) :
parameter.setClass( paramName, names[i], versions[i] )
c = parameter.getClass( paramName, True )
else :
# Class parameter must exist and it must be of the same class name.
if not paramName in parameter:
continue
c = parameter.getClass( paramName, True )
if c[2] != names[i] :
continue
self._applyHierarchy(
parameterised,
c[0].parameters(),
data["_values_"][ paramNames[i] ],
parameterList,
invertList
)
actualParamNames.append( paramName )
if self.parameters()["enforceClassesOrder"].getTypedValue() :
currClasses = map( lambda c: c[1:], parameter.getClasses( True ) )
# get the classes that did not exist in the preset and figure out their place
# in the preset order by inserting their parameter names in the list right after
# the closest parameter that DID exist in the preset.
prevActualParam = None
lastActualParamInsertion = None
for c in currClasses :
if c[0] in actualParamNames :
prevActualParam = c[0]
continue
if prevActualParam is None :
if lastActualParamInsertion is None :
lastActualParamInsertion = 0
else :
lastActualParamInsertion += 1
else :
lastActualParamInsertion = actualParamNames.index( prevActualParam ) + 1
prevActualParam = None
actualParamNames.insert(lastActualParamInsertion,c[0])
def classCmp( c1, c2 ):
i1 = actualParamNames.index( c1[0] )
i2 = actualParamNames.index( c2[0] )
return cmp( i1, i2 )
# try to apply the order stored in the preset
newOrder = list(currClasses)
newOrder.sort( classCmp )
if currClasses != newOrder :
parameter.setClasses( newOrder )
def _addClassToVector( self, parameter, parameterName, className, classVersion ) :
classes = parameter.getClasses( True )
parameterNames = [ c[1] for c in classes ]
if parameterName in parameterNames:
parameterName = parameter.newParameterName()
parameter.setClass( parameterName, className, classVersion )
return ( parameterName, parameter.getClass( parameterName, True ) )
def _applicableTo( self, parameterised, parameter, data ) :
if parameter.staticTypeId() == IECore.TypeId.CompoundParameter :
if "_classValue_" in data or "_values_" in data:
return False
for k in data.keys():
if k not in parameter:
return False
elif isinstance( parameter, IECore.ClassParameter ) :
if "_className_" not in data:
return False
classNameFilter = "*"
try :
classNameFilter = parameter.userData()["UI"]["classNameFilter"].value
except :
pass
if classNameFilter != data["_classNameFilter_"].value:
return False
elif isinstance( parameter, IECore.ClassVectorParameter ) :
if "_classNames_" not in data:
return False
classNameFilter = "*"
try :
classNameFilter = parameter.userData()["UI"]["classNameFilter"].value
except :
pass
if classNameFilter != data["_classNameFilter_"].value:
return False
if data["_classSearchPaths_"].value != parameter.searchPathEnvVar() :
return False
else :
if "_value_" not in data:
return False
if not parameter.valueValid( data["_value_"] )[0]:
return False
return True
@staticmethod
def _pruneHierarchy( data ) :
returnVal = True
for k in data.keys() :
if k == "_value_" or k == "_noPrune_":
returnVal = False
elif isinstance( data[k], IECore.CompoundObject ):
if BasicPreset._pruneHierarchy( data[k] ) :
del data[k]
else :
returnVal = False
return returnVal
IECore.registerRunTimeTyped( BasicPreset )
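# --- Hedged usage sketch (not part of the original file) ---
# Assuming a preset instance built from previously saved data, applying it to a
# parameterised holder might look like this (`preset`, `parameterised`, and the
# use of the public Preset interface are assumptions, not taken from this file):
#
#   if preset.applicableTo( parameterised, parameterised.parameters() ) :
#       preset( parameterised, parameterised.parameters() )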
| 33.102273 | 246 | 0.683659 |
de4fc9172da64a238a01d13f72af84acf1ad8d31 | 3,381 | py | Python | archiver/test_delete.py | praekeltfoundation/rasa-postgres-archiver | cba26a87dffb4049b2692f05a6c897983ce30cb5 | [
"BSD-3-Clause"
] | 1 | 2021-05-20T14:05:22.000Z | 2021-05-20T14:05:22.000Z | archiver/test_delete.py | praekeltfoundation/rasa-postgres-archiver | cba26a87dffb4049b2692f05a6c897983ce30cb5 | [
"BSD-3-Clause"
] | 1 | 2020-12-08T13:53:39.000Z | 2020-12-08T13:53:39.000Z | archiver/test_delete.py | praekeltfoundation/rasa-postgres-archiver | cba26a87dffb4049b2692f05a6c897983ce30cb5 | [
"BSD-3-Clause"
] | null | null | null | from datetime import date, datetime, timedelta, timezone
from unittest import TestCase, mock
import boto3
import psycopg2
from moto import mock_s3
from archiver import settings
from archiver.delete import delete_day, delete_events
class TestArchiver(TestCase):
def setUp(self):
self.conn = psycopg2.connect(settings.DATABASE)
with self.conn.cursor() as cur:
cur.execute(
"""
CREATE TABLE events(
id serial PRIMARY KEY NOT NULL,
sender_id character varying(255) NOT NULL,
type_name character varying(255) NOT NULL,
"timestamp" double precision,
intent_name character varying(255),
action_name character varying(255),
data text
)"""
)
def tearDown(self):
self.conn.rollback()
self.conn.close()
def create_event(
self,
sender_id="27820001001",
type_name="action",
timestamp=datetime(2020, 12, 2, 14, 5, 2, tzinfo=timezone.utc).timestamp(),
intent_name=None,
action_name="action_session_start",
data='{"event": "session_started"}',
):
with self.conn.cursor() as cur:
cur.execute(
"""
INSERT INTO events(sender_id, type_name, timestamp, intent_name,
action_name, data)
VALUES (%s, %s, %s, %s, %s, %s)""",
(sender_id, type_name, timestamp, intent_name, action_name, data),
)
def test_delete_day(self):
"""
Deletes events that fall on the specified day
"""
self.create_event()
self.create_event(
timestamp=datetime(2020, 12, 3, 14, 5, 2, tzinfo=timezone.utc).timestamp()
)
delete_day(self.conn, date(2020, 12, 2))
with self.conn.cursor() as cur:
cur.execute("SELECT count(*) from events")
[count] = cur.fetchone()
self.assertEqual(count, 1)
@mock_s3
@mock.patch("archiver.delete.date")
def test_delete_events(self, date_mock):
"""
Deletes all events that we have archives for within time range
"""
# We can't mock date.today directly, so we mock the whole date class used by
# the module under test, but we still need date.fromisoformat, so we put that back
date_mock.today.return_value = date(2020, 12, 3) + timedelta(days=30)
date_mock.fromisoformat = date.fromisoformat
self.create_event()
self.create_event(
timestamp=datetime(2020, 12, 3, 14, 5, 2, tzinfo=timezone.utc).timestamp()
)
client = boto3.resource("s3")
bucket = client.create_bucket(Bucket="rasa-archive")
bucket.put_object(Key="events-2020-12-02.json.gz")
delete_events(self.conn, bucket)
with self.conn.cursor() as cur:
cur.execute("SELECT count(*) from events")
[count] = cur.fetchone()
self.assertEqual(count, 1)
@mock_s3
def test_delete_with_no_events(self):
"""
Should not raise any exceptions when there is no events to delete
"""
client = boto3.resource("s3")
bucket = client.create_bucket(Bucket="rasa-archive")
delete_events(self.conn, bucket)
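# --- Hedged sketch (assumption, not taken from this repository) ---
# delete_day() presumably maps the calendar day to a UTC timestamp range and
# deletes the matching rows, along these lines:
#
#   start = datetime(day.year, day.month, day.day, tzinfo=timezone.utc).timestamp()
#   end = start + 24 * 60 * 60
#   cur.execute(
#       "DELETE FROM events WHERE timestamp >= %s AND timestamp < %s",
#       (start, end),
#   )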
| 33.475248 | 86 | 0.580893 |
853fc49fdd4da3836c22826b3f9e8e0399efb8fd | 2,328 | py | Python | oneflow/python/nn/modules/floor.py | qqsun8819/oneflow | b61e07b3406cc5c2d71f3d5e8b0f4de9b3fb9e40 | [
"Apache-2.0"
] | 1 | 2021-02-22T00:43:08.000Z | 2021-02-22T00:43:08.000Z | oneflow/python/nn/modules/floor.py | qqsun8819/oneflow | b61e07b3406cc5c2d71f3d5e8b0f4de9b3fb9e40 | [
"Apache-2.0"
] | null | null | null | oneflow/python/nn/modules/floor.py | qqsun8819/oneflow | b61e07b3406cc5c2d71f3d5e8b0f4de9b3fb9e40 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
from typing import Optional, Sequence, Union
import oneflow as flow
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.nn.module import Module
from oneflow.python.framework.tensor import register_tensor_op
from oneflow.python.nn.modules.utils import _check_axis
class Floor(Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
return flow.F.floor(x)
@oneflow_export("floor")
@experimental_api
def floor_op(x):
r"""
Returns a new tensor with the floor of the elements of :attr:`input`: the largest integer less than or equal to each element.
.. math::
\text{out}_{i} = \lfloor \text{input}_{i} \rfloor
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> input = flow.Tensor(np.array([-0.5, 1.5, 0, 0.8]), dtype=flow.float32)
>>> output = flow.floor(input)
>>> output.shape
flow.Size([4])
>>> output.numpy()
array([-1., 1., 0., 0.], dtype=float32)
>>> input1 = flow.Tensor(np.array([[0.8, 1.0], [-0.6, 2.5]]), dtype=flow.float32)
>>> output1 = input1.floor()
>>> output1.shape
flow.Size([2, 2])
>>> output1.numpy()
array([[ 0., 1.],
[-1., 2.]], dtype=float32)
"""
return Floor()(x)
@register_tensor_op("floor")
@experimental_api
def floor_op_tensor(input):
r"""
See :func:`oneflow.experimental.floor`
"""
return Floor()(input)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| 26.454545 | 89 | 0.649055 |
f3012181fab0827a0da3c774260996fa251fb358 | 27,435 | py | Python | tests/migrations/test_writer.py | shinshin86/django | 5cc81cd9eb69f5f7a711412c02039b435c393135 | [
"PSF-2.0",
"BSD-3-Clause"
] | 2 | 2017-03-30T06:28:50.000Z | 2017-03-30T06:28:55.000Z | tests/migrations/test_writer.py | Blaahborgh/django | c591bc3ccece1514d6b419826c7fa36ada9d9213 | [
"PSF-2.0",
"BSD-3-Clause"
] | 55 | 2016-02-27T06:02:24.000Z | 2021-11-01T07:53:20.000Z | tests/migrations/test_writer.py | Blaahborgh/django | c591bc3ccece1514d6b419826c7fa36ada9d9213 | [
"PSF-2.0",
"BSD-3-Clause"
] | 2 | 2018-01-08T08:14:29.000Z | 2020-11-04T08:46:29.000Z | import datetime
import decimal
import enum
import functools
import math
import os
import re
import uuid
from unittest import mock
import custom_migration_operations.more_operations
import custom_migration_operations.operations
from django import get_version
from django.conf import settings
from django.core.validators import EmailValidator, RegexValidator
from django.db import migrations, models
from django.db.migrations.writer import (
MigrationWriter, OperationWriter, SettingsReference,
)
from django.test import SimpleTestCase
from django.utils import datetime_safe
from django.utils.deconstruct import deconstructible
from django.utils.functional import SimpleLazyObject
from django.utils.timezone import FixedOffset, get_default_timezone, utc
from django.utils.translation import gettext_lazy as _
from django.utils.version import PY36
from .models import FoodManager, FoodQuerySet
class Money(decimal.Decimal):
def deconstruct(self):
return (
'%s.%s' % (self.__class__.__module__, self.__class__.__name__),
[str(self)],
{}
)
class TestModel1:
def upload_to(self):
return '/somewhere/dynamic/'
thing = models.FileField(upload_to=upload_to)
class OperationWriterTests(SimpleTestCase):
def test_empty_signature(self):
operation = custom_migration_operations.operations.TestOperation()
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.TestOperation(\n'
'),'
)
def test_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(1, 2)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
'),'
)
def test_kwargs_signature(self):
operation = custom_migration_operations.operations.KwargsOperation(kwarg1=1)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
'),'
)
def test_args_kwargs_signature(self):
operation = custom_migration_operations.operations.ArgsKwargsOperation(1, 2, kwarg2=4)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsKwargsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' kwarg2=4,\n'
'),'
)
def test_nested_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(
custom_migration_operations.operations.ArgsOperation(1, 2),
custom_migration_operations.operations.KwargsOperation(kwarg1=3, kwarg2=4)
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' ),\n'
' arg2=custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=3,\n'
' kwarg2=4,\n'
' ),\n'
'),'
)
def test_multiline_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation("test\n arg1", "test\narg2")
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
"custom_migration_operations.operations.ArgsOperation(\n"
" arg1='test\\n arg1',\n"
" arg2='test\\narg2',\n"
"),"
)
def test_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation([1, 2])
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' 1,\n'
' 2,\n'
' ],\n'
'),'
)
def test_nested_operation_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation(
arg=[
custom_migration_operations.operations.KwargsOperation(
kwarg1=1,
kwarg2=2,
),
]
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
' kwarg2=2,\n'
' ),\n'
' ],\n'
'),'
)
class WriterTests(SimpleTestCase):
"""
Tests the migration writer (makes migration files from Migration instances)
"""
def safe_exec(self, string, value=None):
d = {}
try:
exec(string, globals(), d)
except Exception as e:
if value:
self.fail("Could not exec %r (from value %r): %s" % (string.strip(), value, e))
else:
self.fail("Could not exec %r: %s" % (string.strip(), e))
return d
def serialize_round_trip(self, value):
string, imports = MigrationWriter.serialize(value)
return self.safe_exec("%s\ntest_value_result = %s" % ("\n".join(imports), string), value)['test_value_result']
def assertSerializedEqual(self, value):
self.assertEqual(self.serialize_round_trip(value), value)
def assertSerializedResultEqual(self, value, target):
self.assertEqual(MigrationWriter.serialize(value), target)
def assertSerializedFieldEqual(self, value):
new_value = self.serialize_round_trip(value)
self.assertEqual(value.__class__, new_value.__class__)
self.assertEqual(value.max_length, new_value.max_length)
self.assertEqual(value.null, new_value.null)
self.assertEqual(value.unique, new_value.unique)
def test_serialize_numbers(self):
self.assertSerializedEqual(1)
self.assertSerializedEqual(1.2)
self.assertTrue(math.isinf(self.serialize_round_trip(float("inf"))))
self.assertTrue(math.isinf(self.serialize_round_trip(float("-inf"))))
self.assertTrue(math.isnan(self.serialize_round_trip(float("nan"))))
self.assertSerializedEqual(decimal.Decimal('1.3'))
self.assertSerializedResultEqual(
decimal.Decimal('1.3'),
("Decimal('1.3')", {'from decimal import Decimal'})
)
self.assertSerializedEqual(Money('1.3'))
self.assertSerializedResultEqual(
Money('1.3'),
("migrations.test_writer.Money('1.3')", {'import migrations.test_writer'})
)
def test_serialize_constants(self):
self.assertSerializedEqual(None)
self.assertSerializedEqual(True)
self.assertSerializedEqual(False)
def test_serialize_strings(self):
self.assertSerializedEqual(b"foobar")
string, imports = MigrationWriter.serialize(b"foobar")
self.assertEqual(string, "b'foobar'")
self.assertSerializedEqual("föobár")
string, imports = MigrationWriter.serialize("foobar")
self.assertEqual(string, "'foobar'")
def test_serialize_multiline_strings(self):
self.assertSerializedEqual(b"foo\nbar")
string, imports = MigrationWriter.serialize(b"foo\nbar")
self.assertEqual(string, "b'foo\\nbar'")
self.assertSerializedEqual("föo\nbár")
string, imports = MigrationWriter.serialize("foo\nbar")
self.assertEqual(string, "'foo\\nbar'")
def test_serialize_collections(self):
self.assertSerializedEqual({1: 2})
self.assertSerializedEqual(["a", 2, True, None])
self.assertSerializedEqual({2, 3, "eighty"})
self.assertSerializedEqual({"lalalala": ["yeah", "no", "maybe"]})
self.assertSerializedEqual(_('Hello'))
def test_serialize_builtin_types(self):
self.assertSerializedEqual([list, tuple, dict, set, frozenset])
self.assertSerializedResultEqual(
[list, tuple, dict, set, frozenset],
("[list, tuple, dict, set, frozenset]", set())
)
def test_serialize_lazy_objects(self):
pattern = re.compile(r'^foo$')
lazy_pattern = SimpleLazyObject(lambda: pattern)
self.assertEqual(self.serialize_round_trip(lazy_pattern), pattern)
def test_serialize_enums(self):
class TextEnum(enum.Enum):
A = 'a-value'
B = 'value-b'
class BinaryEnum(enum.Enum):
A = b'a-value'
B = b'value-b'
class IntEnum(enum.IntEnum):
A = 1
B = 2
self.assertSerializedResultEqual(
TextEnum.A,
("migrations.test_writer.TextEnum('a-value')", {'import migrations.test_writer'})
)
self.assertSerializedResultEqual(
BinaryEnum.A,
("migrations.test_writer.BinaryEnum(b'a-value')", {'import migrations.test_writer'})
)
self.assertSerializedResultEqual(
IntEnum.B,
("migrations.test_writer.IntEnum(2)", {'import migrations.test_writer'})
)
field = models.CharField(default=TextEnum.B, choices=[(m.value, m) for m in TextEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"('a-value', migrations.test_writer.TextEnum('a-value')), "
"('value-b', migrations.test_writer.TextEnum('value-b'))], "
"default=migrations.test_writer.TextEnum('value-b'))"
)
field = models.CharField(default=BinaryEnum.B, choices=[(m.value, m) for m in BinaryEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"(b'a-value', migrations.test_writer.BinaryEnum(b'a-value')), "
"(b'value-b', migrations.test_writer.BinaryEnum(b'value-b'))], "
"default=migrations.test_writer.BinaryEnum(b'value-b'))"
)
field = models.IntegerField(default=IntEnum.A, choices=[(m.value, m) for m in IntEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.IntegerField(choices=["
"(1, migrations.test_writer.IntEnum(1)), "
"(2, migrations.test_writer.IntEnum(2))], "
"default=migrations.test_writer.IntEnum(1))"
)
def test_serialize_uuid(self):
self.assertSerializedEqual(uuid.uuid1())
self.assertSerializedEqual(uuid.uuid4())
uuid_a = uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8')
uuid_b = uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2')
self.assertSerializedResultEqual(
uuid_a,
("uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8')", {'import uuid'})
)
self.assertSerializedResultEqual(
uuid_b,
("uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2')", {'import uuid'})
)
field = models.UUIDField(choices=((uuid_a, 'UUID A'), (uuid_b, 'UUID B')), default=uuid_a)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.UUIDField(choices=["
"(uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'), 'UUID A'), "
"(uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2'), 'UUID B')], "
"default=uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'))"
)
def test_serialize_functions(self):
with self.assertRaisesMessage(ValueError, 'Cannot serialize function: lambda'):
self.assertSerializedEqual(lambda x: 42)
self.assertSerializedEqual(models.SET_NULL)
string, imports = MigrationWriter.serialize(models.SET(42))
self.assertEqual(string, 'models.SET(42)')
self.serialize_round_trip(models.SET(42))
def test_serialize_datetime(self):
self.assertSerializedEqual(datetime.datetime.utcnow())
self.assertSerializedEqual(datetime.datetime.utcnow)
self.assertSerializedEqual(datetime.datetime.today())
self.assertSerializedEqual(datetime.datetime.today)
self.assertSerializedEqual(datetime.date.today())
self.assertSerializedEqual(datetime.date.today)
self.assertSerializedEqual(datetime.datetime.now().time())
self.assertSerializedEqual(datetime.datetime(2014, 1, 1, 1, 1, tzinfo=get_default_timezone()))
self.assertSerializedEqual(datetime.datetime(2013, 12, 31, 22, 1, tzinfo=FixedOffset(180)))
self.assertSerializedResultEqual(
datetime.datetime(2014, 1, 1, 1, 1),
("datetime.datetime(2014, 1, 1, 1, 1)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
(
"datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc)",
{'import datetime', 'from django.utils.timezone import utc'},
)
)
def test_serialize_datetime_safe(self):
self.assertSerializedResultEqual(
datetime_safe.date(2014, 3, 31),
("datetime.date(2014, 3, 31)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime_safe.time(10, 25),
("datetime.time(10, 25)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime_safe.datetime(2014, 3, 31, 16, 4, 31),
("datetime.datetime(2014, 3, 31, 16, 4, 31)", {'import datetime'})
)
def test_serialize_fields(self):
self.assertSerializedFieldEqual(models.CharField(max_length=255))
self.assertSerializedResultEqual(
models.CharField(max_length=255),
("models.CharField(max_length=255)", {"from django.db import models"})
)
self.assertSerializedFieldEqual(models.TextField(null=True, blank=True))
self.assertSerializedResultEqual(
models.TextField(null=True, blank=True),
("models.TextField(blank=True, null=True)", {'from django.db import models'})
)
def test_serialize_settings(self):
self.assertSerializedEqual(SettingsReference(settings.AUTH_USER_MODEL, "AUTH_USER_MODEL"))
self.assertSerializedResultEqual(
SettingsReference("someapp.model", "AUTH_USER_MODEL"),
("settings.AUTH_USER_MODEL", {"from django.conf import settings"})
)
def test_serialize_iterators(self):
self.assertSerializedResultEqual(
((x, x * x) for x in range(3)),
("((0, 0), (1, 1), (2, 4))", set())
)
def test_serialize_compiled_regex(self):
"""
Make sure compiled regex can be serialized.
"""
regex = re.compile(r'^\w+$')
self.assertSerializedEqual(regex)
def test_serialize_class_based_validators(self):
"""
Ticket #22943: Test serialization of class-based validators, including
compiled regexes.
"""
validator = RegexValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(message='hello')")
self.serialize_round_trip(validator)
# Test with a compiled regex.
validator = RegexValidator(regex=re.compile(r'^\w+$'))
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(regex=re.compile('^\\\\w+$'))")
self.serialize_round_trip(validator)
# Test a string regex with flag
validator = RegexValidator(r'^[0-9]+$', flags=re.S)
string = MigrationWriter.serialize(validator)[0]
if PY36:
self.assertEqual(string, "django.core.validators.RegexValidator('^[0-9]+$', flags=re.RegexFlag(16))")
else:
self.assertEqual(string, "django.core.validators.RegexValidator('^[0-9]+$', flags=16)")
self.serialize_round_trip(validator)
# Test message and code
validator = RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')")
self.serialize_round_trip(validator)
# Test with a subclass.
validator = EmailValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.EmailValidator(message='hello')")
self.serialize_round_trip(validator)
validator = deconstructible(path="migrations.test_writer.EmailValidator")(EmailValidator)(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "migrations.test_writer.EmailValidator(message='hello')")
validator = deconstructible(path="custom.EmailValidator")(EmailValidator)(message="hello")
with self.assertRaisesMessage(ImportError, "No module named 'custom'"):
MigrationWriter.serialize(validator)
validator = deconstructible(path="django.core.validators.EmailValidator2")(EmailValidator)(message="hello")
with self.assertRaisesMessage(ValueError, "Could not find object EmailValidator2 in django.core.validators."):
MigrationWriter.serialize(validator)
def test_serialize_empty_nonempty_tuple(self):
"""
Ticket #22679: makemigrations generates invalid code for (an empty
tuple) default_permissions = ()
"""
empty_tuple = ()
one_item_tuple = ('a',)
many_items_tuple = ('a', 'b', 'c')
self.assertSerializedEqual(empty_tuple)
self.assertSerializedEqual(one_item_tuple)
self.assertSerializedEqual(many_items_tuple)
def test_serialize_builtins(self):
string, imports = MigrationWriter.serialize(range)
self.assertEqual(string, 'range')
self.assertEqual(imports, set())
def test_serialize_unbound_method_reference(self):
"""An unbound method used within a class body can be serialized."""
self.serialize_round_trip(TestModel1.thing)
def test_serialize_local_function_reference(self):
"""A reference in a local scope can't be serialized."""
class TestModel2:
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
with self.assertRaisesMessage(ValueError, 'Could not find function upload_to in migrations.test_writer'):
self.serialize_round_trip(TestModel2.thing)
def test_serialize_managers(self):
self.assertSerializedEqual(models.Manager())
self.assertSerializedResultEqual(
FoodQuerySet.as_manager(),
('migrations.models.FoodQuerySet.as_manager()', {'import migrations.models'})
)
self.assertSerializedEqual(FoodManager('a', 'b'))
self.assertSerializedEqual(FoodManager('x', 'y', c=3, d=4))
def test_serialize_frozensets(self):
self.assertSerializedEqual(frozenset())
self.assertSerializedEqual(frozenset("let it go"))
def test_serialize_set(self):
self.assertSerializedEqual(set())
self.assertSerializedResultEqual(set(), ('set()', set()))
self.assertSerializedEqual({'a'})
self.assertSerializedResultEqual({'a'}, ("{'a'}", set()))
def test_serialize_timedelta(self):
self.assertSerializedEqual(datetime.timedelta())
self.assertSerializedEqual(datetime.timedelta(minutes=42))
def test_serialize_functools_partial(self):
value = functools.partial(datetime.timedelta, 1, seconds=2)
result = self.serialize_round_trip(value)
self.assertEqual(result.func, value.func)
self.assertEqual(result.args, value.args)
self.assertEqual(result.keywords, value.keywords)
def test_serialize_functools_partialmethod(self):
value = functools.partialmethod(datetime.timedelta, 1, seconds=2)
result = self.serialize_round_trip(value)
self.assertIsInstance(result, functools.partialmethod)
self.assertEqual(result.func, value.func)
self.assertEqual(result.args, value.args)
self.assertEqual(result.keywords, value.keywords)
def test_simple_migration(self):
"""
Tests serializing a simple migration.
"""
fields = {
'charfield': models.DateTimeField(default=datetime.datetime.utcnow),
'datetimefield': models.DateTimeField(default=datetime.datetime.utcnow),
}
options = {
'verbose_name': 'My model',
'verbose_name_plural': 'My models',
}
migration = type("Migration", (migrations.Migration,), {
"operations": [
migrations.CreateModel("MyModel", tuple(fields.items()), options, (models.Model,)),
migrations.CreateModel("MyModel2", tuple(fields.items()), bases=(models.Model,)),
migrations.CreateModel(
name="MyModel3", fields=tuple(fields.items()), options=options, bases=(models.Model,)
),
migrations.DeleteModel("MyModel"),
migrations.AddField("OtherModel", "datetimefield", fields["datetimefield"]),
],
"dependencies": [("testapp", "some_other_one")],
})
writer = MigrationWriter(migration)
output = writer.as_string()
# We don't test the output formatting - that's too fragile.
# Just make sure it runs for now, and that things look alright.
result = self.safe_exec(output)
self.assertIn("Migration", result)
def test_migration_path(self):
test_apps = [
'migrations.migrations_test_apps.normal',
'migrations.migrations_test_apps.with_package_model',
'migrations.migrations_test_apps.without_init_file',
]
base_dir = os.path.dirname(os.path.dirname(__file__))
for app in test_apps:
with self.modify_settings(INSTALLED_APPS={'append': app}):
migration = migrations.Migration('0001_initial', app.split('.')[-1])
expected_path = os.path.join(base_dir, *(app.split('.') + ['migrations', '0001_initial.py']))
writer = MigrationWriter(migration)
self.assertEqual(writer.path, expected_path)
def test_custom_operation(self):
migration = type("Migration", (migrations.Migration,), {
"operations": [
custom_migration_operations.operations.TestOperation(),
custom_migration_operations.operations.CreateModel(),
migrations.CreateModel("MyModel", (), {}, (models.Model,)),
custom_migration_operations.more_operations.TestOperation()
],
"dependencies": []
})
writer = MigrationWriter(migration)
output = writer.as_string()
result = self.safe_exec(output)
self.assertIn("custom_migration_operations", result)
self.assertNotEqual(
result['custom_migration_operations'].operations.TestOperation,
result['custom_migration_operations'].more_operations.TestOperation
)
def test_sorted_imports(self):
"""
#24155 - Tests ordering of imports.
"""
migration = type("Migration", (migrations.Migration,), {
"operations": [
migrations.AddField("mymodel", "myfield", models.DateTimeField(
default=datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
)),
]
})
writer = MigrationWriter(migration)
output = writer.as_string()
self.assertIn(
"import datetime\n"
"from django.db import migrations, models\n"
"from django.utils.timezone import utc\n",
output
)
def test_migration_file_header_comments(self):
"""
Test comments at top of file.
"""
migration = type("Migration", (migrations.Migration,), {
"operations": []
})
dt = datetime.datetime(2015, 7, 31, 4, 40, 0, 0, tzinfo=utc)
with mock.patch('django.db.migrations.writer.now', lambda: dt):
writer = MigrationWriter(migration)
output = writer.as_string()
self.assertTrue(
output.startswith(
"# Generated by Django %(version)s on 2015-07-31 04:40\n" % {
'version': get_version(),
}
)
)
def test_models_import_omitted(self):
"""
django.db.models shouldn't be imported if unused.
"""
migration = type("Migration", (migrations.Migration,), {
"operations": [
migrations.AlterModelOptions(
name='model',
options={'verbose_name': 'model', 'verbose_name_plural': 'models'},
),
]
})
writer = MigrationWriter(migration)
output = writer.as_string()
self.assertIn("from django.db import migrations\n", output)
def test_deconstruct_class_arguments(self):
# Yes, it doesn't make sense to use a class as a default for a
# CharField. It does make sense for custom fields though, for example
# an enumfield that takes the enum class as an argument.
class DeconstructibleInstances:
def deconstruct(self):
return ('DeconstructibleInstances', [], {})
string = MigrationWriter.serialize(models.CharField(default=DeconstructibleInstances))[0]
self.assertEqual(string, "models.CharField(default=migrations.test_writer.DeconstructibleInstances)")
| 41.255639 | 118 | 0.630326 |
c13ac340d97456ef4b47b3b0d309fac6361177d1 | 4,014 | py | Python | ml_source/src/blocktorch/blocktorch/pipelines/components/estimators/regressors/xgboost_regressor.py | blocktorch/blocktorch | 044aa269813ab22c5fd27f84272e5fb540fc522b | [
"MIT"
] | 1 | 2021-09-23T12:23:02.000Z | 2021-09-23T12:23:02.000Z | ml_source/src/blocktorch/blocktorch/pipelines/components/estimators/regressors/xgboost_regressor.py | blocktorch/blocktorch | 044aa269813ab22c5fd27f84272e5fb540fc522b | [
"MIT"
] | null | null | null | ml_source/src/blocktorch/blocktorch/pipelines/components/estimators/regressors/xgboost_regressor.py | blocktorch/blocktorch | 044aa269813ab22c5fd27f84272e5fb540fc522b | [
"MIT"
] | null | null | null | """XGBoost Regressor."""
from blocktorch.model_family import ModelFamily
from blocktorch.pipelines.components.estimators import Estimator
from blocktorch.problem_types import ProblemTypes
from blocktorch.utils.gen_utils import (
_rename_column_names_to_numeric,
import_or_raise,
)
from blocktorch.utils.woodwork_utils import infer_feature_types
import dask
import xgboost as xgb
import dask.array as da
import dask.distributed
import dask.dataframe as dd
from .blockwise_voting_regressor import BlockwiseVotingRegressor
class XGBoostRegressor(Estimator):
"""XGBoost Regressor.
Args:
eta (float): Boosting learning rate. Defaults to 0.1.
max_depth (int): Maximum tree depth for base learners. Defaults to 6.
min_child_weight (float): Minimum sum of instance weight (hessian) needed in a child. Defaults to 1.0
n_estimators (int): Number of gradient boosted trees. Equivalent to number of boosting rounds. Defaults to 100.
random_seed (int): Seed for the random number generator. Defaults to 0.
n_jobs (int): Number of parallel threads used to run xgboost. Note that creating thread contention will significantly slow down the algorithm. Defaults to 1.
"""
name = "XGBoost Regressor"
hyperparameter_ranges = {}
model_family = ModelFamily.XGBOOST
supported_problem_types = [
ProblemTypes.REGRESSION,
ProblemTypes.TIME_SERIES_REGRESSION,
]
# xgboost supports seeds from -2**31 to 2**31 - 1 inclusive. these limits ensure the random seed generated below
# is within that range.
SEED_MIN = -(2 ** 31)
SEED_MAX = 2 ** 31 - 1
def __init__(
self,
eta=0.1,
max_depth=6,
min_child_weight=1,
n_estimators=100,
random_seed=0,
n_jobs=1,
**kwargs,
):
parameters = {
"eta": eta,
"max_depth": max_depth,
"min_child_weight": min_child_weight,
"n_estimators": n_estimators,
"n_jobs": n_jobs,
}
parameters.update(kwargs)
xgb_error_msg = (
"XGBoost is not installed. Please install it using `pip install xgboost`."
)
xgb = import_or_raise("xgboost", error_msg=xgb_error_msg)
xgb_regressor = xgb.XGBRegressor(random_state=random_seed, enable_categorical=True, **parameters)
super().__init__(
parameters=parameters, component_obj=xgb_regressor, random_seed=random_seed
)
@staticmethod
def _convert_bool_to_int(X):
return {
col: "Integer" for col in X.ww.select("boolean", return_schema=True).columns
}
def fit(self, X, y=None):
"""Fits XGBoost regressor component to data.
Args:
X (pd.DataFrame): The input training data of shape [n_samples, n_features].
y (pd.Series, optional): The target training data of length [n_samples].
Returns:
self
"""
X, y = super()._manage_woodwork(X, y)
X.ww.set_types(self._convert_bool_to_int(X))
self.input_feature_names = list(X.columns)
# X = _rename_column_names_to_numeric(X, flatten_tuples=False)
self._component_obj = BlockwiseVotingRegressor(
self._component_obj,
)
self._component_obj.fit(X, y)
return self
def predict(self, X):
"""Make predictions using fitted XGBoost regressor.
Args:
X (pd.DataFrame): Data of shape [n_samples, n_features].
Returns:
pd.Series: Predicted values.
"""
X, _ = super()._manage_woodwork(X)
X.ww.set_types(self._convert_bool_to_int(X))
# X = _rename_column_names_to_numeric(X, flatten_tuples=False)
return infer_feature_types(self._component_obj.predict(X))
@property
def feature_importance(self):
"""Feature importance of fitted XGBoost regressor."""
return self._component_obj.feature_importances_
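# --- Hedged usage sketch (not part of the original file) ---
# `X` and `y` are hypothetical Woodwork-typed pandas objects:
#
#   reg = XGBoostRegressor(eta=0.1, max_depth=6, n_estimators=100)
#   reg.fit(X, y)
#   preds = reg.predict(X)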
| 33.45 | 166 | 0.660937 |
845958315f146c23b78ebb452ed1fba10713f15f | 4,321 | py | Python | ideas/examples/french.py | aroberge/importhooks | 57483ce24d265d391587f6321954f2ed60f04afd | [
"MIT"
] | null | null | null | ideas/examples/french.py | aroberge/importhooks | 57483ce24d265d391587f6321954f2ed60f04afd | [
"MIT"
] | null | null | null | ideas/examples/french.py | aroberge/importhooks | 57483ce24d265d391587f6321954f2ed60f04afd | [
"MIT"
] | null | null | null | """
.. admonition:: Summary
This example demonstrates the how to use a non-standard file
extension (``.pyfr``) as an indication that an import hook must
be used.
It also demonstrates how to use the ``verbose_finder`` configuration option.
French Python
==============
Imagine you are a French beginner who has learned the basics
of programming using a block-based environment such as Scratch
or Blockly. All the text shown on these blocks was in French,
the only language you know. You now want to do a transition
to actually writing code in an editor, instead of putting
predefined blocks together. It would be so much easier if
you could use a version of Python where the keywords were in French,
with most of them being identical to what you were using in the
block-based environment.
This is what this import hook example allows one to do.
A more elaborate example is that given by
`AvantPy <https://aroberge.github.io/avantpy/docs/html/>`_.
Let's see it in action:
.. code-block:: none
> python -m ideas -a french --show
Ideas Console version 0.0.38. [Python version: 3.9.10]
ideas> pourchaque lettre dans 'Bonjour':
... afficher(lettre)
...
===========Transformed============
for lettre in 'Bonjour':
print(lettre)
-----------------------------
B
o
n
j
o
u
r
ideas>
Importing .pyfr files
----------------------
Suppose we have the following two files:
.. code-block:: python
# my_program.py
print("Wrong one")
raise ImportError
and
.. code-block:: none
# my_program.pyfr
afficher("Bonjour !")
Let's see if we attempt to import ``my_program`` after
setting up the ``french`` import hook and enabling the
verbose finder::
>>> from ideas.session import config
>>> config.verbose_finder = True
>>> from ideas.examples import french
>>> hook = french.add_hook()
Looking for files with extensions: ['.pyfr']
The following paths will not be included in the search:
PYTHON: c:\\users\\andre\\appdata\\local\\programs\\python\\python37\\lib
IDEAS: c:\\users\\andre\\github\\ideas\\ideas
SITE-PACKAGES: c:\\users\\andre\\github\\ideas\\venv-ideas3.7\\lib\\site-packages
>>> import my_program
Searching for ~\\github\\ideas\\my_program.pyfr
Found: ~\\github\\ideas\\my_program.pyfr
Bonjour !
>>> import math
Searching for ~\\github\\ideas\\math.pyfr
IdeasMetaFinder did not find math.pyfr
>>> math.pi
3.141592653589793
>>>
"""
from ideas import import_hook
import token_utils
fr_to_py = {
"Faux": "False",
"Aucun": "None",
"Vrai": "True",
"et": "and",
"comme": "as",
"affirmer": "assert",
"async": "async", # do not translate
"await": "await", # as these are not for beginners
"interrompre": "break",
"classe": "class",
"continuer": "continue",
"définir": "def",
"supprimer": "del",
"sinonsi": "elif",
"sinon": "else",
"siexception": "except",
"finalement": "finally",
"pourchaque": "for",
"de": "from",
"global": "global",
"si": "if",
"importer": "import",
"dans": "in",
"est": "is",
"fonction": "lambda",
"nonlocal": "nonlocal",
"pas": "not",
"ou": "or",
"passer": "pass",
"lever": "raise",
"retourner": "return",
"essayer": "try",
"tantque": "while",
"avec": "with",
"céder": "yield",
# a few builtins useful for beginners
"demander": "input",
"afficher": "print",
"intervalle": "range",
"quitter": "exit", # useful for console
}
def transform_source(source, **_kwargs):
"""A simple replacement of French Python keywords by their normal
English versions.
"""
new_tokens = []
for token in token_utils.tokenize(source):
if token.string in fr_to_py:
token.string = fr_to_py[token.string]
new_tokens.append(token)
new_source = token_utils.untokenize(new_tokens)
return new_source
def add_hook(**_kwargs):
"""Creates and adds the import hook in sys.meta_path.
Uses a custom file extension (``.pyfr``) for the import hook."""
hook = import_hook.create_hook(
transform_source=transform_source,
hook_name=__name__,
extensions=[".pyfr"],
)
return hook
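# --- Hedged usage sketch (not part of the original file) ---
# With the hook installed, importing a ``.pyfr`` module translates its French
# keywords on the fly (``mon_programme`` is a hypothetical file name):
#
#   hook = add_hook()
#   import mon_programme   # loads mon_programme.pyfr, keywords translated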
| 25.122093 | 87 | 0.626938 |
9db7e428a203e743e21f290ddc7b7e7ddaab0a28 | 5,324 | py | Python | .history/src/data/data_20191021142701.py | bkraft4257/kaggle_titanic | f29ea1773773109a867278c001dbd21a9f7b21dd | [
"MIT"
] | null | null | null | .history/src/data/data_20191021142701.py | bkraft4257/kaggle_titanic | f29ea1773773109a867278c001dbd21a9f7b21dd | [
"MIT"
] | null | null | null | .history/src/data/data_20191021142701.py | bkraft4257/kaggle_titanic | f29ea1773773109a867278c001dbd21a9f7b21dd | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
def __init__(self, filename: Union[str, Path], age_bins = None, drop_columns=None):
"""Extract training data from a file or Path.
Arguments:
filename {str or Path} -- Filename of the CSV data file containing the data.
drop_columns -- Columns in the dataframe that should be dropped.
"""
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
self.filename = filename
self.drop_columns = drop_columns
self.all_label_columns = ["survived"]
self.all_feature_columns = [
"pclass",
"name",
"sex",
"age",
"sibsp",
"parch",
"ticket",
"fare",
"cabin",
"embarked",
]
self.Xy_raw = None
self.extract_raw()
def extract_raw(self):
"""Extract data from a CSV file.
Normalizes column names, renames ``age`` to ``age_known``, and stores the
result in ``self.Xy_raw``, indexed by ``passengerid``.
"""
Xy_raw = pd.read_csv(self.filename)
Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
Xy_raw = Xy_raw.rename(columns={"age": "age_known"})
Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
self.Xy_raw = Xy_raw.set_index("passengerid")
class TransformData:
title_translator = {
"Mlle.": "Mrs.",
"Mme.": "Mrs.",
"Sir.": "Mr.",
"Ms.": "Mrs.",
"Rev.": "Mr.",
"": "Mr.",
"Col.": "Mr.",
"Capt.": "Mr.",
"Lady.": "Mrs.",
"the Countess. of": "Mrs.",
}
def __init__(
self,
raw_data,
adult_age_threshold_min=13,
age_bins = None,
fare_mode = None,
embarked_mode = None,
Xy_age_estimate=None,
drop_columns=None,
):
"""Transform extracted training data.
Arguments:
raw_data {ExtractData} -- Extracted data whose ``Xy_raw`` will be transformed.
drop_columns -- Columns in the dataframe that should be dropped.
"""
if age_bins is None:
age_bins = [0,10,20,30, 40, 50, 60, np.inf]
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
self.raw = raw_data
self.adult_age_threshold_min = adult_age_threshold_min
self.Xy_age_estimate = Xy_age_estimate
self.age_bins = age_bins
self.drop_columns = drop_columns
self.Xy = self.raw.Xy_raw.copy()
self.fare_mode = fare_mode
if self.fare_mode is None:
self.fare_mode = self.Xy['fare'].mode()[0]
self.embarked_mode = embarked_mode
if self.embarked_mode is None:
self.embarked_mode = self.Xy['embarked'].mode()[0]
self.impute_missing_fare()
self.impute_missing_embark()
self.extract_title()
self.extract_last_name()
self.extract_cabin_number()
self.extract_cabin_prefix()
self.estimate_age()
self.calc_age_bins()
self.calc_is_child()
self.calc_is_travelling_alone()
def calc_is_travelling_alone(self):
self.Xy["is_travelling_alone"] = (self.Xy.sibsp == 0) & (self.Xy.parch == 0)
def calc_is_child(self):
self.Xy["is_child"] = self.Xy.age < self.adult_age_threshold_min
def extract_cabin_number(self):
self.Xy["cabin_number"] = self.Xy.ticket.str.extract("(\d+)$")
def extract_cabin_prefix(self):
self.Xy["cabin_prefix"] = self.Xy.ticket.str.extract("^(.+) ")
def extract_title(self):
"""Extract a normalized title (Mr, Mrs, ...) from each passenger name."""
self.Xy["title"] = (
self.Xy.name.apply(lambda x: HumanName(x).title)
.replace(self.title_translator)
.replace({"\.": ""}, regex=True)
)
def extract_last_name(self):
self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)
def calc_age_bins(self):
self.Xy['age_bin'] = pd.cut(self.Xy.age, bins=self.age_bins)
def clean(self):
"""Remove "unnecessary" feature columns.
Drops the columns listed in ``self.drop_columns`` from the working dataframe.
"""
self.Xy = self.Xy.drop(self.drop_columns, axis=1)
def estimate_age(self, groupby_columns=["sex", "title"]):
"""Estimate missing ages from the mean known age of each group.
Keyword Arguments:
groupby_columns {list} -- columns to group by when averaging known ages
(default: {['sex', 'title']})
"""
if self.Xy_age_estimate is None:
self.Xy_age_estimate = (
self.Xy.groupby(groupby_columns).age_known.mean().to_frame().round(1)
)
self.Xy_age_estimate = self.Xy_age_estimate.rename(
columns={"age_known": "age_estimate"}
)
out_df = self.Xy.reset_index().merge(self.Xy_age_estimate, on=groupby_columns)
out_df["age"] = out_df["age_known"].fillna(out_df["age_estimate"])
self.Xy = out_df
def impute_missing_fare(self):
self.Xy['fare'] = self.Xy['fare'].fillna(self.fare_mode)
def impute_missing_embark(self):
self.Xy['embarked'] = self.Xy['embarked'].fillna(self.embarked_mode)
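# --- Hedged usage sketch (not part of the original file) ---
# "train.csv" is a hypothetical path to the Kaggle Titanic training data:
#
#   raw = ExtractData("train.csv")
#   transformed = TransformData(raw)
#   transformed.Xy.head()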
| 29.910112 | 114 | 0.563298 |
c9571a8769a04f15b736b18d1d2ccdba8e3165d7 | 2,459 | py | Python | setup.py | googleinterns/django-csp | 6efaad957e4e22e91d5c29a69d91b0c1f1765546 | [
"BSD-3-Clause"
] | 2 | 2020-05-20T06:15:21.000Z | 2020-05-20T06:15:30.000Z | setup.py | 9mido/django-csp | cbff891cdd4e8718c25bc762147a2c22fa07787e | [
"BSD-3-Clause"
] | null | null | null | setup.py | 9mido/django-csp | cbff891cdd4e8718c25bc762147a2c22fa07787e | [
"BSD-3-Clause"
] | 1 | 2020-06-16T17:18:32.000Z | 2020-06-16T17:18:32.000Z | import sys
import os
import codecs
from setuptools import setup, find_packages
version = '3.6'
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
print('You probably want to also tag the version now:')
print(' git tag -a %s -m "version %s"' % (version, version))
print(' git push --tags')
sys.exit()
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
install_requires = [
'Django>=1.8',
]
jinja2_requires = [
'jinja2>=2.9.6',
]
test_requires = [
'pytest<4.0',
'pytest-django',
'pytest-flakes==1.0.1',
'pytest-pep8==1.0.6',
'pep8==1.4.6',
'mock==1.0.1',
'six==1.12.0',
]
test_requires += jinja2_requires
setup(
name='django_csp',
version=version,
description='Django Content Security Policy support.',
long_description=read('README.rst'),
author='James Socol',
author_email='me@jamessocol.com',
maintainer='Christopher Grebs',
maintainer_email='cg@webshox.org',
url='http://github.com/mozilla/django-csp',
license='BSD',
packages=find_packages(),
install_requires=install_requires,
extras_require={
'tests': test_requires,
'jinja2': jinja2_requires,
},
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Environment :: Web Environment :: Mozilla',
'Programming Language :: Python',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python :: Implementation :: CPython',
'Framework :: Django',
]
)
| 27.943182 | 71 | 0.616104 |
0ee2f61617158a409d718710f6bb397d8647e36e | 8,689 | py | Python | src/dropbot_chip_qc/ui/execute.py | cfobel/dropbot-chip-qc | e5944b88c0d423163f55a3f49ebf84bb27e229bc | [
"BSD-3-Clause"
] | null | null | null | src/dropbot_chip_qc/ui/execute.py | cfobel/dropbot-chip-qc | e5944b88c0d423163f55a3f49ebf84bb27e229bc | [
"BSD-3-Clause"
] | 5 | 2019-04-02T11:10:45.000Z | 2019-07-17T20:31:18.000Z | src/dropbot_chip_qc/ui/execute.py | cfobel/dropbot-chip-qc | e5944b88c0d423163f55a3f49ebf84bb27e229bc | [
"BSD-3-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
'''
.. versionadded:: v0.12.0
'''
import functools as ft
import itertools as it
import threading
from dropbot_chip_qc.ui.render import get_summary_dict, render_summary
from logging_helpers import _L, caller_name
import asyncio_helpers as aioh
import dropbot_chip_qc as qc
import dropbot_chip_qc.ui.plan
import dropbot_chip_qc.ui.render
import networkx as nx
import numpy as np
import pandas as pd
import path_helpers as ph
import si_prefix as si
import trollius as asyncio
from .mqtt_proxy import DropBotMqttProxy
# For colors, see: https://gist.github.com/cfobel/fd939073cf13a309d7a9
light_blue = '#88bde6'
light_green = '#90cd97'
class Executor(object):
def __init__(self, channels_graph, channel_plan):
self.base_channels_graph = channels_graph.copy()
self.channels_graph = channels_graph.copy()
self.base_channel_plan = list(channel_plan)
self.completed_results = []
self._thread = None
self._task = None
def is_alive(self):
return self._thread is not None and self._thread.is_alive()
def remove_channels(self, bad_channels):
self.channels_graph.remove_nodes_from(bad_channels)
def channel_plan(self):
if self.completed_results:
channel_plan = self.completed_results[-1]['channel_plan']
completed_transfers = \
self.completed_results[-1]['completed_transfers']
else:
channel_plan = self.base_channel_plan
completed_transfers = []
channel_plan_ = [c for c in channel_plan if c in self.channels_graph]
if len(channel_plan_) < len(channel_plan):
_L().debug('reroute around missing channels')
channel_plan = list(qc.ui.plan\
.create_channel_plan(self.channels_graph, channel_plan_,
loop=False))
return channel_plan, completed_transfers
def start(self, aproxy, signals, bad_channels=None, min_duration=.15):
'''
# TODO
- incorporate `execute()` coroutine
- add
'''
if self.is_alive():
raise RuntimeError('Executor is already running.')
channel_plan, completed_transfers = self.channel_plan()
@asyncio.coroutine
def execute_test(*args, **kwargs):
yield asyncio.From(set_capacitance_update_interval())
try:
result = yield asyncio\
.From(qc.ui.plan.transfer_windows(*args, **kwargs))
except qc.ui.plan.TransferFailed as exception:
# Save intermediate result.
result = dict(channel_plan=exception.channel_plan,
completed_transfers=exception.completed_transfers)
signals.signal('test-interrupt').send(caller_name(0), **result)
self.completed_results.append(result)
yield asyncio.From(aproxy.set_state_of_channels(pd.Series(), append=False))
# result = dict(channel_plan=channel_plan_i,
# completed_transfers=completed_transfers_i)
raise asyncio.Return(result)
@asyncio.coroutine
def set_capacitance_update_interval():
state = yield asyncio.From(aproxy.state)
max_update_interval = int(.5 * min_duration * 1e3)
if state.capacitance_update_interval_ms > max_update_interval \
or state.capacitance_update_interval_ms == 0:
yield asyncio\
.From(aproxy.update_state(capacitance_update_interval_ms=
max_update_interval))
looped_channel_plan = (channel_plan +
nx.shortest_path(self.channels_graph,
channel_plan[-1],
self.base_channel_plan[0])[1:])
self._task = aioh.cancellable(execute_test)
transfer_liquid = ft.partial(qc.ui.plan.transfer_liquid, aproxy,
min_duration=min_duration)
self._thread = threading.Thread(target=self._task,
args=(signals, looped_channel_plan,
completed_transfers,
transfer_liquid),
kwargs={'n': 3})
self._thread.daemon = True
self._thread.start()
def pause(self):
if self.is_alive():
self._task.cancel()
def reset(self):
self.pause()
del self.completed_results[:]
self.channels_graph = self.base_channels_graph.copy()
class ExecutorController(object):
def __init__(self, aproxy, ui, executor):
self.ui = ui
channel_electrodes = ui['channel_electrodes']
channel_patches = ui['channel_patches']
chip_info = ui['chip_info']
chip_info_mm = ui['chip_info_mm']
figure = ui['figure']
signals = ui['signals']
def calibrate_sheet_capacitance(target_force, *args):
'''Calibrate sheet capacitance with liquid present
**NOTE** Prior to calling this function:
- _at least_ one electrode **MUST** be **actuated**
- all actuated electrodes **MUST** be completely covered with liquid
It may be helpful to use the interactive figure UI to manipulate liquid until
the above criteria are met.
This function performs the following steps:
1. Measure **total capacitance** across **all actuated electrodes**
2. Compute sheet capacitance with liquid present ($\Omega_L$) based on nominal
areas of actuated electrodes from `chip_file`
3. Compute voltage to match ``target_force``, where
$F = 10^3 \cdot 0.5 \cdot \Omega_L \cdot V^2$
4. Return the measured sheet capacitance and the voltage needed to reach
``target_force``.
'''
proxy = DropBotMqttProxy.from_uri('dropbot', aproxy.__client__._host)
name = 'liquid'
states = proxy.state_of_channels
channels = states[states > 0].index.tolist()
electrodes_by_id = pd.Series(chip_info_mm['electrodes'],
index=(e['id'] for e in
chip_info_mm['electrodes']))
actuated_area = (electrodes_by_id[channel_electrodes[channels]]
.map(lambda x: x['area'])).sum()
capacitance = pd.Series(proxy.capacitance(0)
for i in range(20)).median()
sheet_capacitance = capacitance / actuated_area
message = ('Measured %s sheet capacitance: %sF/%.1f mm^2 = %sF/mm^2'
% (name, si.si_format(capacitance), actuated_area,
si.si_format(sheet_capacitance)))
print(message)
voltage = np.sqrt(target_force / (1e3 * 0.5 * sheet_capacitance))
return sheet_capacitance, voltage
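# Worked example (illustrative numbers only, not from the source): with a
# measured liquid sheet capacitance of ~3 pF/mm^2 and a 25 uN target force,
#   V = sqrt(25e-6 / (1e3 * 0.5 * 3e-12)) ~= 129 V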
def pause(*args):
executor.pause()
def reset(*args):
executor.reset()
channel_patches.map(lambda x: x.set_facecolor(light_blue))
for collection in list(figure._ax.collections):
collection.remove()
figure._ax.figure.canvas.draw()
def save_results(output_directory, chip_uuid, *args):
output_dir = ph.path(output_directory)
channel_plan, completed_transfers = executor.channel_plan()
proxy = DropBotMqttProxy.from_uri('dropbot', aproxy.__client__._host)
summary_dict = \
get_summary_dict(proxy, chip_info,
sorted(set(executor.base_channel_plan)),
channel_plan, completed_transfers,
chip_uuid=chip_uuid)
output_path = output_dir.joinpath('Chip test report - %s.html' %
summary_dict['chip_uuid'])
print('save to: `%s`' % output_path)
render_summary(output_path, **summary_dict)
def start(bad_channels, *args):
executor.channels_graph = executor.base_channels_graph.copy()
executor.remove_channels(bad_channels)
executor.start(aproxy, signals)
self.calibrate_sheet_capacitance = calibrate_sheet_capacitance
self.pause = pause
self.reset = reset
self.save_results = save_results
self.start = start
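# --- Hedged usage sketch (not part of the original file) ---
# `aproxy`, `ui`, `channels_graph`, and `channel_plan` are hypothetical,
# pre-built objects; this only shows the intended wiring:
#
#   executor = Executor(channels_graph, channel_plan)
#   controller = ExecutorController(aproxy, ui, executor)
#   controller.start([])      # bad_channels=[]; runs the transfer plan
#   controller.pause()        # cancels the running task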
| 41.37619 | 91 | 0.590747 |
5e8d1e88ac4f740facaef1a7f9f3beb0a4e26984 | 985 | py | Python | setup.py | lmacken/binance-chain-python | 483e51394ebc9f9998f5248910ac7b7dff7198f9 | [
"MIT"
] | 22 | 2019-04-27T02:14:52.000Z | 2021-01-04T00:37:41.000Z | setup.py | redquantum/binance-chain-python | 483e51394ebc9f9998f5248910ac7b7dff7198f9 | [
"MIT"
] | 7 | 2019-04-28T20:57:49.000Z | 2021-09-03T03:39:22.000Z | setup.py | redquantum/binance-chain-python | 483e51394ebc9f9998f5248910ac7b7dff7198f9 | [
"MIT"
] | 9 | 2019-04-27T23:43:51.000Z | 2021-04-15T18:09:51.000Z | #!/usr/bin/env python
import setuptools
from distutils.core import setup
setup(
name="binancechain",
version="0.1.6",
description="Unofficial Binance Chain SDK",
author="Luke Macken & Kim Bui",
author_email="",
url="https://github.com/lmacken/binance-chain-python",
packages=["binancechain"],
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: AsyncIO",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
],
install_requires=[
"wheel",
"bech32",
"aiohttp",
"bitcoinlib",
"eth_keyfile",
"secp256k1",
"pyee",
"varint",
"protobuf",
"orjson",
],
)
| 25.921053 | 58 | 0.559391 |
54690cd96c2a52d95c26b8e44b65e2b77a6243fe | 116 | py | Python | app/streampush/backend/apps.py | streampush/streampush | c531a8b35b8f0e9b6d56f8760ee78b707eab7830 | [
"MIT"
] | 6 | 2018-09-11T01:36:12.000Z | 2021-12-06T07:12:46.000Z | app/streampush/backend/apps.py | streampush/streampush | c531a8b35b8f0e9b6d56f8760ee78b707eab7830 | [
"MIT"
] | 9 | 2018-09-13T01:28:03.000Z | 2021-03-14T04:01:47.000Z | app/streampush/backend/apps.py | streampush/streampush | c531a8b35b8f0e9b6d56f8760ee78b707eab7830 | [
"MIT"
] | 2 | 2019-02-15T15:03:37.000Z | 2019-07-28T13:05:07.000Z | from django.apps import AppConfig
from backend import configs
class BackendConfig(AppConfig):
name = 'backend'
| 19.333333 | 33 | 0.784483 |
495a63a30f721b605e51851f6673ff2f48fc88ac | 2,525 | py | Python | vue/vue.py | adamlwgriffiths/vue.py | f4256454256ddfe54a8be6dea493d3fc915ef1a2 | [
"MIT"
] | 274 | 2018-07-07T00:57:17.000Z | 2022-03-22T23:49:53.000Z | vue/vue.py | adamlwgriffiths/vue.py | f4256454256ddfe54a8be6dea493d3fc915ef1a2 | [
"MIT"
] | 25 | 2018-11-24T17:19:44.000Z | 2022-03-23T22:30:18.000Z | vue/vue.py | adamlwgriffiths/vue.py | f4256454256ddfe54a8be6dea493d3fc915ef1a2 | [
"MIT"
] | 18 | 2019-07-04T07:18:18.000Z | 2022-03-22T23:49:55.000Z | from browser import window
from .factory import VueComponentFactory, Wrapper, VueDirectiveFactory
from .bridge import Object
from .decorators.directive import DirectiveHook
from .decorators.filters import Filter
class Vue:
@staticmethod
def directive(name, directive=None):
if directive is None and isinstance(name, str):
return window.Vue.directive(name)
if directive is None:
directive = name
name = directive.__name__.lower()
if not isinstance(directive, type):
class FunctionDirective(VueDirective):
d = DirectiveHook(directive)
directive = FunctionDirective
window.Vue.directive(name, VueDirectiveFactory.get_item(directive))
@staticmethod
def filter(method_or_name, method=None):
        if not method:
            method = method_or_name
            name = method_or_name.__name__
        else:
            name = method_or_name
flt = Filter(method, name)
window.Vue.filter(flt.__id__, flt.__value__)
@staticmethod
def mixin(mixin):
window.Vue.mixin(VueComponentFactory.get_item(mixin))
@staticmethod
def use(plugin, *args, **kwargs):
window.Vue.use(plugin, *args, kwargs)
@staticmethod
def component(component_or_name, component=None):
if isinstance(component_or_name, str) and component is None:
return window.Vue.component(component_or_name)
if component is not None:
name = component_or_name
else:
component = component_or_name
name = component.__name__
window.Vue.component(name, VueComponentFactory.get_item(component))
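# Illustrative usage only (the names below are hypothetical):
#
#   Vue.filter("shout", lambda value: value.upper())  # register a filter
#   focus = Vue.directive("focus")                    # name-only call is a lookup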
class VueComponent(Wrapper):
@classmethod
def init_dict(cls):
return VueComponentFactory.get_item(cls)
def __new__(cls, el, **kwargs):
init_dict = cls.init_dict()
init_dict.update(el=el)
for key, value in kwargs.items():
if key == "props_data":
key = "propsData"
init_dict.update({key: value})
return Object.from_js(window.Vue.new(Object.to_js(init_dict)))
@classmethod
def register(cls, name=None):
if name:
Vue.component(name, cls)
else:
Vue.component(cls)
class VueMixin(Wrapper):
pass
class VueDirective(Wrapper):
name = None
class VuePlugin:
@staticmethod
def install(*args, **kwargs):
raise NotImplementedError()
| 27.445652 | 75 | 0.639604 |
2364dd688024c5cf36b133426750963aa49b2cad | 206 | py | Python | gym-foster/gym_foster/__init__.py | c4sgub/counterfactual_RL | 9dbdf53935fbe6f8da45235bc1284fc855740a46 | [
"MIT"
] | null | null | null | gym-foster/gym_foster/__init__.py | c4sgub/counterfactual_RL | 9dbdf53935fbe6f8da45235bc1284fc855740a46 | [
"MIT"
] | null | null | null | gym-foster/gym_foster/__init__.py | c4sgub/counterfactual_RL | 9dbdf53935fbe6f8da45235bc1284fc855740a46 | [
"MIT"
] | null | null | null | from gym.envs.registration import register
register(
id='foster-v0',
entry_point='gym_foster.envs:FosterEnv')
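# Once registered, these ids can be instantiated through gym.make, e.g.
# `import gym, gym_foster; env = gym.make('foster-v0')` (assuming the
# package is importable).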
register(
    id='foster-v1',
    entry_point='gym_foster.envs:FosterAEnv'
) | 20.6 | 48 | 0.68932 |
b134f1b1ef20bc5ecf87b296d9401ab15747271b | 269 | py | Python | tests/artificial/transf_Fisher/trend_LinearTrend/cycle_7/ar_/test_artificial_1024_Fisher_LinearTrend_7__20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/artificial/transf_Fisher/trend_LinearTrend/cycle_7/ar_/test_artificial_1024_Fisher_LinearTrend_7__20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/artificial/transf_Fisher/trend_LinearTrend/cycle_7/ar_/test_artificial_1024_Fisher_LinearTrend_7__20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
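# Builds a 1024-point artificial daily ('D') series with a linear trend, a
# cycle of length 7 and a Fisher transform (sigma=0 noise, 20 exogenous
# variables, AR order 0), then runs PyAF's benchmark pipeline on it
# (interpretation inferred from the keyword names).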
art.process_dataset(N=1024, FREQ='D', seed=0, trendtype="LinearTrend", cycle_length=7, transform="Fisher", sigma=0.0, exog_count=20, ar_order=0) | 38.428571 | 164 | 0.732342 |
efed7199316416eb3a1e5d4abf004d1a71e1de98 | 13,498 | py | Python | tests/profiling/exporter/test_http.py | AlexandreYang/dd-trace-py | 41508c7c230e7b30d32b942cbf08ccfc9901e3b8 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/profiling/exporter/test_http.py | AlexandreYang/dd-trace-py | 41508c7c230e7b30d32b942cbf08ccfc9901e3b8 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/profiling/exporter/test_http.py | AlexandreYang/dd-trace-py | 41508c7c230e7b30d32b942cbf08ccfc9901e3b8 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
import collections
import errno
import email.parser
import platform
import socket
import threading
import time
import pytest
from ddtrace import compat
from ddtrace.vendor import six
from ddtrace.vendor.six.moves import BaseHTTPServer
from ddtrace.vendor.six.moves import http_client
import ddtrace
from ddtrace.profiling.exporter import http
from . import test_pprof
_API_KEY = "my-api-key"
class _APIEndpointRequestHandlerTest(BaseHTTPServer.BaseHTTPRequestHandler):
error_message_format = "%(message)s\n"
error_content_type = "text/plain"
@staticmethod
def log_message(format, *args): # noqa: A002
pass
@staticmethod
def _check_tags(tags):
tags.sort()
return (
            len(tags) == 7
            and tags[0].startswith(b"host:")
            and tags[1] == b"language:python"
            and tags[2] == ("profiler_version:%s" % ddtrace.__version__).encode("utf-8")
            and tags[3].startswith(b"runtime-id:")
            and tags[4] == b"runtime:CPython"
            and tags[5].startswith(b"service:")
            and tags[6] == platform.python_version().encode()
        )
def do_POST(self):
api_key = self.headers["DD-API-KEY"]
if api_key != _API_KEY:
self.send_error(400, "Wrong API Key")
return
length = int(self.headers["Content-Length"])
body = self.rfile.read(length)
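        # Rebuild a standalone MIME document: the multipart body alone lacks
        # the Content-Type header (which carries the boundary), so prepend it
        # before handing everything to the email parser.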
mmpart = b"Content-Type: " + self.headers["Content-Type"].encode() + b"\r\n" + body
if six.PY2:
msg = email.parser.Parser().parsestr(mmpart)
else:
msg = email.parser.BytesParser().parsebytes(mmpart)
if not msg.is_multipart():
self.send_error(400, "No multipart")
return
items = collections.defaultdict(list)
for part in msg.get_payload():
items[part.get_param("name", header="content-disposition")].append(part.get_payload(decode=True))
for key, check in {
"recording-start": lambda x: x[0] == b"1970-01-01T00:00:00Z",
"recording-end": lambda x: x[0].startswith(b"20"),
"runtime": lambda x: x[0] == platform.python_implementation().encode(),
"format": lambda x: x[0] == b"pprof",
"type": lambda x: x[0] == b"cpu+alloc+exceptions",
"tags[]": self._check_tags,
"chunk-data": lambda x: x[0].startswith(b"\x1f\x8b\x08\x00"),
}.items():
if not check(items[key]):
self.send_error(400, "Wrong value for %s: %r" % (key, items[key]))
return
self.send_error(200, "OK")
class _TimeoutAPIEndpointRequestHandlerTest(_APIEndpointRequestHandlerTest):
def do_POST(self):
# This server sleeps longer than our timeout
time.sleep(5)
self.send_error(500, "Argh")
class _ResetAPIEndpointRequestHandlerTest(_APIEndpointRequestHandlerTest):
def do_POST(self):
return
_PORT = 8992
_TIMEOUT_PORT = _PORT + 1
_RESET_PORT = _PORT + 2
_ENDPOINT = "http://localhost:%d" % _PORT
_TIMEOUT_ENDPOINT = "http://localhost:%d" % _TIMEOUT_PORT
_RESET_ENDPOINT = "http://localhost:%d" % _RESET_PORT
def _make_server(port, request_handler):
server = BaseHTTPServer.HTTPServer(("localhost", port), request_handler)
t = threading.Thread(target=server.serve_forever)
# Set daemon just in case something fails
t.daemon = True
t.start()
return server, t
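# The fixtures below rely on server.shutdown() unblocking serve_forever() in
# the helper thread; daemon=True is only a safety net if a test dies early.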
@pytest.fixture(scope="module")
def endpoint_test_server():
server, thread = _make_server(_PORT, _APIEndpointRequestHandlerTest)
try:
yield thread
finally:
server.shutdown()
thread.join()
@pytest.fixture(scope="module")
def endpoint_test_timeout_server():
server, thread = _make_server(_TIMEOUT_PORT, _TimeoutAPIEndpointRequestHandlerTest)
try:
yield thread
finally:
server.shutdown()
thread.join()
@pytest.fixture(scope="module")
def endpoint_test_reset_server():
server, thread = _make_server(_RESET_PORT, _ResetAPIEndpointRequestHandlerTest)
try:
yield thread
finally:
server.shutdown()
thread.join()
def test_wrong_api_key(endpoint_test_server):
# This is mostly testing our test server, not the exporter
exp = http.PprofHTTPExporter(_ENDPOINT, "this is not the right API key", max_retry_delay=10)
with pytest.raises(http.UploadFailed) as t:
exp.export(test_pprof.TEST_EVENTS, 0, 1)
    e = t.value.exception
assert isinstance(e, http.RequestFailed)
assert e.response.status == 400
assert e.content == b"Wrong API Key\n"
def test_export(endpoint_test_server):
exp = http.PprofHTTPExporter(_ENDPOINT, _API_KEY)
exp.export(test_pprof.TEST_EVENTS, 0, compat.time_ns())
def test_export_no_endpoint(endpoint_test_server):
exp = http.PprofHTTPExporter(endpoint="")
with pytest.raises(http.InvalidEndpoint):
exp.export(test_pprof.TEST_EVENTS, 0, 1)
def test_export_server_down():
exp = http.PprofHTTPExporter("http://localhost:2", _API_KEY, max_retry_delay=10)
with pytest.raises(http.UploadFailed) as t:
exp.export(test_pprof.TEST_EVENTS, 0, 1)
    e = t.value.exception
assert isinstance(e, (IOError, OSError))
assert e.errno == errno.ECONNREFUSED
def test_export_timeout(endpoint_test_timeout_server):
exp = http.PprofHTTPExporter(_TIMEOUT_ENDPOINT, _API_KEY, timeout=1, max_retry_delay=10)
with pytest.raises(http.UploadFailed) as t:
exp.export(test_pprof.TEST_EVENTS, 0, 1)
e = t.value.exception
assert isinstance(e, socket.timeout)
def test_export_reset(endpoint_test_reset_server):
exp = http.PprofHTTPExporter(_RESET_ENDPOINT, _API_KEY, timeout=1)
with pytest.raises(http.UploadFailed) as t:
exp.export(test_pprof.TEST_EVENTS, 0, 1)
e = t.value.exception
if six.PY3:
assert isinstance(e, ConnectionResetError)
else:
assert isinstance(e, http_client.BadStatusLine)
def test_default_from_env(monkeypatch):
monkeypatch.setenv("DD_PROFILING_API_KEY", "123")
exp = http.PprofHTTPExporter()
assert exp.api_key == "123"
assert exp.endpoint == "https://intake.profile.datadoghq.com/v1/input"
monkeypatch.setenv("DD_PROFILING_API_URL", "foobar")
exp = http.PprofHTTPExporter()
assert exp.endpoint == "foobar"
monkeypatch.setenv("DD_SITE", "datadoghq.eu")
exp = http.PprofHTTPExporter()
assert exp.endpoint == "foobar"
monkeypatch.delenv("DD_PROFILING_API_URL")
exp = http.PprofHTTPExporter()
assert exp.endpoint == "https://intake.profile.datadoghq.eu/v1/input"
monkeypatch.setenv("DD_API_KEY", "456")
exp = http.PprofHTTPExporter()
assert exp.api_key == "123"
monkeypatch.delenv("DD_PROFILING_API_KEY")
exp = http.PprofHTTPExporter()
assert exp.api_key == "456"
monkeypatch.setenv("DD_SERVICE", "myservice")
exp = http.PprofHTTPExporter()
assert exp.service_name == "myservice"
def _check_tags_types(tags):
for k, v in tags.items():
assert isinstance(k, str)
assert isinstance(v, bytes)
def test_get_tags():
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert len(tags) == 7
assert tags["service"] == b"foobar"
assert len(tags["host"])
assert len(tags["runtime-id"])
assert tags["language"] == b"python"
assert tags["runtime"] == b"CPython"
assert tags["profiler_version"] == ddtrace.__version__.encode("utf-8")
assert "version" not in tags
def test_get_malformed(monkeypatch):
monkeypatch.setenv("DD_TAGS", "mytagfoobar")
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert len(tags) == 7
assert tags["service"] == b"foobar"
assert len(tags["host"])
assert len(tags["runtime-id"])
assert tags["language"] == b"python"
assert tags["runtime"] == b"CPython"
assert tags["profiler_version"] == ddtrace.__version__.encode("utf-8")
monkeypatch.setenv("DD_TAGS", "mytagfoobar,")
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert len(tags) == 7
assert tags["service"] == b"foobar"
assert len(tags["host"])
assert len(tags["runtime-id"])
assert tags["language"] == b"python"
assert tags["runtime"] == b"CPython"
assert tags["profiler_version"] == ddtrace.__version__.encode("utf-8")
monkeypatch.setenv("DD_TAGS", ",")
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert len(tags) == 7
assert tags["service"] == b"foobar"
assert len(tags["host"])
assert len(tags["runtime-id"])
assert tags["language"] == b"python"
assert tags["runtime"] == b"CPython"
assert tags["profiler_version"] == ddtrace.__version__.encode("utf-8")
monkeypatch.setenv("DD_TAGS", "foo:bar,")
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert len(tags) == 8
assert tags["service"] == b"foobar"
assert len(tags["host"])
assert len(tags["runtime-id"])
assert tags["language"] == b"python"
assert tags["runtime"] == b"CPython"
assert tags["foo"] == b"bar"
assert tags["profiler_version"] == ddtrace.__version__.encode("utf-8")
def test_get_tags_override(monkeypatch):
monkeypatch.setenv("DD_TAGS", "mytag:foobar")
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert len(tags) == 8
assert tags["service"] == b"foobar"
assert len(tags["host"])
assert len(tags["runtime-id"])
assert tags["language"] == b"python"
assert tags["runtime"] == b"CPython"
assert tags["mytag"] == b"foobar"
assert tags["profiler_version"] == ddtrace.__version__.encode("utf-8")
assert "version" not in tags
monkeypatch.setenv("DD_TAGS", "mytag:foobar,author:jd")
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert len(tags) == 9
assert tags["service"] == b"foobar"
assert len(tags["host"])
assert len(tags["runtime-id"])
assert tags["language"] == b"python"
assert tags["runtime"] == b"CPython"
assert tags["mytag"] == b"foobar"
assert tags["author"] == b"jd"
assert tags["profiler_version"] == ddtrace.__version__.encode("utf-8")
assert "version" not in tags
monkeypatch.setenv("DD_TAGS", "")
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert len(tags) == 7
assert tags["service"] == b"foobar"
assert len(tags["host"])
assert len(tags["runtime-id"])
assert tags["language"] == b"python"
assert tags["runtime"] == b"CPython"
assert tags["profiler_version"] == ddtrace.__version__.encode("utf-8")
assert "version" not in tags
monkeypatch.setenv("DD_TAGS", "foobar:baz,service:mycustomservice")
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert len(tags) == 8
assert tags["service"] == b"mycustomservice"
assert len(tags["host"])
assert len(tags["runtime-id"])
assert tags["language"] == b"python"
assert tags["runtime"] == b"CPython"
assert tags["foobar"] == b"baz"
assert tags["profiler_version"] == ddtrace.__version__.encode("utf-8")
assert "version" not in tags
monkeypatch.setenv("DD_TAGS", "foobar:baz,service:🤣")
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert len(tags) == 8
assert tags["service"] == u"🤣".encode("utf-8")
assert len(tags["host"])
assert len(tags["runtime-id"])
assert tags["language"] == b"python"
assert tags["runtime"] == b"CPython"
assert tags["foobar"] == b"baz"
assert tags["profiler_version"] == ddtrace.__version__.encode("utf-8")
assert "version" not in tags
monkeypatch.setenv("DD_VERSION", "123")
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert len(tags) == 9
assert tags["service"] == u"🤣".encode("utf-8")
assert len(tags["host"])
assert len(tags["runtime-id"])
assert tags["language"] == b"python"
assert tags["runtime"] == b"CPython"
assert tags["foobar"] == b"baz"
assert tags["profiler_version"] == ddtrace.__version__.encode("utf-8")
assert tags["version"] == b"123"
assert "env" not in tags
monkeypatch.setenv("DD_ENV", "prod")
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert len(tags) == 10
assert tags["service"] == u"🤣".encode("utf-8")
assert len(tags["host"])
assert len(tags["runtime-id"])
assert tags["language"] == b"python"
assert tags["runtime"] == b"CPython"
assert tags["foobar"] == b"baz"
assert tags["profiler_version"] == ddtrace.__version__.encode("utf-8")
assert tags["version"] == b"123"
assert tags["env"] == b"prod"
def test_get_tags_legacy(monkeypatch):
monkeypatch.setenv("DD_PROFILING_TAGS", "mytag:baz")
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert tags["mytag"] == b"baz"
# precedence
monkeypatch.setenv("DD_TAGS", "mytag:val1,ddtag:hi")
monkeypatch.setenv("DD_PROFILING_TAGS", "mytag:val2,ddptag:lo")
tags = http.PprofHTTPExporter()._get_tags("foobar")
_check_tags_types(tags)
assert tags["mytag"] == b"val2"
assert tags["ddtag"] == b"hi"
assert tags["ddptag"] == b"lo"
| 34.085859 | 109 | 0.661283 |
2fd468c4951b4386762633284c38c71c1695f1b0 | 1,557 | py | Python | config.py | LumaGhost/dispatity-eval-fun | b7104230eeb67780925ca1bec34efb363eb01c55 | [
"MIT"
] | null | null | null | config.py | LumaGhost/dispatity-eval-fun | b7104230eeb67780925ca1bec34efb363eb01c55 | [
"MIT"
] | null | null | null | config.py | LumaGhost/dispatity-eval-fun | b7104230eeb67780925ca1bec34efb363eb01c55 | [
"MIT"
] | null | null | null | import cv2 as cv
import numpy as np
'''
folder should contain entries from the middlebury 2014 dataset
i.e. a collection of folders with the following contents:
img0.png (the left camera image)
img1.png, imgE1.png, imgL1.png (right camera image in different lighting conditions)
disp0.pfm (floating point ground truth horizontal disparity relative to the left image)
note: currently for simplicity only disp0 is supported
datasets can be downloaded here https://vision.middlebury.edu/stereo/data/scenes2014/zip/
download and unzip as many as you wish to use
place all of the datasets in the "ALL_DATASETS" folder
'''
ALL_DATASETS = "./datasets/middlebury/2014/"
'''
lighting: "default", "E", or "L" to specifiy which image1 version to load from the dataset
'''
LIGHTING = "default"
'''
bad_threshold: disparity difference from the ground truth above which a pixel
is considered "bad" when computing the percentage of "bad pixels"
'''
BAD_THRESHOLD = 1.5
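# Sketch of how BAD_THRESHOLD would typically be applied (illustrative only;
# assumes `disp` and `gt` are same-shape float arrays using inf/nan for
# invalid pixels, per the contract documented below):
#
#   valid = np.isfinite(disp) & np.isfinite(gt)
#   bad = np.abs(disp[valid] - gt[valid]) > BAD_THRESHOLD
#   bad_percent = 100.0 * bad.sum() / max(valid.sum(), 1)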
def calc_dispariry(im1, im2):
'''
    This function should expect two matrices representing the left and right images
    and return a matrix representing the disparity map computed from the two images.
    Note: invalid pixels should be expressed as inf/nan; every pixel that is not inf
    or nan is treated as a valid disparity for averages and other calculations.
'''
stereo = cv.StereoSGBM_create(numDisparities=300, blockSize=8)
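    # Assumption from the OpenCV docs: numDisparities should be divisible by
    # 16 (300 is not, so OpenCV may adjust it or behave unexpectedly), and
    # StereoSGBM.compute returns fixed-point disparities scaled by 16, hence
    # the division by 16.0 below.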
disp = stereo.compute(im1,im2).astype(np.float32) / 16.0
disp[disp <= 0.0] = np.inf
return disp | 39.923077 | 97 | 0.748876 |
b7d943b7f7a4d97ec05bad07a13f4023bc944787 | 1,143 | py | Python | appengine/trooper_o_matic/appengine_module/trooper_o_matic/controller.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | null | null | null | appengine/trooper_o_matic/appengine_module/trooper_o_matic/controller.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | null | null | null | appengine/trooper_o_matic/appengine_module/trooper_o_matic/controller.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import ndb
from appengine_module.trooper_o_matic import models
def get_cq_stats(project):
project_key = ndb.Key(models.Project, project)
single_run_data = models.CqStat.query(ancestor=project_key).order(
-models.CqStat.timestamp).fetch(limit=100)
single_run_data = [run for run in single_run_data if run.p50]
single_run_data.reverse()
queue_time_data = models.CqTimeInQueueForPatchStat.query(
ancestor=project_key).order(-models.CqStat.timestamp).fetch(limit=100)
queue_time_data = [run for run in queue_time_data if run.p50]
queue_time_data.reverse()
total_time_data = models.CqTotalTimeForPatchStat.query(
ancestor=project_key).order(-models.CqStat.timestamp).fetch(limit=100)
total_time_data = [run for run in total_time_data if run.p50]
total_time_data.reverse()
return {
'single_run_data': single_run_data,
'queue_time_data': queue_time_data,
'total_time_data': total_time_data,
}
| 38.1 | 76 | 0.768154 |
0ea3ee2fb0f96ae0bfec5f2d770f7ab4dbf6b99d | 9,825 | py | Python | onnxmltools/convert/coreml/operator_converters/TreeEnsemble.py | scnakandala/onnxmltools | cf9d14731f125338ff9e751c97f7c4277399599a | [
"MIT"
] | null | null | null | onnxmltools/convert/coreml/operator_converters/TreeEnsemble.py | scnakandala/onnxmltools | cf9d14731f125338ff9e751c97f7c4277399599a | [
"MIT"
] | null | null | null | onnxmltools/convert/coreml/operator_converters/TreeEnsemble.py | scnakandala/onnxmltools | cf9d14731f125338ff9e751c97f7c4277399599a | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from ...common._registration import register_converter
COREML_TREE_NODE_BEHAVIOR_TO_ONNX_TREE_NODE_MODE = {
0: 'BRANCH_LEQ',
1: 'BRANCH_LT',
2: 'BRANCH_GTE',
3: 'BRANCH_GT',
4: 'BRANCH_EQ',
5: 'BRANCH_NEQ',
6: 'LEAF'
}
COREML_TREE_POST_TRANSFORM_TO_ONNX_TREE_POST_TRANSFORM = {
0: 'NONE',
1: 'SOFTMAX',
2: 'LOGISTIC',
3: 'SOFTMAX_ZERO'
}
def get_onnx_tree_mode(cm_tree_behavior):
if cm_tree_behavior in COREML_TREE_NODE_BEHAVIOR_TO_ONNX_TREE_NODE_MODE:
return COREML_TREE_NODE_BEHAVIOR_TO_ONNX_TREE_NODE_MODE[cm_tree_behavior]
raise ValueError('CoreML tree node behavior not supported {0}'.format(cm_tree_behavior))
def get_onnx_tree_post_transform(cm_tree_post_transform):
if cm_tree_post_transform in COREML_TREE_POST_TRANSFORM_TO_ONNX_TREE_POST_TRANSFORM:
return COREML_TREE_POST_TRANSFORM_TO_ONNX_TREE_POST_TRANSFORM[cm_tree_post_transform]
raise ValueError('CoreML tree post transform not supported {0}'.format(cm_tree_post_transform))
def convert_tree_ensemble_model(scope, operator, container):
raw_model = operator.raw_operator
attrs = {'name': operator.full_name}
if raw_model.WhichOneof('Type') == 'treeEnsembleClassifier':
op_type = 'TreeEnsembleClassifier'
prefix = 'class'
nodes = raw_model.treeEnsembleClassifier.treeEnsemble.nodes
attrs['base_values'] = raw_model.treeEnsembleClassifier.treeEnsemble.basePredictionValue
attrs['post_transform'] = get_onnx_tree_post_transform(raw_model.treeEnsembleClassifier.postEvaluationTransform)
zipmap_attrs = {'name': scope.get_unique_operator_name('ZipMap')}
if raw_model.treeEnsembleClassifier.WhichOneof('ClassLabels') == 'int64ClassLabels':
class_labels = list(int(i) for i in raw_model.treeEnsembleClassifier.int64ClassLabels.vector)
attrs['classlabels_int64s'] = class_labels
zipmap_attrs['classlabels_int64s'] = class_labels
else:
class_labels = list(s.encode('utf-8') for s in raw_model.treeEnsembleClassifier.stringClassLabels.vector)
attrs['classlabels_strings'] = class_labels
zipmap_attrs['classlabels_strings'] = class_labels
elif raw_model.WhichOneof('Type') == 'treeEnsembleRegressor':
op_type = 'TreeEnsembleRegressor'
prefix = 'target'
nodes = raw_model.treeEnsembleRegressor.treeEnsemble.nodes
attrs['base_values'] = raw_model.treeEnsembleRegressor.treeEnsemble.basePredictionValue
attrs['n_targets'] = raw_model.treeEnsembleRegressor.treeEnsemble.numPredictionDimensions
attrs['post_transform'] = get_onnx_tree_post_transform(raw_model.treeEnsembleRegressor.postEvaluationTransform)
else:
raise ValueError('Unknown tree model type')
leaf_treeids = [node.treeId for node in nodes if 6 == node.nodeBehavior for weight in node.evaluationInfo]
leaf_nodeids = [node.nodeId for node in nodes if 6 == node.nodeBehavior for weight in node.evaluationInfo]
leaf_ids = [weight.evaluationIndex for node in nodes if 6 == node.nodeBehavior for weight in node.evaluationInfo]
leaf_weights = [weight.evaluationValue for node in nodes if 6 == node.nodeBehavior for weight in
node.evaluationInfo]
assert (len(leaf_ids) == len(leaf_weights))
assert (len(leaf_weights) == len(leaf_nodeids))
assert (len(leaf_nodeids) == len(leaf_treeids))
nodes_nodeids = [x.nodeId for x in nodes]
nodes_treeids = [x.treeId for x in nodes]
nodes_featureids = [x.branchFeatureIndex for x in nodes]
nodes_values = [x.branchFeatureValue for x in nodes]
nodes_truenodeids = [x.trueChildNodeId for x in nodes]
nodes_falsenodeids = [x.falseChildNodeId for x in nodes]
nodes_missing_value_tracks_true = [x.missingValueTracksTrueChild for x in nodes]
nodes_hitrates = [float(x.relativeHitRate) for x in nodes]
nodes_modes = [get_onnx_tree_mode(x.nodeBehavior) for x in nodes]
attrs['nodes_treeids'] = nodes_treeids
attrs['nodes_nodeids'] = nodes_nodeids
attrs['nodes_featureids'] = nodes_featureids
attrs['nodes_values'] = nodes_values
attrs['nodes_hitrates'] = nodes_hitrates
attrs['nodes_modes'] = nodes_modes
attrs['nodes_truenodeids'] = nodes_truenodeids
attrs['nodes_falsenodeids'] = nodes_falsenodeids
attrs['nodes_missing_value_tracks_true'] = nodes_missing_value_tracks_true
attrs[prefix + '_treeids'] = leaf_treeids
attrs[prefix + '_nodeids'] = leaf_nodeids
attrs[prefix + '_ids'] = leaf_ids
attrs[prefix + '_weights'] = leaf_weights
# For regression, we can simply construct a model. For classifier, due to the different representation of
# classes' probabilities, we need to add some operators for type conversion.
if raw_model.WhichOneof('Type') == 'treeEnsembleRegressor':
# Create ONNX representation of this operator. If there is only one input, its full topology is
#
# input features ---> TreeEnsembleRegressor ---> output
#
# If there are multiple (e.g., "N" features) input features, we need to concatenate them all together before feeding them into
# ONNX tree-based model. It leads to the following computational graph.
#
# input feature 1 -----.
# ... |
# ... v
# ... ---> Feature Vectorizer ---> TreeEnsembleRegressor ---> output
# ... ^
# ... |
# input feature N -----'
if len(operator.inputs) > 1:
feature_vector_name = scope.get_unique_variable_name('feature_vector')
container.add_node('FeatureVectorizer', operator.input_full_names, feature_vector_name,
op_domain='ai.onnx.ml', name=scope.get_unique_operator_name('FeatureVectorizer'),
inputdimensions=[variable.type.shape[1] for variable in operator.inputs])
container.add_node(op_type, feature_vector_name, operator.output_full_names,
op_domain='ai.onnx.ml', **attrs)
else:
container.add_node(op_type, operator.input_full_names, operator.output_full_names,
op_domain='ai.onnx.ml', **attrs)
else:
# For classifiers, due to the different representation of classes' probabilities, we need to add some
# operators for type conversion. It turns out that we have the following topology.
# input features ---> TreeEnsembleClassifier ---> label (must present)
# |
# '--> probability tensor ---> ZipMap ---> probability map (optional)
#
# Similar to the regressor's case, if there are multiple input features, we need to concatenate them all
# together before feeding them into ONNX tree-based model. It leads to the following computational graph.
#
# input feature 1 -----.
# ... |
# ... v
# ... ---> Feature Vectorizer ---> TreeEnsembleClassifier ---> label (must present)
# ... ^ |
# ... | '--> probability tensor ---> ZipMap ---> probability
# input feature N -----' map (optional)
# Set up input feature(s)
if len(operator.inputs) > 1:
feature_vector_name = scope.get_unique_variable_name('feature_vector')
container.add_node('FeatureVectorizer', operator.input_full_names, feature_vector_name,
op_domain='ai.onnx.ml', name=scope.get_unique_operator_name('FeatureVectorizer'),
inputdimensions=[variable.type.shape[1] for variable in operator.inputs])
else:
feature_vector_name = operator.inputs[0].full_name
# Find label name and probability name
proba_output_name = None
for variable in operator.outputs:
if raw_model.description.predictedFeatureName == variable.raw_name:
label_output_name = variable.full_name
if raw_model.description.predictedProbabilitiesName != '' and raw_model.description.predictedProbabilitiesName == variable.raw_name:
proba_output_name = variable.full_name
proba_tensor_name = scope.get_unique_variable_name('ProbabilityTensor')
if proba_output_name is not None:
# Add tree model ONNX node with probability output
container.add_node(op_type, feature_vector_name, [label_output_name, proba_tensor_name],
op_domain='ai.onnx.ml', **attrs)
# Add ZipMap to convert probability tensor into probability map
container.add_node('ZipMap', [proba_tensor_name], [proba_output_name],
op_domain='ai.onnx.ml', **zipmap_attrs)
else:
            # Add tree-based classifier ONNX node without probability output
container.add_node(op_type, feature_vector_name, [label_output_name, proba_tensor_name],
op_domain='ai.onnx.ml', **attrs)
register_converter("treeEnsembleClassifier", convert_tree_ensemble_model)
register_converter("treeEnsembleRegressor", convert_tree_ensemble_model)
| 53.983516 | 144 | 0.653435 |
84a401cc6ae80bba7822e1891c3f86868eb4e302 | 1,876 | py | Python | tests/contract_tests/KT1F25MTKpQJF8xJXVCNhweGmsxHtAjCDTFx/test_f25mtk_setFrozen.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | 98 | 2019-02-07T16:33:38.000Z | 2022-03-31T15:53:41.000Z | tests/contract_tests/KT1F25MTKpQJF8xJXVCNhweGmsxHtAjCDTFx/test_f25mtk_setFrozen.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | 152 | 2019-05-20T16:38:56.000Z | 2022-03-30T14:24:38.000Z | tests/contract_tests/KT1F25MTKpQJF8xJXVCNhweGmsxHtAjCDTFx/test_f25mtk_setFrozen.py | konchunas/pytezos | 65576d18bdf1956fae8ea21241b6c43a38921b83 | [
"MIT"
] | 34 | 2019-07-25T12:03:51.000Z | 2021-11-11T22:23:38.000Z | from unittest import TestCase
from os.path import dirname, join
import json
from pytezos.michelson.program import MichelsonProgram
from pytezos.michelson.types.big_map import big_map_diff_to_lazy_diff
from pytezos.michelson.forge import forge_micheline, unforge_micheline
folder = 'dexter_usdtz_xtz'
entrypoint = 'removeLiquidity'
class MainnetOperationTestCaseF25MTK(TestCase):
@classmethod
def setUpClass(cls):
        with open(join(dirname(__file__), '__script__.json')) as f:
            script = json.loads(f.read())
        cls.program = MichelsonProgram.match(script['code'])
        with open(join(dirname(__file__), 'setFrozen.json')) as f:
            operation = json.loads(f.read())
        cls.entrypoint = 'setFrozen'
cls.operation = operation
# cls.maxDiff = None
def test_parameters_f25mtk(self):
original_params = self.program.parameter.from_parameters(self.operation['parameters'])
py_obj = original_params.to_python_object()
# pprint(py_obj)
readable_params = self.program.parameter.from_parameters(original_params.to_parameters(mode='readable'))
self.assertEqual(py_obj, readable_params.to_python_object())
self.program.parameter.from_python_object(py_obj)
def test_lazy_storage_f25mtk(self):
storage = self.program.storage.from_micheline_value(self.operation['storage'])
lazy_diff = big_map_diff_to_lazy_diff(self.operation['big_map_diff'])
extended_storage = storage.merge_lazy_diff(lazy_diff)
py_obj = extended_storage.to_python_object(try_unpack=True, lazy_diff=True)
# pprint(py_obj)
def test_parameters_forging(self):
expected = self.operation['parameters'].get('value', {'prim': 'Unit'})
actual = unforge_micheline(forge_micheline(expected))
self.assertEqual(expected, actual)
| 39.083333 | 112 | 0.722281 |
ad17d54e9134594d3fe09c96150896f9026eb57b | 118 | py | Python | exercises/level_1/jinja_templates/with_flask/run.py | eyalle/python_course | acc75fd3c81f69f314099051026c81d80d141a84 | [
"MIT"
] | null | null | null | exercises/level_1/jinja_templates/with_flask/run.py | eyalle/python_course | acc75fd3c81f69f314099051026c81d80d141a84 | [
"MIT"
] | null | null | null | exercises/level_1/jinja_templates/with_flask/run.py | eyalle/python_course | acc75fd3c81f69f314099051026c81d80d141a84 | [
"MIT"
] | null | null | null | from exercises.level_1.jinja_templates.with_flask.app import app
if __name__ == '__main__':
app.run(debug=True)
| 19.666667 | 64 | 0.762712 |
6677eac92b8c0838da6cb90d961a8052bd894a32 | 2,312 | py | Python | venv/Lib/site-packages/win32/test/test_win32gui.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 150 | 2021-11-02T05:31:51.000Z | 2022-03-24T06:22:22.000Z | venv/Lib/site-packages/win32/test/test_win32gui.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 4 | 2021-12-01T11:55:58.000Z | 2022-02-24T16:14:37.000Z | venv/Lib/site-packages/win32/test/test_win32gui.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 33 | 2021-11-03T00:29:41.000Z | 2022-03-15T13:15:56.000Z | # tests for win32gui
import unittest
import win32gui
import pywin32_testutil
import operator
import array
import sys
class TestPyGetString(unittest.TestCase):
def test_get_string(self):
# test invalid addresses cause a ValueError rather than crash!
self.assertRaises(ValueError, win32gui.PyGetString, 0)
self.assertRaises(ValueError, win32gui.PyGetString, 1)
self.assertRaises(ValueError, win32gui.PyGetString, 1, 1)
class TestPyGetMemory(unittest.TestCase):
def test_ob(self):
# Check the PyGetMemory result and a bytes string can be compared
test_data = b"\0\1\2\3\4\5\6"
c = array.array("b", test_data)
addr, buflen = c.buffer_info()
got = win32gui.PyGetMemory(addr, buflen)
self.assertEqual(len(got), len(test_data))
self.assertEqual(bytes(got), test_data)
def test_memory_index(self):
# Check we can index into the buffer object returned by PyGetMemory
test_data = b"\0\1\2\3\4\5\6"
c = array.array("b", test_data)
addr, buflen = c.buffer_info()
got = win32gui.PyGetMemory(addr, buflen)
self.assertEqual(got[0], 0)
def test_memory_slice(self):
# Check we can slice the buffer object returned by PyGetMemory
test_data = b"\0\1\2\3\4\5\6"
c = array.array("b", test_data)
addr, buflen = c.buffer_info()
got = win32gui.PyGetMemory(addr, buflen)
self.assertEqual(list(got[0:3]), [0, 1, 2])
def test_real_view(self):
# Do the PyGetMemory, then change the original memory, then ensure
# the initial object we fetched sees the new value.
test_data = b"\0\1\2\3\4\5\6"
c = array.array("b", test_data)
addr, buflen = c.buffer_info()
got = win32gui.PyGetMemory(addr, buflen)
self.assertEqual(got[0], 0)
c[0] = 1
self.assertEqual(got[0], 1)
def test_memory_not_writable(self):
# Check the buffer object fetched by PyGetMemory isn't writable.
test_data = b"\0\1\2\3\4\5\6"
c = array.array("b", test_data)
addr, buflen = c.buffer_info()
got = win32gui.PyGetMemory(addr, buflen)
self.assertRaises(TypeError, operator.setitem, got, 0, 1)
if __name__ == "__main__":
unittest.main()
| 35.030303 | 75 | 0.645329 |
aee273744f0c9ab9b52c56544767d51d9913b063 | 3,836 | py | Python | pysnmp-with-texts/HUAWEI-LswSMON-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/HUAWEI-LswSMON-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/HUAWEI-LswSMON-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module HUAWEI-LswSMON-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-LswSMON-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:46:27 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion")
huaweiDatacomm, huaweiMgmt = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "huaweiDatacomm", "huaweiMgmt")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Bits, iso, TimeTicks, ObjectIdentity, Counter32, MibIdentifier, Integer32, NotificationType, IpAddress, Counter64, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "iso", "TimeTicks", "ObjectIdentity", "Counter32", "MibIdentifier", "Integer32", "NotificationType", "IpAddress", "Counter64", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
hwSmonExtend = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 26))
smonExtendObject = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 26, 1))
hwdot1qVlanStatNumber = MibScalar((1, 3, 6, 1, 4, 1, 2011, 5, 25, 26, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1qVlanStatNumber.setStatus('mandatory')
if mibBuilder.loadTexts: hwdot1qVlanStatNumber.setDescription('The number of vlans that can collect statistics of packets.')
hwdot1qVlanStatStatusTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 26, 1, 2), )
if mibBuilder.loadTexts: hwdot1qVlanStatStatusTable.setStatus('mandatory')
if mibBuilder.loadTexts: hwdot1qVlanStatStatusTable.setDescription('VLAN statistics status table.')
hwdot1qVlanStatStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 26, 1, 2, 1), ).setIndexNames((0, "HUAWEI-LswSMON-MIB", "hwdot1qVlanStatEnableIndex"))
if mibBuilder.loadTexts: hwdot1qVlanStatStatusEntry.setStatus('mandatory')
if mibBuilder.loadTexts: hwdot1qVlanStatStatusEntry.setDescription(' VLAN statistics status table entry.')
hwdot1qVlanStatEnableIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 26, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwdot1qVlanStatEnableIndex.setStatus('mandatory')
if mibBuilder.loadTexts: hwdot1qVlanStatEnableIndex.setDescription('Vlan index .')
hwdot1qVlanStatEnableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 26, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hwdot1qVlanStatEnableStatus.setStatus('mandatory')
if mibBuilder.loadTexts: hwdot1qVlanStatEnableStatus.setDescription('VLAN Statistics Status.It represent the current VLAN supports statistic or not.')
mibBuilder.exportSymbols("HUAWEI-LswSMON-MIB", smonExtendObject=smonExtendObject, hwdot1qVlanStatNumber=hwdot1qVlanStatNumber, hwSmonExtend=hwSmonExtend, hwdot1qVlanStatEnableStatus=hwdot1qVlanStatEnableStatus, hwdot1qVlanStatEnableIndex=hwdot1qVlanStatEnableIndex, hwdot1qVlanStatStatusTable=hwdot1qVlanStatStatusTable, hwdot1qVlanStatStatusEntry=hwdot1qVlanStatStatusEntry)
| 116.242424 | 477 | 0.792492 |
3aa957f779b875d4b1ae2b6b59c3dc06c04ec87c | 3,907 | py | Python | app/recipe/test/test_tags_api.py | trbs/recipe-app-api | b290f72c1c24f247e536eeee300c7e157511a3c2 | [
"MIT"
] | null | null | null | app/recipe/test/test_tags_api.py | trbs/recipe-app-api | b290f72c1c24f247e536eeee300c7e157511a3c2 | [
"MIT"
] | 7 | 2020-03-06T13:41:34.000Z | 2022-02-13T05:23:39.000Z | app/recipe/test/test_tags_api.py | trbs/recipe-app-api | b290f72c1c24f247e536eeee300c7e157511a3c2 | [
"MIT"
] | 1 | 2020-02-04T20:41:43.000Z | 2020-02-04T20:41:43.000Z | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
"""Test the publicly available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login required for retrieving tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""Test the authorized user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'test@londonappdev.com',
'password'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieving tags"""
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""Test that tags returned are for authenticated user"""
user2 = get_user_model().objects.create_user(
'other@londonappdev.com',
'testpass'
)
Tag.objects.create(user=user2, name='Fruity')
tag = Tag.objects.create(user=self.user, name='Comfort Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
    def test_create_tags_successful(self):
        """Test creating a new tag"""
        payload = {'name': 'Test Tag'}
        self.client.post(TAGS_URL, payload)
        exists = Tag.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)
    def test_create_tag_invalid(self):
        """Test creating a new tag with invalid payload"""
        payload = {'name': ''}
        res = self.client.post(TAGS_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    def test_retrieve_tags_assigned_to_recipes(self):
        """Test filtering by tags only assigned to recipes"""
        tag1 = Tag.objects.create(user=self.user, name="Breakfast")
        tag2 = Tag.objects.create(user=self.user, name="Beef")
        recipe = Recipe.objects.create(
            title="French Toast",
            time=15,
            price=10.00,
            user=self.user)
        recipe.tags.add(tag1)
        res = self.client.get(TAGS_URL, {'assigned_only': 1})
        serializer1 = TagSerializer(tag1)
        serializer2 = TagSerializer(tag2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)
    def test_retrieve_tags_assigned_unique(self):
        """Test that tags returned when filtering are unique"""
        tag = Tag.objects.create(user=self.user, name="Breakfast")
        Tag.objects.create(user=self.user, name="Lunch")
        recipe1 = Recipe.objects.create(
            title='French Toast',
            time=15,
            price=5.00,
            user=self.user)
        recipe1.tags.add(tag)
        recipe2 = Recipe.objects.create(
            title='Pancakes',
            time=15,
            price=6.00,
            user=self.user)
        recipe2.tags.add(tag)
        res = self.client.get(TAGS_URL, {'assigned_only': 1})
        self.assertEqual(len(res.data), 1) | 31.508065 | 71 | 0.638085 |
6c94d08408223ff1d4644956796ed017935a6100 | 420 | py | Python | Lists/2TakingListElemInput.py | palakbaphna/pyprac | 992770d5aed73c632a69b4bb22f471f35d083ee5 | [
"Apache-2.0"
] | null | null | null | Lists/2TakingListElemInput.py | palakbaphna/pyprac | 992770d5aed73c632a69b4bb22f471f35d083ee5 | [
"Apache-2.0"
] | null | null | null | Lists/2TakingListElemInput.py | palakbaphna/pyprac | 992770d5aed73c632a69b4bb22f471f35d083ee5 | [
"Apache-2.0"
] | null | null | null |
a = [0] * int(input("enter the number of elements:"))
# a starts as a list of zeros; taking its length from input fixes the size up front
for i in range(len(a)):  # if len is 3, i takes the values 0, 1, 2
    a[i] = int(input())  # the length is already fixed, so exactly that many inputs are read
print(a * 3)  # the whole list is repeated 3 times (list repetition, not element-wise math)
print(a[1] * 3)
print(a[1] - 2)
| 32.307692 | 119 | 0.680952 |
209d749619c7464d03f7865a62d6534b372e49f4 | 672 | py | Python | ImgVidProcessing/Exercise1/exercise1.py | SystemNinja/MyPythonPrograms | 6bdebb5017994c3431aea769319f702075fff9b9 | [
"MIT"
] | null | null | null | ImgVidProcessing/Exercise1/exercise1.py | SystemNinja/MyPythonPrograms | 6bdebb5017994c3431aea769319f702075fff9b9 | [
"MIT"
] | null | null | null | ImgVidProcessing/Exercise1/exercise1.py | SystemNinja/MyPythonPrograms | 6bdebb5017994c3431aea769319f702075fff9b9 | [
"MIT"
] | null | null | null | """
Reference: https://www.udemy.com/the-python-mega-course/learn/v4/t/lecture/4775490?start=0
"""
import cv2
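# The second argument 0 to cv2.imread is cv2.IMREAD_GRAYSCALE, so every image
# is loaded single-channel before being resized to a fixed 100x100 below.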
img1=cv2.imread("Exercise1\galaxy.jpg", 0)
img2=cv2.imread("Exercise1\kangaroos.jpg", 0)
img3=cv2.imread("Exercise1\Lighthouse.jpg", 0)
img4=cv2.imread("Exercise1\MoonSun.jpg", 0)
resize1=cv2.resize(img1,(100,100))
resize2=cv2.resize(img2,(100,100))
resize3=cv2.resize(img3,(100,100))
resize4=cv2.resize(img4,(100,100))
cv2.imwrite("Exercise1\galaxy_resized.jpg", resize1)
cv2.imwrite("Exercise1\kangaroos_resized.jpg", resize2)
cv2.imwrite("Exercise1\lighthouse_resized.jpg", resize3)
cv2.imwrite("Exercise1\MoonSun_resized.jpg", resize4) | 33.6 | 91 | 0.745536 |
547391eed26bc4f64979eb4379ff1ba3539eceba | 5,447 | py | Python | testscripts/RDKB/component/RDKB_Logger/TS_RDKBLogger_EnvGetValueFromNum.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/RDKB_Logger/TS_RDKBLogger_EnvGetValueFromNum.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/RDKB_Logger/TS_RDKBLogger_EnvGetValueFromNum.py | cablelabs/tools-tdkb | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>2</version>
<name>TS_RDKBLogger_EnvGetValueFromNum</name>
<primitive_test_id/>
<primitive_test_name>RDKBLogger_EnvGetValueFromNum</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>This tests the getting of logging level from registered number functionality.
Test Case ID: CT_RDKBLogger_06
Test Type: Positive</synopsis>
<groups_id/>
<execution_time>5</execution_time>
<long_duration>false</long_duration>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Emulator</box_type>
<box_type>Broadband</box_type>
<box_type>RPI</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_RDKLOGGER_6</test_case_id>
<test_objective>To get the logging level from registered number functionality.</test_objective>
<test_type>Positive</test_type>
<test_setup>Emulator,XB3</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components.
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>Json Interface:
API Name
RDKBLogger_EnvGetValueFromNum
Input:
number = 16</input_parameters>
<automation_approch>1.Function which needs to be tested will be configured in Test Manager GUI.
2.Python Script will be generated by Test Manager with provided arguments in configure page.
3.TM will load the RDKLogger library via Test agent
4.From python script, invoke RDKBLogger_EnvGetValueFromNum() stub function to get the logging level from registered number functionality.
5.RDKLogger stub function will call the rdk_logger_envGetValueFromNum() function of the rdk-logger component in ccsp.
6.Responses from the RDKLogger stub function will be logged in Agent Console log.
7.RDKLogger stub will validate the actual result with the expected result and send the result status to Test Manager.
8.Test Manager will publish the result in GUI as PASS/FAILURE based on the response from RDKLogger stub.</automation_approch>
<except_output>CheckPoint 1:
Logging level from the registered number should be logged in the Agent console/Component log
CheckPoint 2:
Stub function result should be success and should see corresponding log in the agent console log
CheckPoint 3:
TestManager GUI will publish the result as PASS in Execution/Console page of Test Manager</except_output>
<priority>High</priority>
<test_stub_interface>None</test_stub_interface>
<test_script>TS_RDKBLogger_EnvGetValueFromNum</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
# use tdklib library, which provides a wrapper for TDK testcase scripts
from tdklib import TDKScriptingLibrary;
# IP and port of the box; no need to change,
# these placeholders are replaced with the box IP and port at execution time
ip = <ipaddress>
port = <port>
#Test component to be tested
obj = TDKScriptingLibrary("rdklogger","RDKB");
obj.configureTestCase(ip,port,'TS_RDKBLogger_EnvGetValueFromNum');
#Get the result of connection with test component and Gateway
result =obj.getLoadModuleResult();
print "rdklogger module loading status :%s" %result;
#Check for SUCCESS/FAILURE of rdklogger module
if "SUCCESS" in result.upper():
#Set the module loading status
obj.setLoadModuleStatus("SUCCESS");
#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('RDKBLogger_EnvGetValueFromNum');
expectedRes = "SUCCESS"
number = 16
print "Requested number: %d"%number
tdkTestObj.addParameter("number",number);
#Execute the test case in Gateway
tdkTestObj.executeTestCase(expectedRes);
#Get the result of execution
result = tdkTestObj.getResult();
print "[TEST EXECUTION RESULT] : %s" %result;
details = tdkTestObj.getResultDetails();
#Set the result status of execution
if "SUCCESS" in result.upper():
tdkTestObj.setResultStatus("SUCCESS");
print "rdklogger env get value Successful: [%s]" %details;
else:
tdkTestObj.setResultStatus("FAILURE");
print "rdklogger env get value Failed: [%s]"%details;
#unloading rdklogger module
obj.unloadModule("rdklogger");
else:
print "Failed to load rdklogger module";
#Set the module loading status
obj.setLoadModuleStatus("FAILURE");
| 40.649254 | 140 | 0.742243 |
54f248f2ada2aa4f15812efd9dced0b198f45754 | 1,607 | py | Python | data-structures/ds-slinklst/python3/linked_list.py | NuclearCactus/FOSSALGO | eb66f3bdcd6c42c66e8fc7110a32ac021596ca66 | [
"MIT"
] | 59 | 2018-09-11T17:40:25.000Z | 2022-03-03T14:40:39.000Z | data-structures/ds-slinklst/python3/linked_list.py | RitvikDayal/FOSSALGO | ae225a5fffbd78d0dff83fd7b178ba47bfd7a769 | [
"MIT"
] | 468 | 2018-08-28T17:04:29.000Z | 2021-12-03T15:16:34.000Z | data-structures/ds-slinklst/python3/linked_list.py | RitvikDayal/FOSSALGO | ae225a5fffbd78d0dff83fd7b178ba47bfd7a769 | [
"MIT"
] | 253 | 2018-08-28T17:08:51.000Z | 2021-11-01T12:30:39.000Z | class Node:
def __init__(self, d, n=None):
self.data = d
self.next = n # developed the Constructor to the Node
class LinkedList:
def __init__(self, r=None):
"""Initializing the Linked list."""
self.root = r
self.size = 0
self.item = 0
def add(self, item):
new_node = Node(item, self.root)
self.root = new_node
self.size += 1
return print("Successfully added", item)
def length(self):
return self.size
    def search(self, item):
        nd = self.root
        while nd:
            if nd.data == item:
                return print(item, "found")
            nd = nd.next
        return print(item, "not found")
    def delet(self, item):
        if self.root is None:
            return print(item, "item not found")
        if self.root.data == item:
            self.root = self.root.next
            self.size -= 1
            return print("deleted", item)
        prev = self.root
        nd = self.root.next
        while nd:
            if nd.data == item:
                prev.next = nd.next
                self.size -= 1
                return print("deleted", item)
            nd = nd.next
            prev = prev.next
        return print(item, "item not found")
lin = LinkedList()
lin.add(23)
lin.add("manjitha")
lin.add("teshara")
lin.add("false")
lin.search(23)
lin.search("manjitha")
print(lin.size)
lin.delet(23)
lin.delet("teran")
lin.add("manji")
lin.add("teshara")
lin.add("true")
lin.search(23)
lin.search("manjitha")
print(lin.size)
lin.delet(23)
| 20.602564 | 62 | 0.511512 |
1654a1ea82e26dafda4ead99ab62e7b057cf1b5d | 11,212 | py | Python | docs/conf_common.py | Mixerito/esp-idf | 20a662936483f44ee9c8d16f3251a5a1191ca6e5 | [
"Apache-2.0"
] | null | null | null | docs/conf_common.py | Mixerito/esp-idf | 20a662936483f44ee9c8d16f3251a5a1191ca6e5 | [
"Apache-2.0"
] | null | null | null | docs/conf_common.py | Mixerito/esp-idf | 20a662936483f44ee9c8d16f3251a5a1191ca6e5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Common (non-language-specific) configuration for Read The Docs & Sphinx
#
# Based on a Read the Docs Template documentation build configuration file,
# created by sphinx-quickstart on Tue Aug 26 14:19:49 2014.
#
# This file is imported from a language-specific conf.py (ie en/conf.py or
# zh_CN/conf.py)
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import re
from subprocess import Popen, PIPE
import shlex
# Note: If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute
from local_util import run_cmd_get_output, copy_if_modified
builddir = '_build'
if 'BUILDDIR' in os.environ:
builddir = os.environ['BUILDDIR']
# Call Doxygen to get XML files from the header files
print "Calling Doxygen to generate latest XML files"
os.system("doxygen ../Doxyfile")
# Doxygen has generated XML files in 'xml' directory.
# Copy them to 'xml_in', only touching the files which have changed.
copy_if_modified('xml/', 'xml_in/')
# Generate 'api_name.inc' files using the XML files by Doxygen
os.system('python ../gen-dxd.py')
# Generate 'kconfig.inc' file from components' Kconfig files
kconfig_inc_path = '{}/inc/kconfig.inc'.format(builddir)
os.system('python ../gen-kconfig-doc.py > ' + kconfig_inc_path + '.in')
copy_if_modified(kconfig_inc_path + '.in', kconfig_inc_path)
# http://stackoverflow.com/questions/12772927/specifying-an-online-image-in-sphinx-restructuredtext-format
#
suppress_warnings = ['image.nonlocal_uri']
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe',
'link-roles',
'sphinxcontrib.blockdiag',
'sphinxcontrib.seqdiag',
'sphinxcontrib.actdiag',
'sphinxcontrib.nwdiag',
'sphinxcontrib.rackdiag',
'sphinxcontrib.packetdiag'
]
# Set up font for blockdiag, nwdiag, rackdiag and packetdiag
blockdiag_fontpath = '../_static/DejaVuSans.ttf'
seqdiag_fontpath = '../_static/DejaVuSans.ttf'
actdiag_fontpath = '../_static/DejaVuSans.ttf'
nwdiag_fontpath = '../_static/DejaVuSans.ttf'
rackdiag_fontpath = '../_static/DejaVuSans.ttf'
packetdiag_fontpath = '../_static/DejaVuSans.ttf'
# Breathe extension variables
# Doxygen regenerates files in 'xml/' directory every time,
# but we copy files to 'xml_in/' only when they change, to speed up
# incremental builds.
breathe_projects = { "esp32-idf": "xml_in/" }
breathe_default_project = "esp32-idf"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Readthedocs largely ignores 'version' and 'release', and displays one of
# 'latest', tag name, or branch name, depending on the build type.
# Still, this is useful for non-RTD builds.
# This is supposed to be "the short X.Y version", but it's the only version
# visible when you open index.html.
# Display full version to make things less confusing.
version = run_cmd_get_output('git describe')
# The full version, including alpha/beta/rc tags.
# If needed, nearest tag is returned by 'git describe --abbrev=0'.
release = version
print('Version: {0} Release: {1}'.format(version, release))
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build','README.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReadtheDocsTemplatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ReadtheDocsTemplate.tex', u'Read the Docs Template Documentation',
u'Read the Docs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'readthedocstemplate', u'Read the Docs Template Documentation',
[u'Read the Docs'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ReadtheDocsTemplate', u'Read the Docs Template Documentation',
u'Read the Docs', 'ReadtheDocsTemplate', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Use sphinx_rtd_theme for local builds --------------------------------
# ref. https://github.com/snide/sphinx_rtd_theme#using-this-theme-locally-then-building-on-read-the-docs
#
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
| 33.975758 | 106 | 0.716821 |
e58fa7e66201fcf0d1b15e399330eef4350fd27d | 1,870 | py | Python | packs/orion/tests/test_action_node_pollnow.py | prajwal222/prajwal | ce1431858a9b54ae2a9546e9afab9f4b722bd210 | [
"Apache-2.0"
] | null | null | null | packs/orion/tests/test_action_node_pollnow.py | prajwal222/prajwal | ce1431858a9b54ae2a9546e9afab9f4b722bd210 | [
"Apache-2.0"
] | 1 | 2022-03-08T17:03:46.000Z | 2022-03-08T17:03:46.000Z | packs/orion/tests/test_action_node_pollnow.py | isabella232/st2contrib | 182af2fb6e26a1d002954b19a5cc7afc73307872 | [
"Apache-2.0"
] | 1 | 2019-07-10T21:23:49.000Z | 2019-07-10T21:23:49.000Z | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import MagicMock
from orion_base_action_test_case import OrionBaseActionTestCase
from node_pollnow import NodePollNow
__all__ = [
'NodePollNowTestCase'
]
class NodePollNowTestCase(OrionBaseActionTestCase):
__test__ = True
action_cls = NodePollNow
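    # The setup_* helpers used below presumably come from
    # OrionBaseActionTestCase and return action instances wired to mocked
    # Orion clients (connection failure, empty query results, existing node).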
def test_run_connect_fail(self):
action = self.setup_connect_fail()
self.assertRaises(ValueError,
action.run,
"orion",
"router1")
def test_run_node_not_exist(self):
action = self.setup_query_blank_results()
self.assertRaises(ValueError,
action.run,
"orion",
"router1")
def test_run_polled(self):
action = self.setup_node_exists()
self.assertTrue(action.run("router1", "orion"))
def test_run_polled_text(self):
expected = "fake"
action = self.setup_node_exists()
action.invoke = MagicMock(return_value="fake")
result = action.run("router1", "orion")
self.assertEqual(result, expected)
| 34 | 74 | 0.672193 |
a82c5d1dd4f3fc3cf275987b244f2519012e66cc | 94 | py | Python | feedback/admin.py | zahidtokur/office-hub | 5dd1fd094c6ba78060103f6e8c0992b3e1cb3679 | [
"MIT"
] | null | null | null | feedback/admin.py | zahidtokur/office-hub | 5dd1fd094c6ba78060103f6e8c0992b3e1cb3679 | [
"MIT"
] | null | null | null | feedback/admin.py | zahidtokur/office-hub | 5dd1fd094c6ba78060103f6e8c0992b3e1cb3679 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Feedback
admin.site.register(Feedback)
| 15.666667 | 32 | 0.819149 |
5888bf8a2ae2dd74afb7e1edf33e84215d7d649e | 197 | py | Python | coloredcoinlib/comparable.py | killerstorm/ngcccbase | 30fd033835ceeecd0eafc3461bf5b4dcfff117de | [
"MIT"
] | 31 | 2015-01-25T01:59:07.000Z | 2022-03-11T02:49:53.000Z | coloredcoinlib/comparable.py | killerstorm/ngcccbase | 30fd033835ceeecd0eafc3461bf5b4dcfff117de | [
"MIT"
] | 5 | 2015-06-16T14:43:49.000Z | 2016-07-19T12:49:16.000Z | coloredcoinlib/comparable.py | jeorgen/ngcccbase | 0a7348d95353598a320e5612166402ba676c8d33 | [
"MIT"
] | 17 | 2015-02-14T15:19:49.000Z | 2019-11-28T19:17:50.000Z | class ComparableMixin:
def __ne__(self, other):
return not (self == other)
def __ge__(self, other):
return not (self < other)
def __le__(self, other):
return not (other < self)
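# Usage sketch (illustrative, not part of the original module): a subclass
# only needs to define __eq__ and __lt__; the mixin then derives !=, >= and
# <= from them (and Python's reflected __lt__ covers >).
if __name__ == '__main__':
    class Money(ComparableMixin):
        def __init__(self, amount):
            self.amount = amount
        def __eq__(self, other):
            return self.amount == other.amount
        def __lt__(self, other):
            return self.amount < other.amount
    assert Money(1) <= Money(2)
    assert Money(2) >= Money(2)
    assert Money(1) != Money(3)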
| 19.7 | 30 | 0.649746 |
5bc0f5996c6c8dd2a370993236632533e46da99b | 2,709 | py | Python | GAN/wasserstein_gan/wgan_pytorch.py | eastonhou/generative-models | 02f19ff8f8980afea44ed0a8834bc5e1c4322b4d | [
"Unlicense"
] | 7,386 | 2016-12-15T06:54:40.000Z | 2022-03-31T16:21:47.000Z | GAN/wasserstein_gan/wgan_pytorch.py | milanhzj/generative-models | b930d5fa9e2f69adfd4ea8ec759f38f6ce6da4c2 | [
"Unlicense"
] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | GAN/wasserstein_gan/wgan_pytorch.py | milanhzj/generative-models | b930d5fa9e2f69adfd4ea8ec759f38f6ce6da4c2 | [
"Unlicense"
] | 2,247 | 2017-01-12T04:20:12.000Z | 2022-03-27T00:42:14.000Z | import torch
import torch.nn
import torch.nn.functional as nn
import torch.autograd as autograd
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from torch.autograd import Variable
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
mb_size = 32
z_dim = 10
X_dim = mnist.train.images.shape[1]
y_dim = mnist.train.labels.shape[1]
h_dim = 128
cnt = 0
lr = 1e-4
G = torch.nn.Sequential(
torch.nn.Linear(z_dim, h_dim),
torch.nn.ReLU(),
torch.nn.Linear(h_dim, X_dim),
torch.nn.Sigmoid()
)
D = torch.nn.Sequential(
torch.nn.Linear(X_dim, h_dim),
torch.nn.ReLU(),
torch.nn.Linear(h_dim, 1),
)
def reset_grad():
G.zero_grad()
D.zero_grad()
G_solver = optim.RMSprop(G.parameters(), lr=lr)
D_solver = optim.RMSprop(D.parameters(), lr=lr)
for it in range(1000000):
for _ in range(5):
# Sample data
z = Variable(torch.randn(mb_size, z_dim))
X, _ = mnist.train.next_batch(mb_size)
X = Variable(torch.from_numpy(X))
# Dicriminator forward-loss-backward-update
G_sample = G(z)
D_real = D(X)
D_fake = D(G_sample)
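        # Wasserstein critic objective: maximize E[D(x)] - E[D(G(z))],
        # implemented below by minimizing the negative of that difference.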
D_loss = -(torch.mean(D_real) - torch.mean(D_fake))
D_loss.backward()
D_solver.step()
# Weight clipping
for p in D.parameters():
p.data.clamp_(-0.01, 0.01)
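        # (Clamping keeps the critic approximately Lipschitz-continuous, a
        # requirement of the Wasserstein distance estimate; WGAN-GP later
        # replaced clipping with a gradient penalty.)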
# Housekeeping - reset gradient
reset_grad()
# Generator forward-loss-backward-update
X, _ = mnist.train.next_batch(mb_size)
X = Variable(torch.from_numpy(X))
z = Variable(torch.randn(mb_size, z_dim))
G_sample = G(z)
D_fake = D(G_sample)
G_loss = -torch.mean(D_fake)
G_loss.backward()
G_solver.step()
# Housekeeping - reset gradient
reset_grad()
# Print and plot every now and then
if it % 1000 == 0:
print('Iter-{}; D_loss: {}; G_loss: {}'
.format(it, D_loss.data.numpy(), G_loss.data.numpy()))
samples = G(z).data.numpy()[:16]
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
if not os.path.exists('out/'):
os.makedirs('out/')
plt.savefig('out/{}.png'.format(str(cnt).zfill(3)), bbox_inches='tight')
cnt += 1
plt.close(fig)
| 23.973451 | 80 | 0.612034 |
d37b717984e5be52c15bfe6ec83ac7ae11e1ca50 | 9,134 | py | Python | tts/model/decoder.py | isadrtdinov/tacotron | 994716c0f731735b0edde57b920549b83bdd89ca | [
"MIT"
] | null | null | null | tts/model/decoder.py | isadrtdinov/tacotron | 994716c0f731735b0edde57b920549b83bdd89ca | [
"MIT"
] | null | null | null | tts/model/decoder.py | isadrtdinov/tacotron | 994716c0f731735b0edde57b920549b83bdd89ca | [
"MIT"
] | null | null | null | import random
import torch
from torch import nn
from .attention import Attention
from .feedforward import PreNet
class Decoder(nn.Module):
def __init__(self, num_mels=80, prenet_dim=256, embed_dim=512,
attention_lstm_dim=1024, decoder_lstm_dim=1024,
attention_dim=128, attention_dropout=0.1, dropout=0.5,
max_frames=870, threshold=0.5):
super(Decoder, self).__init__()
self.num_mels = num_mels
self.prenet_dim = prenet_dim
self.embed_dim = embed_dim
self.attention_dim = attention_dim
self.attention_lstm_dim = attention_lstm_dim
self.decoder_lstm_dim = decoder_lstm_dim
self.max_frames = max_frames
self.threshold = threshold
self.teacher_forcing = None
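        # NOTE: teacher_forcing is left unset (None); assign a probability in
        # [0, 1] (e.g. model.teacher_forcing = 1.0) before calling forward(),
        # otherwise the `random.random() < self.teacher_forcing` comparison
        # there raises a TypeError.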
self.prenet = PreNet(dims=[num_mels, prenet_dim, prenet_dim], dropout=dropout)
self.attention_lstm = nn.LSTMCell(input_size=prenet_dim + embed_dim,
hidden_size=attention_lstm_dim)
self.attention = Attention(embed_dim, attention_dim, attention_lstm_dim,
attention_dropout)
self.decoder_lstm = nn.LSTMCell(input_size=attention_lstm_dim + embed_dim,
hidden_size=decoder_lstm_dim)
self.spec_fc = nn.Linear(in_features=decoder_lstm_dim + embed_dim,
out_features=num_mels)
self.stop_fc = nn.Linear(in_features=decoder_lstm_dim + embed_dim,
out_features=1)
def init_states(self, batch_size, device):
decoder_outputs = torch.zeros((batch_size, self.num_mels)).to(device)
attention_context = torch.zeros((batch_size, self.embed_dim)).to(device)
attention_hidden = torch.zeros((batch_size, self.attention_lstm_dim)).to(device)
attention_cell = torch.zeros((batch_size, self.attention_lstm_dim)).to(device)
decoder_hidden = torch.zeros((batch_size, self.decoder_lstm_dim)).to(device)
decoder_cell = torch.zeros((batch_size, self.decoder_lstm_dim)).to(device)
return decoder_outputs, attention_context, attention_hidden, attention_cell, \
decoder_hidden, decoder_cell
def forward(self, encoder_outputs, lengths, melspecs):
# encoder_outputs: (batch_size, char_length, embed_dim)
# lengths: (batch_size, )
# melspecs: (batch_size, frames_length, num_mels)
batch_size, char_length, _ = encoder_outputs.shape
frames_length = melspecs.shape[1]
device = encoder_outputs.device
# initialize all states with zeros
decoder_outputs, attention_context, attention_hidden, attention_cell, \
decoder_hidden, decoder_cell = self.init_states(batch_size, device)
# prepare K, V and mask for attention
K = self.attention.WK(encoder_outputs)
V = self.attention.WV(encoder_outputs)
# K: (batch_size, char_length, attention_dim)
# V: (batch_size, char_length, embed_dim)
mask = torch.arange(char_length).view(1, char_length) >= lengths.view(batch_size, 1)
mask = mask.unsqueeze(1).to(device)
# mask: (batch_size, 1, char_length)
output_melspecs, output_probs, attention = [], [], []
for i in range(frames_length):
# teacher forcing
if i > 0 and random.random() < self.teacher_forcing:
decoder_outputs = melspecs[:, i - 1]
# PreNet for previous step
prenet_outputs = self.prenet(decoder_outputs)
# prenet_outputs: (batch_size, prenet_dim)
# attention LSTM
attention_lstm_inputs = torch.cat([prenet_outputs, attention_context], dim=1)
# attention_lstm_inputs: (batch_size, prenet_dim + embed_dim)
attention_hidden, attention_cell = self.attention_lstm(attention_lstm_inputs,
(attention_hidden, attention_cell))
# attention_hidden, attention_cell: (batch_size, attention_lstm_dim)
attention_context, attention_probs = self.attention(query=attention_hidden.unsqueeze(1),
K=K, V=V, mask=mask)
attention += [attention_probs]
attention_context = attention_context.squeeze(1)
# attention_context: (batch_size, embed_dim)
decoder_lstm_inputs = torch.cat([attention_hidden, attention_context], dim=1)
# decoder_lstm_inputs: (batch_size, attention_lstm_dim + embed_dim)
            decoder_hidden, decoder_cell = self.decoder_lstm(decoder_lstm_inputs,
                                                             (decoder_hidden, decoder_cell))
# decoder_hidden, decoder_cell: (batch_size, decoder_lstm_dim)
frame_features = torch.cat([decoder_hidden, attention_context], dim=1)
# frame_features: (batch_size, decoder_lstm_dim + embed_dim)
decoder_outputs = self.spec_fc(frame_features)
stop_probs = torch.sigmoid(self.stop_fc(frame_features))
# decoder_outputs: (batch_size, num_mels)
# stop_probs: (batch_size, 1)
output_melspecs += [decoder_outputs.unsqueeze(1)]
output_probs += [stop_probs]
output_melspecs = torch.cat(output_melspecs, dim=1)
output_probs = torch.cat(output_probs, dim=1)
attention = torch.cat(attention, dim=1)
        # output_melspecs: (batch_size, frames_length, num_mels)
# output_probs: (batch_size, frames_length)
# attention: (batch_size, frames_length, char_length)
return output_melspecs, output_probs, attention
def inference(self, encoder_outputs, lengths):
# encoder_outputs: (batch_size, char_length, embed_dim)
# lengths: (batch_size, )
batch_size, char_length, _ = encoder_outputs.shape
device = encoder_outputs.device
# initialize all states with zeros
decoder_outputs, attention_context, attention_hidden, attention_cell, \
decoder_hidden, decoder_cell = self.init_states(batch_size, device)
# prepare K, V and mask for attention
K = self.attention.WK(encoder_outputs)
V = self.attention.WV(encoder_outputs)
# K: (batch_size, char_length, attention_dim)
# V: (batch_size, char_length, embed_dim)
mask = torch.arange(char_length).view(1, char_length) >= lengths.view(batch_size, 1)
mask = mask.unsqueeze(1).to(device)
# mask: (batch_size, 1, char_length)
output_melspecs, output_probs, attention = [], [], []
for i in range(self.max_frames):
# PreNet for previous step
prenet_outputs = self.prenet(decoder_outputs)
# prenet_outputs: (batch_size, prenet_dim)
# attention LSTM
attention_lstm_inputs = torch.cat([prenet_outputs, attention_context], dim=1)
# attention_lstm_inputs: (batch_size, prenet_dim + embed_dim)
attention_hidden, attention_cell = self.attention_lstm(attention_lstm_inputs,
(attention_hidden, attention_cell))
# attention_hidden, attention_cell: (batch_size, attention_lstm_dim)
attention_context, attention_probs = self.attention(query=attention_hidden.unsqueeze(1),
K=K, V=V, mask=mask)
attention += [attention_probs]
attention_context = attention_context.squeeze(1)
# attention_context: (batch_size, embed_dim)
decoder_lstm_inputs = torch.cat([attention_hidden, attention_context], dim=1)
# decoder_lstm_inputs: (batch_size, attention_lstm_dim + embed_dim)
            decoder_hidden, decoder_cell = self.decoder_lstm(decoder_lstm_inputs,
                                                             (decoder_hidden, decoder_cell))
# decoder_hidden, decoder_cell: (batch_size, decoder_lstm_dim)
frame_features = torch.cat([decoder_hidden, attention_context], dim=1)
# frame_features: (batch_size, decoder_lstm_dim + embed_dim)
decoder_outputs = self.spec_fc(frame_features)
stop_probs = torch.sigmoid(self.stop_fc(frame_features))
            # decoder_outputs: (batch_size, num_mels)
# stop_probs: (batch_size, 1)
output_melspecs += [decoder_outputs.unsqueeze(1)]
output_probs += [stop_probs]
if i > 0 and torch.all(stop_probs > self.threshold):
break
output_melspecs = torch.cat(output_melspecs, dim=1)
output_probs = torch.cat(output_probs, dim=1)
attention = torch.cat(attention, dim=1)
        # output_melspecs: (batch_size, frames_length, num_mels)
# output_probs: (batch_size, frames_length)
# attention: (batch_size, frames_length, char_length)
return output_melspecs, output_probs, attention
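# Shape walk-through (illustrative): with batch_size=2, char_length=7 and a
# 50-frame target, forward() consumes encoder_outputs (2, 7, 512), lengths
# (2,) and melspecs (2, 50, 80), returning output_melspecs (2, 50, 80),
# output_probs (2, 50) and an attention matrix (2, 50, 7).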
| 46.365482 | 102 | 0.635866 |
b5cc0eec40c500c8039fc741b2b226e66a7f1c56 | 231 | py | Python | marcas/models/atani_marcas.py | pcs2216/modulos_atani | e3c1c5ce979113e043ed020a2d678665fb9412b0 | [
"Apache-2.0"
] | null | null | null | marcas/models/atani_marcas.py | pcs2216/modulos_atani | e3c1c5ce979113e043ed020a2d678665fb9412b0 | [
"Apache-2.0"
] | null | null | null | marcas/models/atani_marcas.py | pcs2216/modulos_atani | e3c1c5ce979113e043ed020a2d678665fb9412b0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from odoo import api, fields, models
class x_Marcas(models.Model):
_name = 'x.model.marcas'
_description = 'Marcas de productos'
x_name = fields.Char(
string='Marca',
)
| 16.5 | 40 | 0.588745 |
41354e71c2e137f6174bae89bec3b6049bf47ed1 | 342 | py | Python | signage/tools/cacheitem.py | whetra/SignagePyQt | 1bad349247f38e858ba1934151c72492b63f03ad | [
"MIT"
] | 1 | 2020-07-03T01:34:33.000Z | 2020-07-03T01:34:33.000Z | signage/tools/cacheitem.py | whetra/SignagePyQt | 1bad349247f38e858ba1934151c72492b63f03ad | [
"MIT"
] | null | null | null | signage/tools/cacheitem.py | whetra/SignagePyQt | 1bad349247f38e858ba1934151c72492b63f03ad | [
"MIT"
] | 1 | 2020-09-30T15:42:51.000Z | 2020-09-30T15:42:51.000Z | from datetime import datetime
from datetime import timedelta
class CacheItem:
def __init__(self, key, value):
self.key = key
self.value = value
self.time = datetime.now()
def is_expired(self, ttl: timedelta):
if ttl is None:
return False
return datetime.now() > self.time + ttl
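# Usage sketch (illustrative): an item expires once its age exceeds the given
# TTL; a TTL of None means it never expires.
if __name__ == '__main__':
    item = CacheItem('greeting', 'hello')
    print(item.is_expired(timedelta(minutes=5)))   # False: just created
    print(item.is_expired(timedelta(seconds=-1)))  # True: deadline already passed
    print(item.is_expired(None))                   # False: no TTL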
| 21.375 | 47 | 0.619883 |
cbcfdde538ec9a1d0a179b7292a819e2d09d42d0 | 3,165 | py | Python | fixture/group.py | ManiKarnika/python_automation | 0dc466cabdabc1a861dc3b70865d896047ba0fe7 | [
"Apache-2.0"
] | null | null | null | fixture/group.py | ManiKarnika/python_automation | 0dc466cabdabc1a861dc3b70865d896047ba0fe7 | [
"Apache-2.0"
] | null | null | null | fixture/group.py | ManiKarnika/python_automation | 0dc466cabdabc1a861dc3b70865d896047ba0fe7 | [
"Apache-2.0"
] | null | null | null | from model.group import Group
class GroupHelper:
def __init__(self, app):
self.app = app
def return_to_groups_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
def create(self, group):
wd = self.app.wd
self.app.open_home_page()
# init group creation
self.open_group_page()
wd.find_element_by_name("new").click()
self.fill_group_form(group)
wd.find_element_by_name("submit").click()
self.return_to_groups_page()
self.groups_cash = None
def delete_first_group(self):
self.delete_group_by_index(0)
def delete_group_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
self.open_group_page()
# submit deletion
self.select_group_by_index(index)
wd.find_element_by_name("delete").click()
# return to group
self.return_to_groups_page()
self.groups_cash = None
def modify_first_group(self, new_group_data):
self.modify_group_by_index(0, new_group_data)
def modify_group_by_index(self, index, new_group_data):
wd = self.app.wd
self.app.open_home_page()
self.open_group_page()
self.select_group_by_index(index)
wd.find_element_by_name("edit").click()
self.fill_group_form(new_group_data)
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.groups_cash = None
def fill_group_form(self, group):
self.change_field_value("group_name", group.name)
self.change_field_value("group_header", group.header)
self.change_field_value("group_footer", group.footer)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def select_first_group(self):
        self.select_group_by_index(0)
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def open_group_page(self):
wd = self.app.wd
if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
def count(self):
wd = self.app.wd
self.open_group_page()
return len(wd.find_elements_by_name("selected[]"))
groups_cash = None
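    # Cached copy of the group list; every mutating action above resets it to
    # None so the next get_group_list() call re-reads the page.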
def get_group_list(self):
if self.groups_cash is None:
wd = self.app.wd
self.open_group_page()
self.groups_cash = []
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.groups_cash.append(Group(name=text, id=id))
return list(self.groups_cash)
| 33.670213 | 100 | 0.642654 |
d7c69afe323dfb3df0ab7e156e0e5069175ca573 | 31,259 | py | Python | IJB_evals.py | leondgarse/Keras_insightface | 7bdda6b831065a8418e63a18ba97c457df62a994 | [
"MIT"
] | 123 | 2020-01-17T15:23:03.000Z | 2022-03-29T07:31:11.000Z | IJB_evals.py | leondgarse/Keras_insightface | 7bdda6b831065a8418e63a18ba97c457df62a994 | [
"MIT"
] | 47 | 2020-05-18T10:25:04.000Z | 2022-03-06T10:44:41.000Z | IJB_evals.py | leondgarse/Keras_insightface | 7bdda6b831065a8418e63a18ba97c457df62a994 | [
"MIT"
] | 63 | 2020-04-22T05:43:17.000Z | 2022-03-26T09:40:27.000Z | #!/usr/bin/env python3
import os
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm
from skimage import transform
from sklearn.preprocessing import normalize
from sklearn.metrics import roc_curve, auc
class Mxnet_model_interf:
def __init__(self, model_file, layer="fc1", image_size=(112, 112)):
import mxnet as mx
self.mx = mx
cvd = os.environ.get("CUDA_VISIBLE_DEVICES", "").strip()
if len(cvd) > 0 and int(cvd) != -1:
ctx = [self.mx.gpu(ii) for ii in range(len(cvd.split(",")))]
else:
ctx = [self.mx.cpu()]
prefix, epoch = model_file.split(",")
print(">>>> loading mxnet model:", prefix, epoch, ctx)
sym, arg_params, aux_params = self.mx.model.load_checkpoint(prefix, int(epoch))
all_layers = sym.get_internals()
sym = all_layers[layer + "_output"]
model = self.mx.mod.Module(symbol=sym, context=ctx, label_names=None)
model.bind(data_shapes=[("data", (1, 3, image_size[0], image_size[1]))])
model.set_params(arg_params, aux_params)
self.model = model
def __call__(self, imgs):
# print(imgs.shape, imgs[0])
imgs = imgs.transpose(0, 3, 1, 2)
data = self.mx.nd.array(imgs)
db = self.mx.io.DataBatch(data=(data,))
self.model.forward(db, is_train=False)
emb = self.model.get_outputs()[0].asnumpy()
return emb
class Torch_model_interf:
def __init__(self, model_file, image_size=(112, 112)):
import torch
self.torch = torch
cvd = os.environ.get("CUDA_VISIBLE_DEVICES", "").strip()
device_name = "cuda:0" if len(cvd) > 0 and int(cvd) != -1 else "cpu"
self.device = self.torch.device(device_name)
try:
self.model = self.torch.jit.load(model_file, map_location=device_name)
except:
print("Error: %s is weights only, please load and save the entire model by `torch.jit.save`" % model_file)
self.model = None
def __call__(self, imgs):
# print(imgs.shape, imgs[0])
imgs = imgs.transpose(0, 3, 1, 2).copy().astype("float32")
imgs = (imgs - 127.5) * 0.0078125
output = self.model(self.torch.from_numpy(imgs).to(self.device).float())
return output.cpu().detach().numpy()
class ONNX_model_interf:
def __init__(self, model_file, image_size=(112, 112)):
import onnxruntime as ort
ort.set_default_logger_severity(3)
self.ort_session = ort.InferenceSession(model_file)
self.output_names = [self.ort_session.get_outputs()[0].name]
self.input_name = self.ort_session.get_inputs()[0].name
def __call__(self, imgs):
imgs = imgs.transpose(0, 3, 1, 2).astype("float32")
imgs = (imgs - 127.5) * 0.0078125
outputs = self.ort_session.run(self.output_names, {self.input_name: imgs})
return outputs[0]
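# Note: the Torch/ONNX/Keras interfaces normalize pixels to roughly [-1, 1]
# via (img - 127.5) * 0.0078125, while the Mxnet interface feeds raw values;
# this presumably mirrors the preprocessing each model family was exported with.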
def keras_model_interf(model_file):
import tensorflow as tf
from tensorflow_addons.layers import StochasticDepth
for gpu in tf.config.experimental.list_physical_devices("GPU"):
tf.config.experimental.set_memory_growth(gpu, True)
mm = tf.keras.models.load_model(model_file, compile=False)
return lambda imgs: mm((tf.cast(imgs, "float32") - 127.5) * 0.0078125).numpy()
def face_align_landmark(img, landmark, image_size=(112, 112), method="similar"):
tform = transform.AffineTransform() if method == "affine" else transform.SimilarityTransform()
src = np.array([[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], [41.5493, 92.3655], [70.729904, 92.2041]], dtype=np.float32)
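    # (These five src points are the standard eye/eye/nose/mouth-corner
    # landmark template used to warp faces into a 112x112 ArcFace-style crop.)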
tform.estimate(landmark, src)
# ndimage = transform.warp(img, tform.inverse, output_shape=image_size)
# ndimage = (ndimage * 255).astype(np.uint8)
M = tform.params[0:2, :]
ndimage = cv2.warpAffine(img, M, image_size, borderValue=0.0)
if len(ndimage.shape) == 2:
ndimage = np.stack([ndimage, ndimage, ndimage], -1)
else:
ndimage = cv2.cvtColor(ndimage, cv2.COLOR_BGR2RGB)
return ndimage
def read_IJB_meta_columns_to_int(file_path, columns, sep=" ", skiprows=0, header=None):
# meta = np.loadtxt(file_path, skiprows=skiprows, delimiter=sep)
meta = pd.read_csv(file_path, sep=sep, skiprows=skiprows, header=header).values
return (meta[:, ii].astype("int") for ii in columns)
def extract_IJB_data_11(data_path, subset, save_path=None, force_reload=False):
if save_path == None:
save_path = os.path.join(data_path, subset + "_backup.npz")
if not force_reload and os.path.exists(save_path):
print(">>>> Reload from backup: %s ..." % save_path)
aa = np.load(save_path)
return (
aa["templates"],
aa["medias"],
aa["p1"],
aa["p2"],
aa["label"],
aa["img_names"],
aa["landmarks"],
aa["face_scores"],
)
if subset == "IJBB":
media_list_path = os.path.join(data_path, "IJBB/meta/ijbb_face_tid_mid.txt")
pair_list_path = os.path.join(data_path, "IJBB/meta/ijbb_template_pair_label.txt")
img_path = os.path.join(data_path, "IJBB/loose_crop")
img_list_path = os.path.join(data_path, "IJBB/meta/ijbb_name_5pts_score.txt")
else:
media_list_path = os.path.join(data_path, "IJBC/meta/ijbc_face_tid_mid.txt")
pair_list_path = os.path.join(data_path, "IJBC/meta/ijbc_template_pair_label.txt")
img_path = os.path.join(data_path, "IJBC/loose_crop")
img_list_path = os.path.join(data_path, "IJBC/meta/ijbc_name_5pts_score.txt")
print(">>>> Loading templates and medias...")
templates, medias = read_IJB_meta_columns_to_int(media_list_path, columns=[1, 2]) # ['1.jpg', '1', '69544']
print("templates: %s, medias: %s, unique templates: %s" % (templates.shape, medias.shape, np.unique(templates).shape))
# templates: (227630,), medias: (227630,), unique templates: (12115,)
print(">>>> Loading pairs...")
p1, p2, label = read_IJB_meta_columns_to_int(pair_list_path, columns=[0, 1, 2]) # ['1', '11065', '1']
print("p1: %s, unique p1: %s" % (p1.shape, np.unique(p1).shape))
print("p2: %s, unique p2: %s" % (p2.shape, np.unique(p2).shape))
print("label: %s, label value counts: %s" % (label.shape, dict(zip(*np.unique(label, return_counts=True)))))
# p1: (8010270,), unique p1: (1845,)
# p2: (8010270,), unique p2: (10270,) # 10270 + 1845 = 12115 --> np.unique(templates).shape
# label: (8010270,), label value counts: {0: 8000000, 1: 10270}
print(">>>> Loading images...")
with open(img_list_path, "r") as ff:
# 1.jpg 46.060 62.026 87.785 60.323 68.851 77.656 52.162 99.875 86.450 98.648 0.999
img_records = np.array([ii.strip().split(" ") for ii in ff.readlines()])
img_names = np.array([os.path.join(img_path, ii) for ii in img_records[:, 0]])
landmarks = img_records[:, 1:-1].astype("float32").reshape(-1, 5, 2)
face_scores = img_records[:, -1].astype("float32")
print("img_names: %s, landmarks: %s, face_scores: %s" % (img_names.shape, landmarks.shape, face_scores.shape))
# img_names: (227630,), landmarks: (227630, 5, 2), face_scores: (227630,)
print("face_scores value counts:", dict(zip(*np.histogram(face_scores, bins=9)[::-1])))
# {0.1: 2515, 0.2: 0, 0.3: 62, 0.4: 94, 0.5: 136, 0.6: 197, 0.7: 291, 0.8: 538, 0.9: 223797}
print(">>>> Saving backup to: %s ..." % save_path)
np.savez(
save_path,
templates=templates,
medias=medias,
p1=p1,
p2=p2,
label=label,
img_names=img_names,
landmarks=landmarks,
face_scores=face_scores,
)
print()
return templates, medias, p1, p2, label, img_names, landmarks, face_scores
def extract_gallery_prob_data(data_path, subset, save_path=None, force_reload=False):
if save_path == None:
save_path = os.path.join(data_path, subset + "_gallery_prob_backup.npz")
if not force_reload and os.path.exists(save_path):
print(">>>> Reload from backup: %s ..." % save_path)
aa = np.load(save_path)
return (
aa["s1_templates"],
aa["s1_subject_ids"],
aa["s2_templates"],
aa["s2_subject_ids"],
aa["probe_mixed_templates"],
aa["probe_mixed_subject_ids"],
)
if subset == "IJBC":
meta_dir = os.path.join(data_path, "IJBC/meta")
gallery_s1_record = os.path.join(meta_dir, "ijbc_1N_gallery_G1.csv")
gallery_s2_record = os.path.join(meta_dir, "ijbc_1N_gallery_G2.csv")
probe_mixed_record = os.path.join(meta_dir, "ijbc_1N_probe_mixed.csv")
else:
meta_dir = os.path.join(data_path, "IJBB/meta")
gallery_s1_record = os.path.join(meta_dir, "ijbb_1N_gallery_S1.csv")
gallery_s2_record = os.path.join(meta_dir, "ijbb_1N_gallery_S2.csv")
probe_mixed_record = os.path.join(meta_dir, "ijbb_1N_probe_mixed.csv")
print(">>>> Loading gallery feature...")
s1_templates, s1_subject_ids = read_IJB_meta_columns_to_int(gallery_s1_record, columns=[0, 1], skiprows=1, sep=",")
s2_templates, s2_subject_ids = read_IJB_meta_columns_to_int(gallery_s2_record, columns=[0, 1], skiprows=1, sep=",")
print("s1 gallery: %s, ids: %s, unique: %s" % (s1_templates.shape, s1_subject_ids.shape, np.unique(s1_templates).shape))
print("s2 gallery: %s, ids: %s, unique: %s" % (s2_templates.shape, s2_subject_ids.shape, np.unique(s2_templates).shape))
>>> Loading prope">
    print(">>>> Loading probe feature...")
probe_mixed_templates, probe_mixed_subject_ids = read_IJB_meta_columns_to_int(probe_mixed_record, columns=[0, 1], skiprows=1, sep=",")
print("probe_mixed_templates: %s, unique: %s" % (probe_mixed_templates.shape, np.unique(probe_mixed_templates).shape))
print("probe_mixed_subject_ids: %s, unique: %s" % (probe_mixed_subject_ids.shape, np.unique(probe_mixed_subject_ids).shape))
print(">>>> Saving backup to: %s ..." % save_path)
np.savez(
save_path,
s1_templates=s1_templates,
s1_subject_ids=s1_subject_ids,
s2_templates=s2_templates,
s2_subject_ids=s2_subject_ids,
probe_mixed_templates=probe_mixed_templates,
probe_mixed_subject_ids=probe_mixed_subject_ids,
)
print()
return s1_templates, s1_subject_ids, s2_templates, s2_subject_ids, probe_mixed_templates, probe_mixed_subject_ids
def get_embeddings(model_interf, img_names, landmarks, batch_size=64, flip=True):
steps = int(np.ceil(len(img_names) / batch_size))
embs, embs_f = [], []
for batch_id in tqdm(range(0, len(img_names), batch_size), "Embedding", total=steps):
batch_imgs, batch_landmarks = img_names[batch_id : batch_id + batch_size], landmarks[batch_id : batch_id + batch_size]
ndimages = [face_align_landmark(cv2.imread(img), landmark) for img, landmark in zip(batch_imgs, batch_landmarks)]
ndimages = np.stack(ndimages)
embs.extend(model_interf(ndimages))
if flip:
embs_f.extend(model_interf(ndimages[:, :, ::-1, :]))
return np.array(embs), np.array(embs_f)
def process_embeddings(embs, embs_f=[], use_flip_test=True, use_norm_score=False, use_detector_score=True, face_scores=None):
print(">>>> process_embeddings: Norm {}, Detect_score {}, Flip {}".format(use_norm_score, use_detector_score, use_flip_test))
if use_flip_test and len(embs_f) != 0:
embs = embs + embs_f
if use_norm_score:
embs = normalize(embs)
if use_detector_score and face_scores is not None:
embs = embs * np.expand_dims(face_scores, -1)
return embs
def image2template_feature(img_feats=None, templates=None, medias=None, choose_templates=None, choose_ids=None):
if choose_templates is not None: # 1:N
unique_templates, indices = np.unique(choose_templates, return_index=True)
unique_subjectids = choose_ids[indices]
else: # 1:1
unique_templates = np.unique(templates)
unique_subjectids = None
# template_feats = np.zeros((len(unique_templates), img_feats.shape[1]), dtype=img_feats.dtype)
template_feats = np.zeros((len(unique_templates), img_feats.shape[1]))
for count_template, uqt in tqdm(enumerate(unique_templates), "Extract template feature", total=len(unique_templates)):
(ind_t,) = np.where(templates == uqt)
face_norm_feats = img_feats[ind_t]
face_medias = medias[ind_t]
unique_medias, unique_media_counts = np.unique(face_medias, return_counts=True)
media_norm_feats = []
for u, ct in zip(unique_medias, unique_media_counts):
(ind_m,) = np.where(face_medias == u)
if ct == 1:
media_norm_feats += [face_norm_feats[ind_m]]
else: # image features from the same video will be aggregated into one feature
media_norm_feats += [np.mean(face_norm_feats[ind_m], 0, keepdims=True)]
media_norm_feats = np.array(media_norm_feats)
# media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True))
template_feats[count_template] = np.sum(media_norm_feats, 0)
template_norm_feats = normalize(template_feats)
return template_norm_feats, unique_templates, unique_subjectids
def verification_11(template_norm_feats=None, unique_templates=None, p1=None, p2=None, batch_size=10000):
try:
print(">>>> Trying cupy.")
import cupy as cp
template_norm_feats = cp.array(template_norm_feats)
score_func = lambda feat1, feat2: cp.sum(feat1 * feat2, axis=-1).get()
test = score_func(template_norm_feats[:batch_size], template_norm_feats[:batch_size])
except:
score_func = lambda feat1, feat2: np.sum(feat1 * feat2, -1)
template2id = np.zeros(max(unique_templates) + 1, dtype=int)
template2id[unique_templates] = np.arange(len(unique_templates))
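    # template2id is a dense O(1) lookup table mapping a template id to its
    # row in template_norm_feats, e.g. unique_templates=[3, 7] gives
    # template2id[3] = 0 and template2id[7] = 1.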
steps = int(np.ceil(len(p1) / batch_size))
score = []
for id in tqdm(range(steps), "Verification"):
feat1 = template_norm_feats[template2id[p1[id * batch_size : (id + 1) * batch_size]]]
feat2 = template_norm_feats[template2id[p2[id * batch_size : (id + 1) * batch_size]]]
score.extend(score_func(feat1, feat2))
return np.array(score)
def evaluation_1N(query_feats, gallery_feats, query_ids, reg_ids, fars=[0.01, 0.1]):
print("query_feats: %s, gallery_feats: %s" % (query_feats.shape, gallery_feats.shape))
similarity = np.dot(query_feats, gallery_feats.T) # (19593, 3531)
top_1_count, top_5_count, top_10_count = 0, 0, 0
pos_sims, neg_sims, non_gallery_sims = [], [], []
for index, query_id in enumerate(query_ids):
if query_id in reg_ids:
gallery_label = np.argwhere(reg_ids == query_id)[0, 0]
index_sorted = np.argsort(similarity[index])[::-1]
top_1_count += gallery_label in index_sorted[:1]
top_5_count += gallery_label in index_sorted[:5]
top_10_count += gallery_label in index_sorted[:10]
pos_sims.append(similarity[index][reg_ids == query_id][0])
neg_sims.append(similarity[index][reg_ids != query_id])
else:
non_gallery_sims.append(similarity[index])
total_pos = len(pos_sims)
pos_sims, neg_sims, non_gallery_sims = np.array(pos_sims), np.array(neg_sims), np.array(non_gallery_sims)
print("pos_sims: %s, neg_sims: %s, non_gallery_sims: %s" % (pos_sims.shape, neg_sims.shape, non_gallery_sims.shape))
print("top1: %f, top5: %f, top10: %f" % (top_1_count / total_pos, top_5_count / total_pos, top_10_count / total_pos))
correct_pos_cond = pos_sims > neg_sims.max(1)
non_gallery_sims_sorted = np.sort(non_gallery_sims.max(1))[::-1]
threshes, recalls = [], []
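    # Open-set identification: for each target FAR, take the threshold such
    # that roughly `far` of the non-mated probes (those without a gallery
    # entry) score above it, then count a mated probe as detected only if it
    # is rank-1 correct and its positive score clears that threshold (TPIR).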
for far in fars:
# thresh = non_gallery_sims_sorted[int(np.ceil(non_gallery_sims_sorted.shape[0] * far)) - 1]
thresh = non_gallery_sims_sorted[max(int((non_gallery_sims_sorted.shape[0]) * far) - 1, 0)]
recall = np.logical_and(correct_pos_cond, pos_sims > thresh).sum() / pos_sims.shape[0]
threshes.append(thresh)
recalls.append(recall)
# print("FAR = {:.10f} TPIR = {:.10f} th = {:.10f}".format(far, recall, thresh))
cmc_scores = list(zip(neg_sims, pos_sims.reshape(-1, 1))) + list(zip(non_gallery_sims, [None] * non_gallery_sims.shape[0]))
return top_1_count, top_5_count, top_10_count, threshes, recalls, cmc_scores
class IJB_test:
def __init__(self, model_file, data_path, subset, batch_size=64, force_reload=False, restore_embs=None):
templates, medias, p1, p2, label, img_names, landmarks, face_scores = extract_IJB_data_11(data_path, subset, force_reload=force_reload)
if model_file != None:
if model_file.endswith(".h5"):
interf_func = keras_model_interf(model_file)
elif model_file.endswith(".pth") or model_file.endswith(".pt"):
interf_func = Torch_model_interf(model_file)
elif model_file.endswith(".onnx") or model_file.endswith(".ONNX"):
interf_func = ONNX_model_interf(model_file)
else:
interf_func = Mxnet_model_interf(model_file)
self.embs, self.embs_f = get_embeddings(interf_func, img_names, landmarks, batch_size=batch_size)
elif restore_embs != None:
print(">>>> Reload embeddings from:", restore_embs)
aa = np.load(restore_embs)
if "embs" in aa and "embs_f" in aa:
self.embs, self.embs_f = aa["embs"], aa["embs_f"]
else:
print("ERROR: %s NOT containing embs / embs_f" % restore_embs)
exit(1)
print(">>>> Done.")
self.data_path, self.subset, self.force_reload = data_path, subset, force_reload
self.templates, self.medias, self.p1, self.p2, self.label = templates, medias, p1, p2, label
self.face_scores = face_scores.astype(self.embs.dtype)
def run_model_test_single(self, use_flip_test=True, use_norm_score=False, use_detector_score=True):
img_input_feats = process_embeddings(
self.embs,
self.embs_f,
use_flip_test=use_flip_test,
use_norm_score=use_norm_score,
use_detector_score=use_detector_score,
face_scores=self.face_scores,
)
template_norm_feats, unique_templates, _ = image2template_feature(img_input_feats, self.templates, self.medias)
score = verification_11(template_norm_feats, unique_templates, self.p1, self.p2)
return score
def run_model_test_bunch(self):
from itertools import product
scores, names = [], []
for use_norm_score, use_detector_score, use_flip_test in product([True, False], [True, False], [True, False]):
name = "N{:d}D{:d}F{:d}".format(use_norm_score, use_detector_score, use_flip_test)
print(">>>>", name, use_norm_score, use_detector_score, use_flip_test)
names.append(name)
scores.append(self.run_model_test_single(use_flip_test, use_norm_score, use_detector_score))
return scores, names
def run_model_test_1N(self, npoints=100):
        fars_cal = [10 ** ii for ii in np.arange(-4, 0, 4 / npoints)] + [1]  # plot in range [1e-4, 1]
fars_show_idx = np.arange(len(fars_cal))[:: npoints // 4] # npoints=100, fars_show=[0.0001, 0.001, 0.01, 0.1, 1.0]
g1_templates, g1_ids, g2_templates, g2_ids, probe_mixed_templates, probe_mixed_ids = extract_gallery_prob_data(
self.data_path, self.subset, force_reload=self.force_reload
)
img_input_feats = process_embeddings(
self.embs,
self.embs_f,
use_flip_test=True,
use_norm_score=False,
use_detector_score=True,
face_scores=self.face_scores,
)
g1_templates_feature, g1_unique_templates, g1_unique_ids = image2template_feature(img_input_feats, self.templates, self.medias, g1_templates, g1_ids)
g2_templates_feature, g2_unique_templates, g2_unique_ids = image2template_feature(img_input_feats, self.templates, self.medias, g2_templates, g2_ids)
probe_mixed_templates_feature, probe_mixed_unique_templates, probe_mixed_unique_subject_ids = image2template_feature(
img_input_feats, self.templates, self.medias, probe_mixed_templates, probe_mixed_ids
)
print("g1_templates_feature:", g1_templates_feature.shape) # (1772, 512)
print("g2_templates_feature:", g2_templates_feature.shape) # (1759, 512)
print("probe_mixed_templates_feature:", probe_mixed_templates_feature.shape) # (19593, 512)
print("probe_mixed_unique_subject_ids:", probe_mixed_unique_subject_ids.shape) # (19593,)
print(">>>> Gallery 1")
g1_top_1_count, g1_top_5_count, g1_top_10_count, g1_threshes, g1_recalls, g1_cmc_scores = evaluation_1N(
probe_mixed_templates_feature, g1_templates_feature, probe_mixed_unique_subject_ids, g1_unique_ids, fars_cal
)
print(">>>> Gallery 2")
g2_top_1_count, g2_top_5_count, g2_top_10_count, g2_threshes, g2_recalls, g2_cmc_scores = evaluation_1N(
probe_mixed_templates_feature, g2_templates_feature, probe_mixed_unique_subject_ids, g2_unique_ids, fars_cal
)
print(">>>> Mean")
query_num = probe_mixed_templates_feature.shape[0]
top_1 = (g1_top_1_count + g2_top_1_count) / query_num
top_5 = (g1_top_5_count + g2_top_5_count) / query_num
top_10 = (g1_top_10_count + g2_top_10_count) / query_num
print("[Mean] top1: %f, top5: %f, top10: %f" % (top_1, top_5, top_10))
mean_tpirs = (np.array(g1_recalls) + np.array(g2_recalls)) / 2
show_result = {}
for id, far in enumerate(fars_cal):
if id in fars_show_idx:
show_result.setdefault("far", []).append(far)
show_result.setdefault("g1_tpir", []).append(g1_recalls[id])
show_result.setdefault("g1_thresh", []).append(g1_threshes[id])
show_result.setdefault("g2_tpir", []).append(g2_recalls[id])
show_result.setdefault("g2_thresh", []).append(g2_threshes[id])
show_result.setdefault("mean_tpir", []).append(mean_tpirs[id])
print(pd.DataFrame(show_result).set_index("far").to_markdown())
return fars_cal, mean_tpirs, g1_cmc_scores, g2_cmc_scores
def plot_roc_and_calculate_tpr(scores, names=None, label=None):
print(">>>> plot roc and calculate tpr...")
score_dict = {}
for id, score in enumerate(scores):
name = None if names is None else names[id]
if isinstance(score, str) and score.endswith(".npz"):
aa = np.load(score)
score = aa.get("scores", [])
label = aa["label"] if label is None and "label" in aa else label
score_name = aa.get("names", [])
for ss, nn in zip(score, score_name):
score_dict[nn] = ss
elif isinstance(score, str) and score.endswith(".npy"):
name = name if name is not None else os.path.splitext(os.path.basename(score))[0]
score_dict[name] = np.load(score)
elif isinstance(score, str) and score.endswith(".txt"):
# IJB meta data like ijbb_template_pair_label.txt
label = pd.read_csv(score, sep=" ", header=None).values[:, 2]
else:
name = name if name is not None else str(id)
score_dict[name] = score
if label is None:
print("Error: Label data is not provided")
return None, None
x_labels = [10 ** (-ii) for ii in range(1, 7)[::-1]]
fpr_dict, tpr_dict, roc_auc_dict, tpr_result = {}, {}, {}, {}
for name, score in score_dict.items():
fpr, tpr, _ = roc_curve(label, score)
roc_auc = auc(fpr, tpr)
fpr, tpr = np.flipud(fpr), np.flipud(tpr) # select largest tpr at same fpr
tpr_result[name] = [tpr[np.argmin(abs(fpr - ii))] for ii in x_labels]
fpr_dict[name], tpr_dict[name], roc_auc_dict[name] = fpr, tpr, roc_auc
tpr_result_df = pd.DataFrame(tpr_result, index=x_labels).T
tpr_result_df["AUC"] = pd.Series(roc_auc_dict)
tpr_result_df.columns.name = "Methods"
print(tpr_result_df.to_markdown())
# print(tpr_result_df)
try:
import matplotlib.pyplot as plt
fig = plt.figure()
for name in score_dict:
plt.plot(fpr_dict[name], tpr_dict[name], lw=1, label="[%s (AUC = %0.4f%%)]" % (name, roc_auc_dict[name] * 100))
title = "ROC on IJB" + name.split("IJB")[-1][0] if "IJB" in name else "ROC on IJB"
plt.xlim([10 ** -6, 0.1])
plt.xscale("log")
plt.xticks(x_labels)
plt.xlabel("False Positive Rate")
plt.ylim([0.3, 1.0])
plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True))
plt.ylabel("True Positive Rate")
plt.grid(linestyle="--", linewidth=1)
plt.title(title)
plt.legend(loc="lower right", fontsize="x-small")
plt.tight_layout()
plt.show()
except:
print("matplotlib plot failed")
fig = None
return tpr_result_df, fig
def plot_dir_far_cmc_scores(scores, names=None):
try:
import matplotlib.pyplot as plt
fig = plt.figure()
for id, score in enumerate(scores):
name = None if names is None else names[id]
if isinstance(score, str) and score.endswith(".npz"):
aa = np.load(score)
score, name = aa.get("scores")[0], aa.get("names")[0]
fars, tpirs = score[0], score[1]
name = name if name is not None else str(id)
auc_value = auc(fars, tpirs)
label = "[%s (AUC = %0.4f%%)]" % (name, auc_value * 100)
plt.plot(fars, tpirs, lw=1, label=label)
plt.xlabel("False Alarm Rate")
plt.xlim([0.0001, 1])
plt.xscale("log")
plt.ylabel("Detection & Identification Rate (%)")
plt.ylim([0, 1])
plt.grid(linestyle="--", linewidth=1)
plt.legend(fontsize="x-small")
plt.tight_layout()
plt.show()
except:
print("matplotlib plot failed")
fig = None
return fig
def parse_arguments(argv):
import argparse
default_save_result_name = "IJB_result/{model_name}_{subset}_{type}.npz"
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-m", "--model_file", type=str, default=None, help="Saved model, keras h5 / pytorch jit pth / onnx / mxnet")
parser.add_argument("-d", "--data_path", type=str, default="./", help="Dataset path containing IJBB and IJBC sub folder")
parser.add_argument("-s", "--subset", type=str, default="IJBB", help="Subset test target, could be IJBB / IJBC")
parser.add_argument("-b", "--batch_size", type=int, default=128, help="Batch size for get_embeddings")
parser.add_argument("-R", "--save_result", type=str, default=default_save_result_name, help="Filename for saving / restore result")
parser.add_argument("-L", "--save_label", action="store_true", help="Save label data, useful for plot only")
parser.add_argument("-E", "--save_embeddings", action="store_true", help="Save embeddings data")
parser.add_argument("-B", "--is_bunch", action="store_true", help="Run all 8 tests N{0,1}D{0,1}F{0,1}")
parser.add_argument("-N", "--is_one_2_N", action="store_true", help="Run 1:N test instead of 1:1")
parser.add_argument("-F", "--force_reload", action="store_true", help="Force reload, instead of using cache")
parser.add_argument("-P", "--plot_only", nargs="*", type=str, help="Plot saved results, Format 1 2 3 or 1, 2, 3 or *.npy")
args = parser.parse_known_args(argv)[0]
if args.plot_only != None and len(args.plot_only) != 0:
# Plot only
from glob2 import glob
score_files = []
for ss in args.plot_only:
score_files.extend(glob(ss.replace(",", "").strip()))
args.plot_only = score_files
elif args.model_file == None and args.save_result == default_save_result_name:
print("Please provide -m MODEL_FILE, see `--help` for usage.")
exit(1)
elif args.model_file != None:
if args.model_file.endswith(".h5") or args.model_file.endswith(".pth") or args.model_file.endswith(".pt") or args.model_file.endswith(".onnx"):
# Keras model file "model.h5", pytorch model ends with `.pth` or `.pt`, onnx model ends with `.onnx`
model_name = os.path.splitext(os.path.basename(args.model_file))[0]
else:
# MXNet model file "models/r50-arcface-emore/model,1"
model_name = os.path.basename(os.path.dirname(args.model_file))
if args.save_result == default_save_result_name:
type = "1N" if args.is_one_2_N else "11"
args.save_result = default_save_result_name.format(model_name=model_name, subset=args.subset, type=type)
return args
if __name__ == "__main__":
import sys
args = parse_arguments(sys.argv[1:])
if args.plot_only != None and len(args.plot_only) != 0:
if args.is_one_2_N:
plot_dir_far_cmc_scores(args.plot_only)
else:
plot_roc_and_calculate_tpr(args.plot_only)
else:
save_name = os.path.splitext(os.path.basename(args.save_result))[0]
save_items = {}
save_path = os.path.dirname(args.save_result)
if len(save_path) != 0 and not os.path.exists(save_path):
os.makedirs(save_path)
tt = IJB_test(args.model_file, args.data_path, args.subset, args.batch_size, args.force_reload, args.save_result)
if args.save_embeddings: # Save embeddings first, in case of any error happens later...
np.savez(args.save_result, embs=tt.embs, embs_f=tt.embs_f)
if args.is_one_2_N: # 1:N test
fars, tpirs, _, _ = tt.run_model_test_1N()
scores = [(fars, tpirs)]
names = [save_name]
save_items.update({"scores": scores, "names": names})
elif args.is_bunch: # All 8 tests N{0,1}D{0,1}F{0,1}
scores, names = tt.run_model_test_bunch()
names = [save_name + "_" + ii for ii in names]
label = tt.label
save_items.update({"scores": scores, "names": names})
else: # Basic 1:1 N0D1F1 test
score = tt.run_model_test_single()
scores, names, label = [score], [save_name], tt.label
save_items.update({"scores": scores, "names": names})
if args.save_embeddings:
save_items.update({"embs": tt.embs, "embs_f": tt.embs_f})
if args.save_label:
save_items.update({"label": label})
if args.model_file != None or args.save_embeddings: # embeddings not restored from file or should save_embeddings again
np.savez(args.save_result, **save_items)
if args.is_one_2_N:
plot_dir_far_cmc_scores(scores=scores, names=names)
else:
plot_roc_and_calculate_tpr(scores, names=names, label=label)
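# Example invocations (illustrative; paths and model names are placeholders):
#   python IJB_evals.py -m model.h5 -d /data/IJB_release -s IJBB -E
#   python IJB_evals.py -m model.h5 -d /data/IJB_release -s IJBC -N
#   python IJB_evals.py -P IJB_result/*.npz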
| 48.463566 | 157 | 0.649349 |
62df7245b6e82259530f69155de1566db688c21e | 3,700 | py | Python | MAX30105.py | coltonweaver/MAX30105-Raspberry-Pi-Python | 5900f58d7d4b08791301a9e27fe8c0d0b9c17247 | [
"MIT"
] | 1 | 2020-12-06T06:09:31.000Z | 2020-12-06T06:09:31.000Z | MAX30105.py | coltonweaver/MAX30105-Raspberry-Pi-Python | 5900f58d7d4b08791301a9e27fe8c0d0b9c17247 | [
"MIT"
] | 1 | 2018-10-22T20:40:41.000Z | 2018-10-22T20:40:41.000Z | MAX30105.py | coltonweaver/MAX30105-Raspberry-Pi-Python-Library | 5900f58d7d4b08791301a9e27fe8c0d0b9c17247 | [
"MIT"
] | null | null | null | from smbus import SMBus
import time
class MAX30105(object):
def __init__(self, bus, address):
self.address = address
self.bus = SMBus(bus)
self._led_mode = None
self._pulse_width_set = None
try:
self.bus.read_byte(self.address)
except:
print("Sensor not found. Check wiring.")
raise SystemExit()
else:
print("Found MAX30105 Particle Sensor on bus {}: [{}]".format(bus, hex(self.address)))
def read_register(self, REG, n_bytes=1):
self.bus.write_byte(self.address, REG)
return self.bus.read_i2c_block_data(self.address, REG, n_bytes)
def write_register(self, REG, VALUE):
self.bus.write_i2c_block_data(self.address, REG, [VALUE])
return
def bit_mask(self, REG, MASK, NEW_VALUE):
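        # Read-modify-write: keep the register bits selected by MASK and OR
        # in NEW_VALUE for the field being updated.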
newCONTENTS = (self.byte_to_int(self.read_register(REG)) & MASK) | NEW_VALUE
self.write_register(REG, newCONTENTS)
return
def setup_sensor(self, LED_MODE=2, LED_POWER=0x1F, PULSE_WIDTH=0x01):
        self.bit_mask(0x09, 0xBF, 0x40)  # soft reset (MODE_CONFIG bit 6)
time.sleep(1)
        # 0: 69 (15-bit), 1: 118 (16-bit), 2: 215 (17-bit), 3: 411 (18-bit)
self.bit_mask(0x0A, 0xFC, PULSE_WIDTH)
self._pulse_width_set = PULSE_WIDTH
if LED_MODE not in [1, 2, 3]:
raise ValueError('wrong LED mode:{0}!'.format(LED_MODE))
elif LED_MODE == 1:
self.bit_mask(0x09, 0xF8, 0x02)
self.write_register(0x0C, LED_POWER)
elif LED_MODE == 2:
self.bit_mask(0x09, 0xF8, 0x03)
self.write_register(0x0C, LED_POWER)
self.write_register(0x0D, LED_POWER)
elif LED_MODE == 3:
self.bit_mask(0x09, 0xF8, 0x07)
self.write_register(0x0C, LED_POWER)
self.write_register(0x0D, LED_POWER)
self.write_register(0x0E, LED_POWER)
            self.write_register(0x11, 0b00100001)  # slot 1: red, slot 2: IR
            self.write_register(0x12, 0b00000011)  # slot 3: green
self._led_mode = LED_MODE
        self.bit_mask(0x0A, 0xE3, 0x0C)  # sample rate: 400
# 50: 0x00, 100: 0x04, 200: 0x08, 400: 0x0C,
# 800: 0x10, 1000: 0x14, 1600: 0x18, 3200: 0x1C
        self.bit_mask(0x0A, 0x9F, 0x60)  # ADC range: 16384
# 2048: 0x00, 4096: 0x20, 8192: 0x40, 16384: 0x60
        self.bit_mask(0x08, ~0b11100000, 0x00)  # FIFO sample avg: 1 (no averaging)
# 1: 0x00, 2: 0x20, 4: 0x40, 8: 0x60, 16: 0x80, 32: 0xA0
        self.bit_mask(0x08, 0xEF, 0x10)  # FIFO rollover: enable
        # 0x00/0x10: dis-/enable
self.write_register(0x04, 0)
self.write_register(0x05, 0)
self.write_register(0x06, 0)
def set_red_led_power(self, LED_POWER):
self.bit_mask(0x09, 0xF8, 0x02)
self.write_register(0x0C, LED_POWER)
def set_ir_led_power(self, LED_POWER):
self.bit_mask(0x09, 0xF8, 0x03)
self.write_register(0x0D, LED_POWER)
def set_green_led_power(self, LED_POWER):
self.bit_mask(0x09, 0xF8, 0x07)
self.write_register(0x0E, LED_POWER)
def byte_to_int(self, byte_data):
return int.from_bytes(byte_data, byteorder='big', signed=False)
    def read_sensor(self, pointer_position):
        # Point the FIFO read pointer at the requested sample, then pull one
        # sample: 3 bytes per active LED channel.
        self.write_register(0x06, pointer_position)
        fifo_bytes = self.read_register(0x07, self._led_mode * 3)
        red_int = self.byte_to_int(fifo_bytes[0:3])
        IR_int = self.byte_to_int(fifo_bytes[3:6])
        # In modes 1 and 2 the unused slices are empty and decode to 0.
        green_int = self.byte_to_int(fifo_bytes[6:9])
        return red_int, IR_int, green_int
def clear_fifo(self):
self.write_register(0x04, 0)
self.write_register(0x05, 0)
self.write_register(0x06, 0)
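if __name__ == "__main__":
    # Quick smoke test, a minimal sketch: the bus number (1) and the I2C
    # address (0x57) are assumptions that depend on wiring and board revision.
    sensor = MAX30105(1, 0x57)
    sensor.setup_sensor(LED_MODE=3)
    time.sleep(0.1)  # let the first samples land in the FIFO
    red, ir, green = sensor.read_sensor(0)
    print("red={} ir={} green={}".format(red, ir, green))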
| 35.92233 | 98 | 0.618919 |
52aa62baf56f1dd547fa4c02b085c0a0a1d63490 | 8,089 | py | Python | python/tvm/auto_scheduler/auto_schedule.py | jiuqi-yang/dev-tvm | b04797561a7dac0557bc3a8348a803e67bb577ca | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | python/tvm/auto_scheduler/auto_schedule.py | jiuqi-yang/dev-tvm | b04797561a7dac0557bc3a8348a803e67bb577ca | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | python/tvm/auto_scheduler/auto_schedule.py | jiuqi-yang/dev-tvm | b04797561a7dac0557bc3a8348a803e67bb577ca | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
User interface for TVM Auto-scheduler.
The basic schedule search process for TVM Auto-scheduler is designed to be:
`Program sampling` -> `Performance Tuning`.
In `Program sampling`, we use some predefined precise or heuristic rules to generate several
initial schedules. Based on these initial starting points, we perform `Performance Tuning` which
uses cost model based evolutionary search to select schedules with the best performance.
Candidate schedules are measured against the specific hardware target.
"""
import tvm._ffi
from tvm.runtime import Object
from .measure import LocalBuilder, LocalRunner
from . import _ffi_api
@tvm._ffi.register_object("auto_scheduler.HardwareParams")
class HardwareParams(Object):
""" The parameters of target hardware used to guide the search policy
TODO(jcf94): This is considered to be merged with the new Target specification:
https://discuss.tvm.ai/t/rfc-tvm-target-specification/6844
Parameters
----------
num_cores : int
The number of device cores.
vector_unit_bytes : int
The width of vector units in bytes.
cache_line_bytes : int
The size of cache line in bytes.
"""
def __init__(self, num_cores, vector_unit_bytes, cache_line_bytes):
self.__init_handle_by_constructor__(_ffi_api.HardwareParams, num_cores,
vector_unit_bytes, cache_line_bytes)
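# Example construction (a sketch; the core count and byte widths below are
# illustrative assumptions, not defaults):
#
# hw_params = HardwareParams(num_cores=4, vector_unit_bytes=64,
#                            cache_line_bytes=64)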
@tvm._ffi.register_object("auto_scheduler.SearchTask")
class SearchTask(Object):
""" The computation information and hardware parameters for a schedule search task.
Parameters
----------
dag : ComputeDAG
The ComputeDAG for the corresponding compute declaration.
workload_key : str
The workload key for the corresponding compute declaration.
target : tvm.target.Target
The target device of this search task.
target_host : Optional[tvm.target.Target]
The target host device of this search task.
hardware_params : Optional[HardwareParams]
Hardware parameters used in this search task.
"""
def __init__(self, dag, workload_key, target, target_host=None,
hardware_params=None):
self.__init_handle_by_constructor__(_ffi_api.SearchTask, dag,
workload_key, target, target_host,
hardware_params)
@tvm._ffi.register_object("auto_scheduler.SearchPolicy")
class SearchPolicy(Object):
""" The base class of search policies. """
@tvm._ffi.register_object("auto_scheduler.EmptyPolicy")
class EmptyPolicy(SearchPolicy):
""" This is an example empty search policy which will always generate
the init state of ComputeDAG.
"""
def __init__(self):
self.__init_handle_by_constructor__(_ffi_api.EmptyPolicy)
@tvm._ffi.register_object("auto_scheduler.TuningOptions")
class TuningOptions(Object):
""" This controls the options of performance tuning.
Parameters
----------
num_measure_trials: int = 0
The number of measurement trials.
The search policy measures `num_measure_trials` schedules in total and returns the best one
among them.
With `num_measure_trials` == 0, the policy will do the schedule search but won't involve
measurement. This can be used to get a runnable schedule quickly without auto-tuning.
early_stopping: Optional[int]
Stop the tuning early if getting no improvement after n measurements.
num_measures_per_round: int = 64
The number of schedules to be measured at each search round.
The whole schedule search process will try a total number of `num_measure_trials` in several
rounds.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during schedule search.
builder: Union[ProgramBuilder, str] = 'local'
ProgramBuilder which builds the program.
runner: Union[ProgramRunner, str] = 'local'
ProgramRunner which runs the program and measures time costs.
measure_callbacks: Optional[List[MeasureCallback]]
Callback functions called after each measurement.
Candidates:
- auto_scheduler.RecordToFile
pre_search_callbacks: Optional[List[SearchCallback]]
Callback functions called before the search process.
Candidates:
- auto_scheduler.PreloadMeasuredStates
- auto_scheduler.PreloadCustomSketchRule
TODO(jcf94): Add these implementation in later PRs.
"""
def __init__(self, num_measure_trials=0, early_stopping=None, num_measures_per_round=64,
verbose=1, builder='local', runner='local', measure_callbacks=None,
pre_search_callbacks=None):
if isinstance(builder, str):
if builder == 'local':
builder = LocalBuilder()
else:
raise ValueError("Invalid builder: " + builder)
elif not isinstance(builder, tvm.auto_scheduler.measure.ProgramBuilder):
raise ValueError("Invalid builder: " + builder +
" . TuningOptions expects a ProgramBuilder or string.")
if isinstance(runner, str):
if runner == 'local':
runner = LocalRunner()
else:
raise ValueError("Invalid runner: " + runner)
elif not isinstance(runner, tvm.auto_scheduler.measure.ProgramRunner):
raise ValueError("Invalid runner: " + runner +
" . TuningOptions expects a ProgramRunner or string.")
self.__init_handle_by_constructor__(
            _ffi_api.TuningOptions, num_measure_trials,
            early_stopping if early_stopping is not None else -1,
num_measures_per_round, verbose, builder, runner, measure_callbacks,
pre_search_callbacks)
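# Example construction (a sketch; "matmul.json" is a placeholder file name):
#
# tune_option = TuningOptions(
#     num_measure_trials=64,
#     measure_callbacks=[auto_scheduler.RecordToFile("matmul.json")],
#     verbose=1)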
def auto_schedule(task, search_policy='default', tuning_options=None):
""" Do auto scheduling for a computation declaration.
Parameters
----------
task : SearchTask
The SearchTask for the computation declaration.
search_policy : Union[SearchPolicy, str] = 'default'
The search policy to be used for schedule search.
tuning_options : Optional[TuningOptions]
Tuning and measurement options.
Returns
-------
A `te.schedule` and the a list of `te.Tensor` to be used in `tvm.lower` or `tvm.build`.
"""
if not isinstance(task, SearchTask):
raise ValueError("Invalid task: " + task +
" . `auto_scheduler.auto_schedule` expects a SearchTask.")
if isinstance(search_policy, str):
if search_policy == 'default':
            # TODO(jcf94): This is an example policy for a minimum system; it
            # will be upgraded to a formal search policy later.
search_policy = EmptyPolicy()
else:
raise ValueError("Invalid search policy: " + search_policy)
elif not isinstance(search_policy, SearchPolicy):
raise ValueError("Invalid search policy: " + search_policy +
" . `auto_scheduler.auto_schedule` expects a SearchPolicy or a string.")
sch, tensors = _ffi_api.AutoSchedule(task, search_policy,
tuning_options if tuning_options else TuningOptions())
return sch, tensors
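# Minimal end-to-end sketch (the workload key and the target construction are
# illustrative assumptions and vary with TVM version):
#
# dag = auto_scheduler.ComputeDAG(workload_key)
# task = SearchTask(dag, workload_key, target=tvm.target.create("llvm"))
# sch, args = auto_schedule(task)  # EmptyPolicy + default TuningOptions
# mod = tvm.lower(sch, args, simple_mode=True)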
| 42.130208 | 98 | 0.687724 |
05da8c2f5b175a1756a86155fd5fadaba68f61ba | 1,427 | py | Python | Redump Verifier 1.4.3.py | normalgamer/test | f1b8918ba59661784033ea67c0c77c4c22be4dcd | [
"MIT"
] | null | null | null | Redump Verifier 1.4.3.py | normalgamer/test | f1b8918ba59661784033ea67c0c77c4c22be4dcd | [
"MIT"
] | 1 | 2020-12-19T00:09:18.000Z | 2020-12-19T00:09:18.000Z | Redump Verifier 1.4.3.py | normalgamer/test | f1b8918ba59661784033ea67c0c77c4c22be4dcd | [
"MIT"
] | null | null | null | import os
import hashlib
dats = os.listdir("./dat")
read_size = 1024
hash = hashlib.md5()
gameVerified = False
line_number=0
print(""
+"================== Redump verifier - version 1.4.3 ====================\n"
+"------------------ Github.com/normalgamer --------------------\n"
+"\n"
+"Drag 'n Drop your ISO\n"
+"\n"
)
iso = input("> ")
iso = iso.replace("\"","")
print("\nCalculating hash...")
with open(iso, "rb") as f:
data = f.read(read_size)
while data:
hash.update(data)
data = f.read(read_size)
hash = hash.hexdigest()
for dat in dats:
    with open("dat/" + dat) as f:  # "with" closes the file automatically
        data = f.readlines()
    for line_number, line in enumerate(data, start=1):
        if hash in line:
            print("\n"
                  + "ISO's MD5 hash: " + hash
                  + "\n"
                  + "Game Verified, ISO's MD5 matches Redump hash"
                  )
            # the <description> line immediately precedes the <rom> line
            # that carries the matching hash
            gameName = data[line_number - 2].replace("<description>", "").replace("</description>", "").replace("\t", "")
            print("\nRedump game name: " + gameName)
            gameVerified = True
if not gameVerified:
print("\n"
+"ISO's MD5: " + hash
+"\n"
+"ISO's MD5 doesn't match any Redump hash"
)
input() | 25.035088 | 126 | 0.448493 |
f6e89556ee97aa3b42414a068029310f448dc5a0 | 504 | py | Python | Python/challenges/andela/pig_latin.py | Kenneth-Macharia/Learning | 0948dda73d94b25f96fad7e9ff3523782b0a407a | [
"MIT"
] | null | null | null | Python/challenges/andela/pig_latin.py | Kenneth-Macharia/Learning | 0948dda73d94b25f96fad7e9ff3523782b0a407a | [
"MIT"
] | 3 | 2020-07-26T19:17:23.000Z | 2021-01-01T15:39:38.000Z | Python/challenges/andela/pig_latin.py | Kenneth-Macharia/Learning | 0948dda73d94b25f96fad7e9ff3523782b0a407a | [
"MIT"
] | null | null | null | def pig_latin_converter(word):
    VOWELS = 'aeiou'
    CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
    clean_word = word.lower().strip()
    # Expected behaviour (sanity checks):
    #   pig_latin_converter('apple') -> 'appleway'
    #   pig_latin_converter('glove') -> 'oveglay'
    #   pig_latin_converter('')      -> ''
    # Guard the empty string before indexing, parenthesize the vowel/consonant
    # test so "and" does not bind tighter than intended, and work on
    # clean_word throughout so upper-case input is handled too.
    if clean_word != '' and (clean_word[0] in VOWELS or clean_word[0] in CONSONANTS):
        if clean_word[0] in VOWELS:
            return f'{clean_word}way'
        else:
            index = 0
            for i in range(len(clean_word)):
                if clean_word[i] in VOWELS:
                    index = i
                    break
            return f'{clean_word[index:]}{clean_word[:index]}ay'
    return '' | 26.526316 | 71 | 0.505952 |
f1d21255e0cc971c4dd8cf99530849aee7f1597e | 6,858 | py | Python | django-stdimage/fields.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | [
"BSD-3-Clause"
] | null | null | null | django-stdimage/fields.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | [
"BSD-3-Clause"
] | null | null | null | django-stdimage/fields.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | [
"BSD-3-Clause"
] | null | null | null | from django.db.models.fields.files import ImageField
from django.db.models import signals
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from widgets import DelAdminFileWidget
from forms import StdImageFormField
import os, shutil
from itertools import zip_longest
class ThumbnailField:
'''
Instances of this class will be used to access data of the
generated thumbnails
'''
def __init__(self, name):
self.name = name
self.storage = FileSystemStorage()
def path(self):
return self.storage.path(self.name)
def url(self):
return self.storage.url(self.name)
def size(self):
return self.storage.size(self.name)
class StdImageField(ImageField):
'''
Django field that behaves as ImageField, with some extra features like:
- Auto resizing
- Automatically generate thumbnails
- Allow image deletion
'''
def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, size=None, thumbnail_size=None, **kwargs):
'''
Added fields:
- size: a tuple containing width and height to resize image, and an optional boolean setting if is wanted forcing that size (None for not resizing).
* Example: (640, 480, True) -> Will resize image to a width of 640px and a height of 480px. File will be cutted if necessary for forcing te image to have the desired size
- thumbnail_size: a tuple with same values than `size' (None for not creating a thumbnail
'''
params_size = ('width', 'height', 'force')
for att_name, att in {'size': size, 'thumbnail_size': thumbnail_size}.items():
if att and (isinstance(att, tuple) or isinstance(att, list)):
                # dict(map(None, ...)) was a Python 2 idiom; zip_longest pads
                # a missing 'force' entry with None under Python 3.
                setattr(self, att_name, dict(zip_longest(params_size, att)))
else:
setattr(self, att_name, None)
super(StdImageField, self).__init__(verbose_name, name, width_field, height_field, **kwargs)
def _get_thumbnail_filename(self, filename):
'''
Returns the thumbnail name associated to the standard image filename
* Example: /var/www/myproject/media/img/picture_1.jpeg
will return /var/www/myproject/media/img/picture_1.thumbnail.jpeg
'''
splitted_filename = list(os.path.splitext(filename))
splitted_filename.insert(1, '.thumbnail')
return ''.join(splitted_filename)
def _resize_image(self, filename, size):
'''
Resizes the image to specified width, height and force option
- filename: full path of image to resize
- size: dictionary containing:
- width: new width
- height: new height
- force: if True, image will be cropped to fit the exact size,
if False, it will have the bigger size that fits the specified
size, but without cropping, so it could be smaller on width or height
'''
WIDTH, HEIGHT = 0, 1
from PIL import Image, ImageOps
img = Image.open(filename)
if img.size[WIDTH] > size['width'] or img.size[HEIGHT] > size['height']:
if size['force']:
img = ImageOps.fit(img, (size['width'], size['height']), Image.ANTIALIAS)
else:
img.thumbnail((size['width'], size['height']), Image.ANTIALIAS)
try:
img.save(filename, optimize=1)
except IOError:
img.save(filename)
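    # Worked example for _resize_image above (a sketch): a 1000x500 source
    # with size={'width': 640, 'height': 480, 'force': True} is centre-cropped
    # by ImageOps.fit to exactly 640x480; with force=False, img.thumbnail
    # shrinks it to 640x320 instead, preserving the aspect ratio.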
def _rename_resize_image(self, instance=None, **kwargs):
'''
Renames the image, and calls methods to resize and create the thumbnail
'''
if getattr(instance, self.name):
filename = getattr(instance, self.name).path
ext = os.path.splitext(filename)[1].lower().replace('jpg', 'jpeg')
dst = self.generate_filename(instance, '%s_%s%s' % (self.name, instance._get_pk_val(), ext))
dst_fullpath = os.path.join(settings.MEDIA_ROOT, dst)
if os.path.abspath(filename) != os.path.abspath(dst_fullpath):
os.rename(filename, dst_fullpath)
if self.size:
self._resize_image(dst_fullpath, self.size)
if self.thumbnail_size:
thumbnail_filename = self._get_thumbnail_filename(dst_fullpath)
shutil.copyfile(dst_fullpath, thumbnail_filename)
self._resize_image(thumbnail_filename, self.thumbnail_size)
setattr(instance, self.attname, dst)
instance.save()
def _set_thumbnail(self, instance=None, **kwargs):
'''
Creates a "thumbnail" object as attribute of the ImageField instance
Thumbnail attribute will be of the same class of original image, so
"path", "url"... properties can be used
'''
if getattr(instance, self.name):
filename = self.generate_filename(instance, os.path.basename(getattr(instance, self.name).path))
thumbnail_filename = self._get_thumbnail_filename(filename)
thumbnail_field = ThumbnailField(thumbnail_filename)
setattr(getattr(instance, self.name), 'thumbnail', thumbnail_field)
def formfield(self, **kwargs):
'''
Specify form field and widget to be used on the forms
'''
kwargs['widget'] = DelAdminFileWidget
kwargs['form_class'] = StdImageFormField
return super(StdImageField, self).formfield(**kwargs)
def save_form_data(self, instance, data):
'''
Overwrite save_form_data to delete images if "delete" checkbox
is selected
'''
if data == '__deleted__':
filename = getattr(instance, self.name).path
if os.path.exists(filename):
os.remove(filename)
thumbnail_filename = self._get_thumbnail_filename(filename)
if os.path.exists(thumbnail_filename):
os.remove(thumbnail_filename)
setattr(instance, self.name, None)
else:
super(StdImageField, self).save_form_data(instance, data)
def get_db_prep_save(self, value):
'''
Overwrite get_db_prep_save to allow saving nothing to the database
if image has been deleted
'''
if value:
return super(StdImageField, self).get_db_prep_save(value)
else:
return u''
def contribute_to_class(self, cls, name):
'''
Call methods for generating all operations on specified signals
'''
super(StdImageField, self).contribute_to_class(cls, name)
signals.post_save.connect(self._rename_resize_image, sender=cls)
signals.post_init.connect(self._set_thumbnail, sender=cls)
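# Minimal usage sketch (the model and field names below are illustrative
# assumptions, not part of this module):
#
# class Article(models.Model):
#     image = StdImageField(upload_to='img',
#                           size=(640, 480, True),     # resize and crop
#                           thumbnail_size=(100, 75))  # auto-generated thumb
#
# article.image.thumbnail.url then points at the generated
# '<name>.thumbnail.<ext>' file stored next to the resized original.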
| 43.405063 | 186 | 0.625109 |
60af80496423667f16fd28962ccaf114590ba3a5 | 762 | py | Python | skfem/__init__.py | HadrienNU/scikit-fem | 39d4cff53790725a865b1f2256dd3358c9ca878e | [
"BSD-3-Clause"
] | null | null | null | skfem/__init__.py | HadrienNU/scikit-fem | 39d4cff53790725a865b1f2256dd3358c9ca878e | [
"BSD-3-Clause"
] | null | null | null | skfem/__init__.py | HadrienNU/scikit-fem | 39d4cff53790725a865b1f2256dd3358c9ca878e | [
"BSD-3-Clause"
] | null | null | null | """Support for wildcard import."""
from skfem.mesh import * # noqa
from skfem.assembly import * # noqa
from skfem.mapping import * # noqa
from skfem.element import * # noqa
from skfem.utils import * # noqa
from skfem.assembly import __all__ as all_assembly
from skfem.mesh import __all__ as all_mesh
from skfem.element import __all__ as all_element
__all__ = all_mesh + all_assembly + all_element + [ # noqa
'MappingAffine',
'MappingIsoparametric',
'MappingMortar',
'adaptive_theta',
'build_pc_ilu',
'build_pc_diag',
'condense',
'enforce',
'project',
'projection',
'solve',
'solver_direct_scipy',
'solver_eigen_scipy',
'solver_eigen_scipy_sym',
'solver_iter_pcg',
'solver_iter_krylov',
]
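# Example of the wildcard import this module enables (a minimal sketch):
#
# from skfem import *
# m = MeshTri()           # re-exported from skfem.mesh
# e = ElementTriP1()      # re-exported from skfem.element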
| 23.8125 | 59 | 0.692913 |
0594567f6f3e14fa987fbe44bf4bc3ed2ecd1f40 | 22,771 | py | Python | code/trainer.py | Sreerag-ibtl/inference_AttnGan_py3 | 5a172d03f2397b4e541230a36392284f783a9bed | [
"MIT"
] | 1 | 2020-05-15T14:10:42.000Z | 2020-05-15T14:10:42.000Z | code/trainer.py | Sreerag-ibtl/inference_AttnGan_py3 | 5a172d03f2397b4e541230a36392284f783a9bed | [
"MIT"
] | null | null | null | code/trainer.py | Sreerag-ibtl/inference_AttnGan_py3 | 5a172d03f2397b4e541230a36392284f783a9bed | [
"MIT"
] | null | null | null |
from six.moves import range
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from PIL import Image
from miscc.config import cfg
from miscc.utils import mkdir_p
from miscc.utils import build_super_images, build_super_images2
from miscc.utils import weights_init, load_params, copy_G_params
from model import G_DCGAN, G_NET
from datasets import prepare_data
from model import RNN_ENCODER, CNN_ENCODER
from miscc.losses import words_loss
from miscc.losses import discriminator_loss, generator_loss, KL_loss
import os
import time
import numpy as np
import sys
# ################# Text to image task############################ #
class condGANTrainer(object):
def __init__(self, output_dir, data_loader, n_words, ixtoword):
if cfg.TRAIN.FLAG:
self.model_dir = os.path.join(output_dir, 'Model')
self.image_dir = os.path.join(output_dir, 'Image')
mkdir_p(self.model_dir)
mkdir_p(self.image_dir)
## torch.cuda.set_device(cfg.GPU_ID)
## cudnn.benchmark = True
self.batch_size = cfg.TRAIN.BATCH_SIZE
self.max_epoch = cfg.TRAIN.MAX_EPOCH
self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL
self.n_words = n_words
self.ixtoword = ixtoword
self.data_loader = data_loader
self.num_batches = len(self.data_loader)
def build_models(self):
# ###################encoders######################################## #
if cfg.TRAIN.NET_E == '':
print('Error: no pretrained text-image encoders')
return
image_encoder = CNN_ENCODER(cfg.TEXT.EMBEDDING_DIM)
img_encoder_path = cfg.TRAIN.NET_E.replace('text_encoder', 'image_encoder')
state_dict = \
torch.load(img_encoder_path, map_location=lambda storage, loc: storage)
image_encoder.load_state_dict(state_dict)
for p in image_encoder.parameters():
p.requires_grad = False
print('Load image encoder from:', img_encoder_path)
image_encoder.eval()
text_encoder = \
RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
state_dict = \
torch.load(cfg.TRAIN.NET_E,
map_location=lambda storage, loc: storage)
text_encoder.load_state_dict(state_dict)
for p in text_encoder.parameters():
p.requires_grad = False
print('Load text encoder from:', cfg.TRAIN.NET_E)
text_encoder.eval()
# #######################generator and discriminators############## #
netsD = []
if cfg.GAN.B_DCGAN:
if cfg.TREE.BRANCH_NUM ==1:
from model import D_NET64 as D_NET
elif cfg.TREE.BRANCH_NUM == 2:
from model import D_NET128 as D_NET
else: # cfg.TREE.BRANCH_NUM == 3:
from model import D_NET256 as D_NET
# TODO: elif cfg.TREE.BRANCH_NUM > 3:
netG = G_DCGAN()
netsD = [D_NET(b_jcu=False)]
else:
from model import D_NET64, D_NET128, D_NET256
netG = G_NET()
if cfg.TREE.BRANCH_NUM > 0:
netsD.append(D_NET64())
if cfg.TREE.BRANCH_NUM > 1:
netsD.append(D_NET128())
if cfg.TREE.BRANCH_NUM > 2:
netsD.append(D_NET256())
# TODO: if cfg.TREE.BRANCH_NUM > 3:
netG.apply(weights_init)
# print(netG)
for i in range(len(netsD)):
netsD[i].apply(weights_init)
# print(netsD[i])
print('# of netsD', len(netsD))
#
epoch = 0
if cfg.TRAIN.NET_G != '':
state_dict = \
torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage)
netG.load_state_dict(state_dict)
print('Load G from: ', cfg.TRAIN.NET_G)
istart = cfg.TRAIN.NET_G.rfind('_') + 1
iend = cfg.TRAIN.NET_G.rfind('.')
epoch = cfg.TRAIN.NET_G[istart:iend]
epoch = int(epoch) + 1
if cfg.TRAIN.B_NET_D:
Gname = cfg.TRAIN.NET_G
for i in range(len(netsD)):
s_tmp = Gname[:Gname.rfind('/')]
Dname = '%s/netD%d.pth' % (s_tmp, i)
print('Load D from: ', Dname)
state_dict = \
torch.load(Dname, map_location=lambda storage, loc: storage)
netsD[i].load_state_dict(state_dict)
# ########################################################### #
if cfg.CUDA:
text_encoder = text_encoder.cuda()
image_encoder = image_encoder.cuda()
netG.cuda()
for i in range(len(netsD)):
netsD[i].cuda()
return [text_encoder, image_encoder, netG, netsD, epoch]
def define_optimizers(self, netG, netsD):
optimizersD = []
num_Ds = len(netsD)
for i in range(num_Ds):
opt = optim.Adam(netsD[i].parameters(),
lr=cfg.TRAIN.DISCRIMINATOR_LR,
betas=(0.5, 0.999))
optimizersD.append(opt)
optimizerG = optim.Adam(netG.parameters(),
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999))
return optimizerG, optimizersD
def prepare_labels(self):
batch_size = self.batch_size
real_labels = Variable(torch.FloatTensor(batch_size).fill_(1))
fake_labels = Variable(torch.FloatTensor(batch_size).fill_(0))
match_labels = Variable(torch.LongTensor(list(range(batch_size))))
if cfg.CUDA:
real_labels = real_labels.cuda()
fake_labels = fake_labels.cuda()
match_labels = match_labels.cuda()
return real_labels, fake_labels, match_labels
def save_model(self, netG, avg_param_G, netsD, epoch):
backup_para = copy_G_params(netG)
load_params(netG, avg_param_G)
torch.save(netG.state_dict(),
'%s/netG_epoch_%d.pth' % (self.model_dir, epoch))
load_params(netG, backup_para)
#
for i in range(len(netsD)):
netD = netsD[i]
torch.save(netD.state_dict(),
'%s/netD%d.pth' % (self.model_dir, i))
print('Save G/Ds models.')
def set_requires_grad_value(self, models_list, brequires):
for i in range(len(models_list)):
for p in models_list[i].parameters():
p.requires_grad = brequires
def save_img_results(self, netG, noise, sent_emb, words_embs, mask,
image_encoder, captions, cap_lens,
gen_iterations, name='current'):
# Save images
fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs, mask)
for i in range(len(attention_maps)):
if len(fake_imgs) > 1:
img = fake_imgs[i + 1].detach().cpu()
lr_img = fake_imgs[i].detach().cpu()
else:
img = fake_imgs[0].detach().cpu()
lr_img = None
attn_maps = attention_maps[i]
att_sze = attn_maps.size(2)
img_set, _ = \
build_super_images(img, captions, self.ixtoword,
attn_maps, att_sze, lr_imgs=lr_img)
if img_set is not None:
im = Image.fromarray(img_set)
fullpath = '%s/G_%s_%d_%d.png'\
% (self.image_dir, name, gen_iterations, i)
im.save(fullpath)
# for i in range(len(netsD)):
i = -1
img = fake_imgs[i].detach()
region_features, _ = image_encoder(img)
att_sze = region_features.size(2)
_, _, att_maps = words_loss(region_features.detach(),
words_embs.detach(),
None, cap_lens,
None, self.batch_size)
img_set, _ = \
build_super_images(fake_imgs[i].detach().cpu(),
captions, self.ixtoword, att_maps, att_sze)
if img_set is not None:
im = Image.fromarray(img_set)
fullpath = '%s/D_%s_%d.png'\
% (self.image_dir, name, gen_iterations)
im.save(fullpath)
def train(self):
text_encoder, image_encoder, netG, netsD, start_epoch = self.build_models()
avg_param_G = copy_G_params(netG)
optimizerG, optimizersD = self.define_optimizers(netG, netsD)
real_labels, fake_labels, match_labels = self.prepare_labels()
batch_size = self.batch_size
nz = cfg.GAN.Z_DIM
noise = Variable(torch.FloatTensor(batch_size, nz))
fixed_noise = Variable(torch.FloatTensor(batch_size, nz).normal_(0, 1))
if cfg.CUDA:
noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
gen_iterations = 0
# gen_iterations = start_epoch * self.num_batches
for epoch in range(start_epoch, self.max_epoch):
start_t = time.time()
data_iter = iter(self.data_loader)
step = 0
while step < self.num_batches:
# reset requires_grad to be trainable for all Ds
# self.set_requires_grad_value(netsD, True)
######################################################
# (1) Prepare training data and Compute text embeddings
######################################################
data = next(data_iter)
imgs, captions, cap_lens, class_ids, keys = prepare_data(data)
hidden = text_encoder.init_hidden(batch_size)
# words_embs: batch_size x nef x seq_len
# sent_emb: batch_size x nef
words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
words_embs, sent_emb = words_embs.detach(), sent_emb.detach()
mask = (captions == 0)
num_words = words_embs.size(2)
if mask.size(1) > num_words:
mask = mask[:, :num_words]
#######################################################
# (2) Generate fake images
######################################################
noise.data.normal_(0, 1)
fake_imgs, _, mu, logvar = netG(noise, sent_emb, words_embs, mask)
#######################################################
# (3) Update D network
######################################################
errD_total = 0
D_logs = ''
for i in range(len(netsD)):
netsD[i].zero_grad()
errD = discriminator_loss(netsD[i], imgs[i], fake_imgs[i],
sent_emb, real_labels, fake_labels)
# backward and update parameters
errD.backward()
optimizersD[i].step()
errD_total += errD
D_logs += 'errD%d: %.2f ' % (i, errD.data[0])
#######################################################
# (4) Update G network: maximize log(D(G(z)))
######################################################
# compute total loss for training G
step += 1
gen_iterations += 1
# do not need to compute gradient for Ds
# self.set_requires_grad_value(netsD, False)
netG.zero_grad()
errG_total, G_logs = \
generator_loss(netsD, image_encoder, fake_imgs, real_labels,
words_embs, sent_emb, match_labels, cap_lens, class_ids)
kl_loss = KL_loss(mu, logvar)
errG_total += kl_loss
G_logs += 'kl_loss: %.2f ' % kl_loss.data[0]
# backward and update parameters
errG_total.backward()
optimizerG.step()
for p, avg_p in zip(netG.parameters(), avg_param_G):
avg_p.mul_(0.999).add_(0.001, p.data)
if gen_iterations % 100 == 0:
print(D_logs + '\n' + G_logs)
# save images
if gen_iterations % 1000 == 0:
backup_para = copy_G_params(netG)
load_params(netG, avg_param_G)
self.save_img_results(netG, fixed_noise, sent_emb,
words_embs, mask, image_encoder,
captions, cap_lens, epoch, name='average')
load_params(netG, backup_para)
#
# self.save_img_results(netG, fixed_noise, sent_emb,
# words_embs, mask, image_encoder,
# captions, cap_lens,
# epoch, name='current')
end_t = time.time()
print('''[%d/%d][%d]
Loss_D: %.2f Loss_G: %.2f Time: %.2fs'''
% (epoch, self.max_epoch, self.num_batches,
errD_total.data[0], errG_total.data[0],
end_t - start_t))
if epoch % cfg.TRAIN.SNAPSHOT_INTERVAL == 0: # and epoch != 0:
self.save_model(netG, avg_param_G, netsD, epoch)
self.save_model(netG, avg_param_G, netsD, self.max_epoch)
def save_singleimages(self, images, filenames, save_dir,
split_dir, sentenceID=0):
for i in range(images.size(0)):
s_tmp = '%s/single_samples/%s/%s' %\
(save_dir, split_dir, filenames[i])
folder = s_tmp[:s_tmp.rfind('/')]
if not os.path.isdir(folder):
print('Make a new folder: ', folder)
mkdir_p(folder)
fullpath = '%s_%d.jpg' % (s_tmp, sentenceID)
# range from [-1, 1] to [0, 1]
# img = (images[i] + 1.0) / 2
img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte()
# range from [0, 1] to [0, 255]
ndarr = img.permute(1, 2, 0).data.cpu().numpy()
im = Image.fromarray(ndarr)
im.save(fullpath)
def sampling(self, split_dir):
if cfg.TRAIN.NET_G == '':
            print('Error: the path for models is not found!')
else:
if split_dir == 'test':
split_dir = 'valid'
# Build and load the generator
if cfg.GAN.B_DCGAN:
netG = G_DCGAN()
else:
netG = G_NET()
netG.apply(weights_init)
netG.cuda()
netG.eval()
#
text_encoder = RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
state_dict = \
torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)
text_encoder.load_state_dict(state_dict)
print('Load text encoder from:', cfg.TRAIN.NET_E)
text_encoder = text_encoder.cuda()
text_encoder.eval()
batch_size = self.batch_size
nz = cfg.GAN.Z_DIM
noise = Variable(torch.FloatTensor(batch_size, nz), volatile=True)
noise = noise.cuda()
model_dir = cfg.TRAIN.NET_G
state_dict = \
torch.load(model_dir, map_location=lambda storage, loc: storage)
# state_dict = torch.load(cfg.TRAIN.NET_G)
netG.load_state_dict(state_dict)
print('Load G from: ', model_dir)
# the path to save generated images
s_tmp = model_dir[:model_dir.rfind('.pth')]
save_dir = '%s/%s' % (s_tmp, split_dir)
mkdir_p(save_dir)
cnt = 0
for _ in range(1): # (cfg.TEXT.CAPTIONS_PER_IMAGE):
for step, data in enumerate(self.data_loader, 0):
cnt += batch_size
if step % 100 == 0:
print('step: ', step)
# if step > 50:
# break
imgs, captions, cap_lens, class_ids, keys = prepare_data(data)
hidden = text_encoder.init_hidden(batch_size)
# words_embs: batch_size x nef x seq_len
# sent_emb: batch_size x nef
words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
words_embs, sent_emb = words_embs.detach(), sent_emb.detach()
mask = (captions == 0)
num_words = words_embs.size(2)
if mask.size(1) > num_words:
mask = mask[:, :num_words]
#######################################################
# (2) Generate fake images
######################################################
noise.data.normal_(0, 1)
fake_imgs, _, _, _ = netG(noise, sent_emb, words_embs, mask)
for j in range(batch_size):
s_tmp = '%s/single/%s' % (save_dir, keys[j])
folder = s_tmp[:s_tmp.rfind('/')]
if not os.path.isdir(folder):
print('Make a new folder: ', folder)
mkdir_p(folder)
k = -1
# for k in range(len(fake_imgs)):
im = fake_imgs[k][j].data.cpu().numpy()
# [-1, 1] --> [0, 255]
im = (im + 1.0) * 127.5
im = im.astype(np.uint8)
im = np.transpose(im, (1, 2, 0))
im = Image.fromarray(im)
fullpath = '%s_s%d.png' % (s_tmp, k)
im.save(fullpath)
def gen_example(self, data_dic):
if cfg.TRAIN.NET_G == '':
            print('Error: the path for models is not found!')
else:
# Build and load the generator
text_encoder = \
RNN_ENCODER(self.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)
state_dict = \
torch.load(cfg.TRAIN.NET_E, map_location=lambda storage, loc: storage)
text_encoder.load_state_dict(state_dict)
print('Load text encoder from:', cfg.TRAIN.NET_E)
#text_encoder = text_encoder.cuda()
text_encoder.eval()
# the path to save generated images
if cfg.GAN.B_DCGAN:
netG = G_DCGAN()
else:
netG = G_NET()
s_tmp = cfg.TRAIN.NET_G[:cfg.TRAIN.NET_G.rfind('.pth')]
model_dir = cfg.TRAIN.NET_G
state_dict = \
torch.load(model_dir, map_location=lambda storage, loc: storage)
netG.load_state_dict(state_dict)
print('Load G from: ', model_dir)
#netG.cuda()
netG.eval()
for key in data_dic:
save_dir = '%s/%s' % (s_tmp, key)
mkdir_p(save_dir)
captions, cap_lens, sorted_indices = data_dic[key]
batch_size = captions.shape[0]
print(batch_size)
nz = cfg.GAN.Z_DIM
captions = Variable(torch.from_numpy(captions), volatile=True)
cap_lens = Variable(torch.from_numpy(cap_lens), volatile=True)
## captions = captions.cuda()
## cap_lens = cap_lens.cuda()
for i in range(1): # 16
noise = Variable(torch.FloatTensor(batch_size, nz), volatile=True)
## noise = noise.cuda()
#######################################################
# (1) Extract text embeddings
######################################################
hidden = text_encoder.init_hidden(batch_size)
# words_embs: batch_size x nef x seq_len
# sent_emb: batch_size x nef
words_embs, sent_emb = text_encoder(captions, cap_lens, hidden)
mask = (captions == 0)
#######################################################
# (2) Generate fake images
######################################################
noise.data.normal_(0, 1)
fake_imgs, attention_maps, _, _ = netG(noise, sent_emb, words_embs, mask)
# G attention
cap_lens_np = cap_lens.cpu().data.numpy()
for j in range(batch_size):
save_name = '%s/%d_s_%d' % (save_dir, i, sorted_indices[j])
for k in range(len(fake_imgs)):
im = fake_imgs[k][j].data.cpu().numpy()
im = (im + 1.0) * 127.5
im = im.astype(np.uint8)
# print('im', im.shape)
im = np.transpose(im, (1, 2, 0))
# print('im', im.shape)
im = Image.fromarray(im)
fullpath = '%s_g%d.png' % (save_name, k)
im.save(fullpath)
## for k in range(len(attention_maps)):
## if len(fake_imgs) > 1:
## im = fake_imgs[k + 1].detach().cpu()
## else:
## im = fake_imgs[0].detach().cpu()
## attn_maps = attention_maps[k]
## att_sze = attn_maps.size(2)
## img_set, sentences = \
## build_super_images2(im[j].unsqueeze(0),
## captions[j].unsqueeze(0),
## [cap_lens_np[j]], self.ixtoword,
## [attn_maps[j]], att_sze)
## if img_set is not None:
## im = Image.fromarray(img_set)
## fullpath = '%s_a%d.png' % (save_name, k)
## im.save(fullpath)
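# Minimal driver sketch (not from the original file; the dictionary key is an
# illustrative assumption). gen_example() above expects data_dic to map a key
# to (captions, cap_lens, sorted_indices) numpy arrays:
#
# algo = condGANTrainer(output_dir, data_loader, n_words, ixtoword)
# algo.gen_example({'example_captions': (captions, cap_lens, sorted_indices)})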
| 43.874759 | 93 | 0.481534 |
09d38588578056e82493c5fb56a2fd0ddfa44f36 | 87,040 | py | Python | cinder/volume/drivers/pure.py | ilay09/cinder | 86f084d42f18bd5971cc7a0df3e6d815543a472d | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/pure.py | ilay09/cinder | 86f084d42f18bd5971cc7a0df3e6d815543a472d | [
"Apache-2.0"
] | null | null | null | cinder/volume/drivers/pure.py | ilay09/cinder | 86f084d42f18bd5971cc7a0df3e6d815543a472d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 Pure Storage, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for Pure Storage FlashArray storage system.
This driver requires Purity version 4.0.0 or later.
"""
import functools
import math
import platform
import re
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.objects import fields
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
try:
from purestorage import purestorage
except ImportError:
purestorage = None
LOG = logging.getLogger(__name__)
PURE_OPTS = [
cfg.StrOpt("pure_api_token",
help="REST API authorization token."),
cfg.BoolOpt("pure_automatic_max_oversubscription_ratio",
default=True,
help="Automatically determine an oversubscription ratio based "
"on the current total data reduction values. If used "
"this calculated value will override the "
"max_over_subscription_ratio config option."),
# These are used as default settings. In future these can be overridden
# by settings in volume-type.
cfg.IntOpt("pure_replica_interval_default", default=900,
help="Snapshot replication interval in seconds."),
cfg.IntOpt("pure_replica_retention_short_term_default", default=14400,
help="Retain all snapshots on target for this "
"time (in seconds.)"),
cfg.IntOpt("pure_replica_retention_long_term_per_day_default", default=3,
help="Retain how many snapshots for each day."),
cfg.IntOpt("pure_replica_retention_long_term_default", default=7,
help="Retain snapshots per day on target for this time "
"(in days.)"),
cfg.BoolOpt("pure_eradicate_on_delete",
default=False,
help="When enabled, all Pure volumes, snapshots, and "
"protection groups will be eradicated at the time of "
"deletion in Cinder. Data will NOT be recoverable after "
"a delete with this set to True! When disabled, volumes "
"and snapshots will go into pending eradication state "
"and can be recovered."
)
]
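# Example cinder.conf stanza wiring these options to a backend (a sketch;
# the section name, IP and token are placeholders):
#
# [puredriver-1]
# volume_backend_name = puredriver-1
# volume_driver = cinder.volume.drivers.pure.PureISCSIDriver
# san_ip = <array management VIP>
# pure_api_token = <API token from the array>
# pure_eradicate_on_delete = false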
CONF = cfg.CONF
CONF.register_opts(PURE_OPTS)
INVALID_CHARACTERS = re.compile(r"[^-a-zA-Z0-9]")
GENERATED_NAME = re.compile(r".*-[a-f0-9]{32}-cinder$")
REPLICATION_CG_NAME = "cinder-group"
CHAP_SECRET_KEY = "PURE_TARGET_CHAP_SECRET"
ERR_MSG_NOT_EXIST = "does not exist"
ERR_MSG_HOST_NOT_EXIST = "Host " + ERR_MSG_NOT_EXIST
ERR_MSG_NO_SUCH_SNAPSHOT = "No such volume or snapshot"
ERR_MSG_PENDING_ERADICATION = "has been destroyed"
ERR_MSG_ALREADY_EXISTS = "already exists"
ERR_MSG_COULD_NOT_BE_FOUND = "could not be found"
ERR_MSG_ALREADY_INCLUDES = "already includes"
ERR_MSG_ALREADY_ALLOWED = "already allowed on"
ERR_MSG_NOT_CONNECTED = "is not connected"
ERR_MSG_ALREADY_BELONGS = "already belongs to"
ERR_MSG_EXISTING_CONNECTIONS = "cannot be deleted due to existing connections"
ERR_MSG_ALREADY_IN_USE = "already in use"
EXTRA_SPECS_REPL_ENABLED = "replication_enabled"
UNMANAGED_SUFFIX = '-unmanaged'
MANAGE_SNAP_REQUIRED_API_VERSIONS = ['1.4', '1.5']
REPLICATION_REQUIRED_API_VERSIONS = ['1.3', '1.4', '1.5']
REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL = 5 # 5 seconds
REPL_SETTINGS_PROPAGATE_MAX_RETRIES = 36 # 36 * 5 = 180 seconds
HOST_CREATE_MAX_RETRIES = 5
USER_AGENT_BASE = 'OpenStack Cinder'
def pure_driver_debug_trace(f):
"""Log the method entrance and exit including active backend name.
This should only be used on VolumeDriver class methods. It depends on
having a 'self' argument that is a PureBaseVolumeDriver.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
driver = args[0] # self
cls_name = driver.__class__.__name__
method_name = "%(cls_name)s.%(method)s" % {"cls_name": cls_name,
"method": f.__name__}
backend_name = driver._get_current_array()._backend_id
LOG.debug("[%(backend_name)s] Enter %(method_name)s" %
{"method_name": method_name, "backend_name": backend_name})
result = f(*args, **kwargs)
LOG.debug("[%(backend_name)s] Leave %(method_name)s" %
{"method_name": method_name, "backend_name": backend_name})
return result
return wrapper
class PureBaseVolumeDriver(san.SanDriver):
"""Performs volume management on Pure Storage FlashArray."""
SUPPORTED_REST_API_VERSIONS = ['1.2', '1.3', '1.4', '1.5']
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Pure_Storage_CI"
def __init__(self, *args, **kwargs):
execute = kwargs.pop("execute", utils.execute)
super(PureBaseVolumeDriver, self).__init__(execute=execute, *args,
**kwargs)
self.configuration.append_config_values(PURE_OPTS)
self._array = None
self._storage_protocol = None
self._backend_name = (self.configuration.volume_backend_name or
self.__class__.__name__)
self._replication_target_arrays = []
self._replication_pg_name = REPLICATION_CG_NAME
self._replication_interval = None
self._replication_retention_short_term = None
self._replication_retention_long_term = None
self._replication_retention_long_term_per_day = None
self._is_replication_enabled = False
self._active_backend_id = kwargs.get('active_backend_id', None)
self._failed_over_primary_array = None
self._user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
'base': USER_AGENT_BASE,
'class': self.__class__.__name__,
'version': self.VERSION,
'platform': platform.platform()
}
def parse_replication_configs(self):
self._replication_interval = (
self.configuration.pure_replica_interval_default)
self._replication_retention_short_term = (
self.configuration.pure_replica_retention_short_term_default)
self._replication_retention_long_term = (
self.configuration.pure_replica_retention_long_term_default)
self._replication_retention_long_term_per_day = (
self.configuration.
pure_replica_retention_long_term_per_day_default)
retention_policy = self._generate_replication_retention()
replication_devices = self.configuration.safe_get(
'replication_device')
primary_array = self._get_current_array()
if replication_devices:
for replication_device in replication_devices:
backend_id = replication_device["backend_id"]
san_ip = replication_device["san_ip"]
api_token = replication_device["api_token"]
verify_https = replication_device.get("ssl_cert_verify", False)
ssl_cert_path = replication_device.get("ssl_cert_path", None)
target_array = self._get_flasharray(
san_ip,
api_token,
verify_https=verify_https,
ssl_cert_path=ssl_cert_path
)
target_array._backend_id = backend_id
LOG.debug("Adding san_ip %(san_ip)s to replication_targets.",
{"san_ip": san_ip})
api_version = target_array.get_rest_version()
if api_version not in REPLICATION_REQUIRED_API_VERSIONS:
msg = _('Unable to do replication with Purity REST '
'API version %(api_version)s, requires one of '
'%(required_versions)s.') % {
'api_version': api_version,
'required_versions': REPLICATION_REQUIRED_API_VERSIONS
}
raise exception.PureDriverException(reason=msg)
target_array_info = target_array.get()
target_array.array_name = target_array_info["array_name"]
target_array.array_id = target_array_info["id"]
LOG.debug("secondary array name: %s", target_array.array_name)
LOG.debug("secondary array id: %s", target_array.array_id)
self._replication_target_arrays.append(target_array)
self._setup_replicated_pgroups(primary_array,
self._replication_target_arrays,
self._replication_pg_name,
self._replication_interval,
retention_policy)
def do_setup(self, context):
"""Performs driver initialization steps that could raise exceptions."""
if purestorage is None:
msg = _("Missing 'purestorage' python module, ensure the library"
" is installed and available.")
raise exception.PureDriverException(msg)
# Raises PureDriverException if unable to connect and PureHTTPError
# if unable to authenticate.
purestorage.FlashArray.supported_rest_versions = \
self.SUPPORTED_REST_API_VERSIONS
self._array = self._get_flasharray(
self.configuration.san_ip,
api_token=self.configuration.pure_api_token,
verify_https=self.configuration.driver_ssl_cert_verify,
ssl_cert_path=self.configuration.driver_ssl_cert_path
)
self._array._backend_id = self._backend_name
LOG.debug("Primary array backend_id: %s",
self.configuration.config_group)
LOG.debug("Primary array name: %s", self._array.array_name)
LOG.debug("Primary array id: %s", self._array.array_id)
self.do_setup_replication()
# If we have failed over at some point we need to adjust our current
# array based on the one that we have failed over to
if (self._active_backend_id is not None and
self._active_backend_id != self._array._backend_id):
for array in self._replication_target_arrays:
if array._backend_id == self._active_backend_id:
self._failed_over_primary_array = self._array
self._array = array
break
def do_setup_replication(self):
replication_devices = self.configuration.safe_get(
'replication_device')
if replication_devices:
self.parse_replication_configs()
self._is_replication_enabled = True
def check_for_setup_error(self):
# Avoid inheriting check_for_setup_error from SanDriver, which checks
# for san_password or san_private_key, not relevant to our driver.
pass
@pure_driver_debug_trace
def create_volume(self, volume):
"""Creates a volume."""
vol_name = self._get_vol_name(volume)
vol_size = volume["size"] * units.Gi
current_array = self._get_current_array()
current_array.create_volume(vol_name, vol_size)
self._add_to_group_if_needed(volume, vol_name)
self._enable_replication_if_needed(current_array, volume)
@pure_driver_debug_trace
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
vol_name = self._get_vol_name(volume)
if snapshot['group_snapshot'] or snapshot['cgsnapshot']:
snap_name = self._get_pgroup_snap_name_from_snapshot(snapshot)
else:
snap_name = self._get_snap_name(snapshot)
if not snap_name:
msg = _('Unable to determine snapshot name in Purity for snapshot '
'%(id)s.') % {'id': snapshot['id']}
raise exception.PureDriverException(reason=msg)
current_array = self._get_current_array()
current_array.copy_volume(snap_name, vol_name)
self._extend_if_needed(current_array,
vol_name,
snapshot["volume_size"],
volume["size"])
self._add_to_group_if_needed(volume, vol_name)
self._enable_replication_if_needed(current_array, volume)
def _enable_replication_if_needed(self, array, volume):
if self._is_volume_replicated_type(volume):
self._enable_replication(array, volume)
def _enable_replication(self, array, volume):
"""Add volume to replicated protection group."""
try:
array.set_pgroup(self._replication_pg_name,
addvollist=[self._get_vol_name(volume)])
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if (err.code == 400 and
ERR_MSG_ALREADY_BELONGS in err.text):
# Happens if the volume already added to PG.
ctxt.reraise = False
LOG.warning("Adding Volume to Protection Group "
"failed with message: %s", err.text)
@pure_driver_debug_trace
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
vol_name = self._get_vol_name(volume)
src_name = self._get_vol_name(src_vref)
# Check which backend the source volume is on. In case of failover
# the source volume may be on the secondary array.
current_array = self._get_current_array()
current_array.copy_volume(src_name, vol_name)
self._extend_if_needed(current_array,
vol_name,
src_vref["size"],
volume["size"])
self._add_to_group_if_needed(volume, vol_name)
self._enable_replication_if_needed(current_array, volume)
def _extend_if_needed(self, array, vol_name, src_size, vol_size):
"""Extend the volume from size src_size to size vol_size."""
if vol_size > src_size:
vol_size = vol_size * units.Gi
array.extend_volume(vol_name, vol_size)
@pure_driver_debug_trace
def delete_volume(self, volume):
"""Disconnect all hosts and delete the volume"""
vol_name = self._get_vol_name(volume)
current_array = self._get_current_array()
try:
connected_hosts = current_array.list_volume_private_connections(
vol_name)
for host_info in connected_hosts:
host_name = host_info["host"]
self._disconnect_host(current_array, host_name, vol_name)
current_array.destroy_volume(vol_name)
if self.configuration.pure_eradicate_on_delete:
current_array.eradicate_volume(vol_name)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if (err.code == 400 and
ERR_MSG_NOT_EXIST in err.text):
# Happens if the volume does not exist.
ctxt.reraise = False
LOG.warning("Volume deletion failed with message: %s",
err.text)
@pure_driver_debug_trace
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
# Get current array in case we have failed over via replication.
current_array = self._get_current_array()
vol_name, snap_suff = self._get_snap_name(snapshot).split(".")
current_array.create_snapshot(vol_name, suffix=snap_suff)
@pure_driver_debug_trace
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
# Get current array in case we have failed over via replication.
current_array = self._get_current_array()
snap_name = self._get_snap_name(snapshot)
try:
current_array.destroy_volume(snap_name)
if self.configuration.pure_eradicate_on_delete:
current_array.eradicate_volume(snap_name)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400 and (
ERR_MSG_NOT_EXIST in err.text or
ERR_MSG_NO_SUCH_SNAPSHOT in err.text or
ERR_MSG_PENDING_ERADICATION in err.text):
# Happens if the snapshot does not exist.
ctxt.reraise = False
LOG.warning("Unable to delete snapshot, assuming "
"already deleted. Error: %s", err.text)
def ensure_export(self, context, volume):
pass
def create_export(self, context, volume, connector):
pass
def initialize_connection(self, volume, connector):
"""Connect the volume to the specified initiator in Purity.
This implementation is specific to the host type (iSCSI, FC, etc).
"""
raise NotImplementedError
def _get_host(self, array, connector):
"""Get a Purity Host that corresponds to the host in the connector.
This implementation is specific to the host type (iSCSI, FC, etc).
"""
raise NotImplementedError
def _disconnect(self, array, volume, connector, **kwargs):
vol_name = self._get_vol_name(volume)
host = self._get_host(array, connector)
if host:
host_name = host["name"]
result = self._disconnect_host(array, host_name, vol_name)
else:
LOG.error("Unable to disconnect host from volume, could not "
"determine Purity host")
result = False
return result
@pure_driver_debug_trace
def terminate_connection(self, volume, connector, **kwargs):
"""Terminate connection."""
# Get current array in case we have failed over via replication.
current_array = self._get_current_array()
self._disconnect(current_array, volume, connector, **kwargs)
@pure_driver_debug_trace
def _disconnect_host(self, array, host_name, vol_name):
"""Return value indicates if host should be cleaned up."""
try:
array.disconnect_host(host_name, vol_name)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400 and ERR_MSG_NOT_CONNECTED in err.text:
# Happens if the host and volume are not connected.
ctxt.reraise = False
LOG.error("Disconnection failed with message: "
"%(msg)s.", {"msg": err.text})
connections = None
try:
connections = array.list_host_connections(host_name, private=True)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400 and ERR_MSG_NOT_EXIST in err.text:
ctxt.reraise = False
# Assume still used if volumes are attached
host_still_used = bool(connections)
if GENERATED_NAME.match(host_name) and not host_still_used:
LOG.info("Attempting to delete unneeded host %(host_name)r.",
{"host_name": host_name})
try:
array.delete_host(host_name)
host_still_used = False
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400:
if ERR_MSG_NOT_EXIST in err.text:
# Happens if the host is already deleted.
# This is fine though, just log so we know what
# happened.
ctxt.reraise = False
host_still_used = False
LOG.debug("Purity host deletion failed: "
"%(msg)s.", {"msg": err.text})
if ERR_MSG_EXISTING_CONNECTIONS in err.text:
# If someone added a connection underneath us
# that's ok, just keep going.
ctxt.reraise = False
host_still_used = True
LOG.debug("Purity host deletion ignored: %(msg)s",
{"msg": err.text})
return not host_still_used
@pure_driver_debug_trace
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service.
If 'refresh' is True, run the update first.
"""
if refresh:
LOG.debug("Updating volume stats.")
self._update_stats()
return self._stats
def _update_stats(self):
"""Set self._stats with relevant information."""
current_array = self._get_current_array()
# Collect info from the array
space_info = current_array.get(space=True)
perf_info = current_array.get(action='monitor')[0] # Always index 0
hosts = current_array.list_hosts()
snaps = current_array.list_volumes(snap=True, pending=True)
pgroups = current_array.list_pgroups(pending=True)
# Perform some translations and calculations
total_capacity = float(space_info["capacity"]) / units.Gi
used_space = float(space_info["total"]) / units.Gi
free_space = float(total_capacity - used_space)
prov_space, total_vols = self._get_provisioned_space()
total_hosts = len(hosts)
total_snaps = len(snaps)
total_pgroups = len(pgroups)
provisioned_space = float(prov_space) / units.Gi
thin_provisioning = self._get_thin_provisioning(provisioned_space,
used_space)
# Start with some required info
data = dict(
volume_backend_name=self._backend_name,
vendor_name='Pure Storage',
driver_version=self.VERSION,
storage_protocol=self._storage_protocol,
)
# Add flags for supported features
data['consistencygroup_support'] = True
data['thin_provisioning_support'] = True
data['multiattach'] = False
data['QoS_support'] = False
# Add capacity info for scheduler
data['total_capacity_gb'] = total_capacity
data['free_capacity_gb'] = free_space
data['reserved_percentage'] = self.configuration.reserved_percentage
data['provisioned_capacity'] = provisioned_space
data['max_over_subscription_ratio'] = thin_provisioning
# Add the filtering/goodness functions
data['filter_function'] = self.get_filter_function()
data['goodness_function'] = self.get_goodness_function()
# Add array metadata counts for filtering and weighing functions
data['total_volumes'] = total_vols
data['total_snapshots'] = total_snaps
data['total_hosts'] = total_hosts
data['total_pgroups'] = total_pgroups
# Add performance stats for filtering and weighing functions
# IOPS
data['writes_per_sec'] = perf_info['writes_per_sec']
data['reads_per_sec'] = perf_info['reads_per_sec']
# Bandwidth
data['input_per_sec'] = perf_info['input_per_sec']
data['output_per_sec'] = perf_info['output_per_sec']
# Latency
data['usec_per_read_op'] = perf_info['usec_per_read_op']
data['usec_per_write_op'] = perf_info['usec_per_write_op']
data['queue_depth'] = perf_info['queue_depth']
# Replication
data["replication_enabled"] = self._is_replication_enabled
data["replication_type"] = ["async"]
data["replication_count"] = len(self._replication_target_arrays)
data["replication_targets"] = [array._backend_id for array
in self._replication_target_arrays]
self._stats = data
def _get_provisioned_space(self):
"""Sum up provisioned size of all volumes on array"""
volumes = self._get_current_array().list_volumes(pending=True)
return sum(item["size"] for item in volumes), len(volumes)
def _get_thin_provisioning(self, provisioned_space, used_space):
"""Get the current value for the thin provisioning ratio.
If pure_automatic_max_oversubscription_ratio is True we will calculate
a value, if not we will respect the configuration option for the
max_over_subscription_ratio.
"""
if (self.configuration.pure_automatic_max_oversubscription_ratio and
used_space != 0 and provisioned_space != 0):
# If array is empty we can not calculate a max oversubscription
# ratio. In this case we look to the config option as a starting
# point. Once some volumes are actually created and some data is
# stored on the array a much more accurate number will be
# presented based on current usage.
thin_provisioning = provisioned_space / used_space
else:
thin_provisioning = self.configuration.max_over_subscription_ratio
return thin_provisioning
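    # Worked example for _get_thin_provisioning above (a sketch): with
    # 1000 GiB provisioned against 100 GiB actually used, the automatic mode
    # reports 1000 / 100 = 10.0, letting the scheduler oversubscribe the
    # backend tenfold; otherwise max_over_subscription_ratio is used as-is.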
@pure_driver_debug_trace
def extend_volume(self, volume, new_size):
"""Extend volume to new_size."""
# Get current array in case we have failed over via replication.
current_array = self._get_current_array()
vol_name = self._get_vol_name(volume)
new_size = new_size * units.Gi
current_array.extend_volume(vol_name, new_size)
def _add_volume_to_consistency_group(self, group_id, vol_name):
pgroup_name = self._get_pgroup_name_from_id(group_id)
current_array = self._get_current_array()
current_array.set_pgroup(pgroup_name, addvollist=[vol_name])
@pure_driver_debug_trace
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
current_array = self._get_current_array()
current_array.create_pgroup(self._get_pgroup_name_from_id(group.id))
model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
return model_update
def _create_cg_from_cgsnap(self, volumes, snapshots):
"""Creates a new consistency group from a cgsnapshot.
The new volumes will be consistent with the snapshot.
"""
for volume, snapshot in zip(volumes, snapshots):
self.create_volume_from_snapshot(volume, snapshot)
def _create_cg_from_cg(self, group, source_group, volumes, source_vols):
"""Creates a new consistency group from an existing cg.
The new volumes will be in a consistent state, but this requires
taking a new temporary group snapshot and cloning from that.
"""
pgroup_name = self._get_pgroup_name_from_id(source_group.id)
tmp_suffix = '%s-tmp' % uuid.uuid4()
tmp_pgsnap_name = '%(pgroup_name)s.%(pgsnap_suffix)s' % {
'pgroup_name': pgroup_name,
'pgsnap_suffix': tmp_suffix,
}
LOG.debug('Creating temporary Protection Group snapshot %(snap_name)s '
'while cloning Consistency Group %(source_group)s.',
{'snap_name': tmp_pgsnap_name,
'source_group': source_group.id})
current_array = self._get_current_array()
current_array.create_pgroup_snapshot(pgroup_name, suffix=tmp_suffix)
try:
for source_vol, cloned_vol in zip(source_vols, volumes):
source_snap_name = self._get_pgroup_vol_snap_name(
pgroup_name,
tmp_suffix,
self._get_vol_name(source_vol)
)
cloned_vol_name = self._get_vol_name(cloned_vol)
current_array.copy_volume(source_snap_name, cloned_vol_name)
self._add_volume_to_consistency_group(
group.id,
cloned_vol_name
)
finally:
self._delete_pgsnapshot(tmp_pgsnap_name)
@pure_driver_debug_trace
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
self.create_consistencygroup(context, group)
if cgsnapshot and snapshots:
self._create_cg_from_cgsnap(volumes,
snapshots)
elif source_cg:
self._create_cg_from_cg(group, source_cg, volumes, source_vols)
return None, None
@pure_driver_debug_trace
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
try:
pgroup_name = self._get_pgroup_name_from_id(group.id)
current_array = self._get_current_array()
current_array.destroy_pgroup(pgroup_name)
if self.configuration.pure_eradicate_on_delete:
current_array.eradicate_pgroup(pgroup_name)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if (err.code == 400 and
(ERR_MSG_PENDING_ERADICATION in err.text or
ERR_MSG_NOT_EXIST in err.text)):
# Treat these as a "success" case since we are trying
# to delete them anyway.
ctxt.reraise = False
LOG.warning("Unable to delete Protection Group: %s",
err.text)
for volume in volumes:
self.delete_volume(volume)
return None, None
@pure_driver_debug_trace
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
pgroup_name = self._get_pgroup_name_from_id(group.id)
if add_volumes:
addvollist = [self._get_vol_name(vol) for vol in add_volumes]
else:
addvollist = []
if remove_volumes:
remvollist = [self._get_vol_name(vol) for vol in remove_volumes]
else:
remvollist = []
current_array = self._get_current_array()
current_array.set_pgroup(pgroup_name, addvollist=addvollist,
remvollist=remvollist)
return None, None, None
@pure_driver_debug_trace
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot."""
cg_id = self._get_group_id_from_snap(cgsnapshot)
pgroup_name = self._get_pgroup_name_from_id(cg_id)
pgsnap_suffix = self._get_pgroup_snap_suffix(cgsnapshot)
current_array = self._get_current_array()
current_array.create_pgroup_snapshot(pgroup_name, suffix=pgsnap_suffix)
return None, None
def _delete_pgsnapshot(self, pgsnap_name):
current_array = self._get_current_array()
try:
# FlashArray.destroy_pgroup is also used for deleting
# pgroup snapshots. The underlying REST API is identical.
current_array.destroy_pgroup(pgsnap_name)
if self.configuration.pure_eradicate_on_delete:
current_array.eradicate_pgroup(pgsnap_name)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if (err.code == 400 and
(ERR_MSG_PENDING_ERADICATION in err.text or
ERR_MSG_NOT_EXIST in err.text)):
# Treat these as a "success" case since we are trying
# to delete them anyway.
ctxt.reraise = False
LOG.warning("Unable to delete Protection Group "
"Snapshot: %s", err.text)
@pure_driver_debug_trace
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot."""
pgsnap_name = self._get_pgroup_snap_name(cgsnapshot)
self._delete_pgsnapshot(pgsnap_name)
return None, None
def _validate_manage_existing_ref(self, existing_ref, is_snap=False):
"""Ensure that an existing_ref is valid and return volume info
If the ref is not valid throw a ManageExistingInvalidReference
exception with an appropriate error.
Will return volume or snapshot information from the array for
the object specified by existing_ref.
"""
if "name" not in existing_ref or not existing_ref["name"]:
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=_("manage_existing requires a 'name'"
" key to identify an existing volume."))
if is_snap:
# Purity snapshot names are prefixed with the source volume name.
ref_vol_name, ref_snap_suffix = existing_ref['name'].split('.')
else:
ref_vol_name = existing_ref['name']
current_array = self._get_current_array()
try:
volume_info = current_array.get_volume(ref_vol_name, snap=is_snap)
if volume_info:
if is_snap:
for snap in volume_info:
if snap['name'] == existing_ref['name']:
return snap
else:
return volume_info
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if (err.code == 400 and
ERR_MSG_NOT_EXIST in err.text):
ctxt.reraise = False
# If volume information was unable to be retrieved we need
# to throw a Invalid Reference exception.
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=_("Unable to find Purity ref with name=%s") % ref_vol_name)
def _add_to_group_if_needed(self, volume, vol_name):
if volume['group_id']:
# If the query blows up just let it raise up the stack, the volume
# should be put into an error state
group = volume_utils.group_get_by_id(volume['group_id'])
if volume_utils.is_group_a_cg_snapshot_type(group):
self._add_volume_to_consistency_group(
volume['group_id'],
vol_name
)
elif volume['consistencygroup_id']:
self._add_volume_to_consistency_group(
volume['consistencygroup_id'],
vol_name
)
def create_group(self, ctxt, group):
"""Creates a group.
:param ctxt: the context of the caller.
:param group: the Group object of the group to be created.
:returns: model_update
"""
if volume_utils.is_group_a_cg_snapshot_type(group):
return self.create_consistencygroup(ctxt, group)
# If it wasn't a consistency group request ignore it and we'll rely on
# the generic group implementation.
raise NotImplementedError()
def delete_group(self, ctxt, group, volumes):
"""Deletes a group.
:param ctxt: the context of the caller.
:param group: the Group object of the group to be deleted.
:param volumes: a list of Volume objects in the group.
:returns: model_update, volumes_model_update
"""
if volume_utils.is_group_a_cg_snapshot_type(group):
return self.delete_consistencygroup(ctxt, group, volumes)
# If it wasn't a consistency group request ignore it and we'll rely on
# the generic group implementation.
raise NotImplementedError()
def update_group(self, ctxt, group,
add_volumes=None, remove_volumes=None):
"""Updates a group.
:param ctxt: the context of the caller.
:param group: the Group object of the group to be updated.
:param add_volumes: a list of Volume objects to be added.
:param remove_volumes: a list of Volume objects to be removed.
:returns: model_update, add_volumes_update, remove_volumes_update
"""
if volume_utils.is_group_a_cg_snapshot_type(group):
return self.update_consistencygroup(ctxt,
group,
add_volumes,
remove_volumes)
# If it wasn't a consistency group request ignore it and we'll rely on
# the generic group implementation.
raise NotImplementedError()
def create_group_from_src(self, ctxt, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
"""Creates a group from source.
:param ctxt: the context of the caller.
:param group: the Group object to be created.
:param volumes: a list of Volume objects in the group.
:param group_snapshot: the GroupSnapshot object as source.
:param snapshots: a list of snapshot objects in group_snapshot.
:param source_group: the Group object as source.
:param source_vols: a list of volume objects in the source_group.
:returns: model_update, volumes_model_update
"""
if volume_utils.is_group_a_cg_snapshot_type(group):
return self.create_consistencygroup_from_src(ctxt,
group,
volumes,
group_snapshot,
snapshots,
source_group,
source_vols)
# If it wasn't a consistency group request ignore it and we'll rely on
# the generic group implementation.
raise NotImplementedError()
def create_group_snapshot(self, ctxt, group_snapshot, snapshots):
"""Creates a group_snapshot.
:param ctxt: the context of the caller.
:param group_snapshot: the GroupSnapshot object to be created.
:param snapshots: a list of Snapshot objects in the group_snapshot.
:returns: model_update, snapshots_model_update
"""
if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
return self.create_cgsnapshot(ctxt, group_snapshot, snapshots)
# If it wasn't a consistency group request ignore it and we'll rely on
# the generic group implementation.
raise NotImplementedError()
def delete_group_snapshot(self, ctxt, group_snapshot, snapshots):
"""Deletes a group_snapshot.
:param ctxt: the context of the caller.
:param group_snapshot: the GroupSnapshot object to be deleted.
:param snapshots: a list of snapshot objects in the group_snapshot.
:returns: model_update, snapshots_model_update
"""
if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
return self.delete_cgsnapshot(ctxt, group_snapshot, snapshots)
# If it wasn't a consistency group request ignore it and we'll rely on
# the generic group implementation.
raise NotImplementedError()
@pure_driver_debug_trace
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object under Cinder management.
We expect a volume name in the existing_ref that matches one in Purity.
"""
self._validate_manage_existing_ref(existing_ref)
ref_vol_name = existing_ref['name']
current_array = self._get_current_array()
connected_hosts = \
current_array.list_volume_private_connections(ref_vol_name)
if len(connected_hosts) > 0:
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=_("%(driver)s manage_existing cannot manage a volume "
"connected to hosts. Please disconnect this volume "
"from existing hosts before importing"
) % {'driver': self.__class__.__name__})
new_vol_name = self._get_vol_name(volume)
LOG.info("Renaming existing volume %(ref_name)s to %(new_name)s",
{"ref_name": ref_vol_name, "new_name": new_vol_name})
self._rename_volume_object(ref_vol_name,
new_vol_name,
raise_not_exist=True)
return None
@pure_driver_debug_trace
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
We expect a volume name in the existing_ref that matches one in Purity.
"""
volume_info = self._validate_manage_existing_ref(existing_ref)
size = self._round_bytes_to_gib(volume_info['size'])
return size
def _rename_volume_object(self, old_name, new_name, raise_not_exist=False):
"""Rename a volume object (could be snapshot) in Purity.
        This will not raise an exception if the object does not exist,
        unless raise_not_exist is set to True.
"""
current_array = self._get_current_array()
try:
current_array.rename_volume(old_name, new_name)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if (err.code == 400 and
ERR_MSG_NOT_EXIST in err.text):
ctxt.reraise = raise_not_exist
LOG.warning("Unable to rename %(old_name)s, error "
"message: %(error)s",
{"old_name": old_name, "error": err.text})
return new_name
@pure_driver_debug_trace
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
The volume will be renamed with "-unmanaged" as a suffix
"""
vol_name = self._get_vol_name(volume)
unmanaged_vol_name = vol_name + UNMANAGED_SUFFIX
LOG.info("Renaming existing volume %(ref_name)s to %(new_name)s",
{"ref_name": vol_name, "new_name": unmanaged_vol_name})
self._rename_volume_object(vol_name, unmanaged_vol_name)
def _verify_manage_snap_api_requirements(self):
current_array = self._get_current_array()
api_version = current_array.get_rest_version()
if api_version not in MANAGE_SNAP_REQUIRED_API_VERSIONS:
msg = _('Unable to do manage snapshot operations with Purity REST '
'API version %(api_version)s, requires '
'%(required_versions)s.') % {
'api_version': api_version,
'required_versions': MANAGE_SNAP_REQUIRED_API_VERSIONS
}
raise exception.PureDriverException(reason=msg)
def manage_existing_snapshot(self, snapshot, existing_ref):
"""Brings an existing backend storage object under Cinder management.
We expect a snapshot name in the existing_ref that matches one in
Purity.
"""
self._verify_manage_snap_api_requirements()
self._validate_manage_existing_ref(existing_ref, is_snap=True)
ref_snap_name = existing_ref['name']
new_snap_name = self._get_snap_name(snapshot)
LOG.info("Renaming existing snapshot %(ref_name)s to "
"%(new_name)s", {"ref_name": ref_snap_name,
"new_name": new_snap_name})
self._rename_volume_object(ref_snap_name,
new_snap_name,
raise_not_exist=True)
return None
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
"""Return size of snapshot to be managed by manage_existing.
We expect a snapshot name in the existing_ref that matches one in
Purity.
"""
self._verify_manage_snap_api_requirements()
snap_info = self._validate_manage_existing_ref(existing_ref,
is_snap=True)
size = self._round_bytes_to_gib(snap_info['size'])
return size
def unmanage_snapshot(self, snapshot):
"""Removes the specified snapshot from Cinder management.
Does not delete the underlying backend storage object.
        The snapshot will be renamed with "-unmanaged" as a suffix on the
        backend.
"""
self._verify_manage_snap_api_requirements()
snap_name = self._get_snap_name(snapshot)
unmanaged_snap_name = snap_name + UNMANAGED_SUFFIX
LOG.info("Renaming existing snapshot %(ref_name)s to "
"%(new_name)s", {"ref_name": snap_name,
"new_name": unmanaged_snap_name})
self._rename_volume_object(snap_name, unmanaged_snap_name)
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
sort_keys, sort_dirs):
"""List volumes on the backend available for management by Cinder.
Rule out volumes that are attached to a Purity host or that
are already in the list of cinder_volumes. We return references
of the volume names for any others.
"""
array = self._get_current_array()
pure_vols = array.list_volumes()
hosts_with_connections = array.list_hosts(all=True)
# Put together a map of volumes that are connected to hosts
connected_vols = {}
for host in hosts_with_connections:
vol = host.get('vol')
if vol:
connected_vols[vol] = host['name']
# Put together a map of existing cinder volumes on the array
# so we can lookup cinder id's by purity volume names
existing_vols = {}
for cinder_vol in cinder_volumes:
existing_vols[self._get_vol_name(cinder_vol)] = cinder_vol.name_id
manageable_vols = []
for pure_vol in pure_vols:
vol_name = pure_vol['name']
cinder_id = existing_vols.get(vol_name)
is_safe = True
reason_not_safe = None
host = connected_vols.get(vol_name)
if host:
is_safe = False
reason_not_safe = _('Volume connected to host %s.') % host
if cinder_id:
is_safe = False
reason_not_safe = _('Volume already managed.')
manageable_vols.append({
'reference': {'name': vol_name},
'size': self._round_bytes_to_gib(pure_vol['size']),
'safe_to_manage': is_safe,
'reason_not_safe': reason_not_safe,
'cinder_id': cinder_id,
'extra_info': None,
})
return volume_utils.paginate_entries_list(
manageable_vols, marker, limit, offset, sort_keys, sort_dirs)
def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
sort_keys, sort_dirs):
"""List snapshots on the backend available for management by Cinder."""
array = self._get_current_array()
pure_snapshots = array.list_volumes(snap=True)
# Put together a map of existing cinder snapshots on the array
# so we can lookup cinder id's by purity snapshot names
existing_snapshots = {}
for cinder_snap in cinder_snapshots:
name = self._get_snap_name(cinder_snap)
existing_snapshots[name] = cinder_snap.id
manageable_snaps = []
for pure_snap in pure_snapshots:
snap_name = pure_snap['name']
cinder_id = existing_snapshots.get(snap_name)
is_safe = True
reason_not_safe = None
if cinder_id:
is_safe = False
reason_not_safe = _("Snapshot already managed.")
manageable_snaps.append({
'reference': {'name': snap_name},
'size': self._round_bytes_to_gib(pure_snap['size']),
'safe_to_manage': is_safe,
'reason_not_safe': reason_not_safe,
'cinder_id': cinder_id,
'extra_info': None,
'source_reference': {'name': pure_snap['source']},
})
return volume_utils.paginate_entries_list(
manageable_snaps, marker, limit, offset, sort_keys, sort_dirs)
@staticmethod
def _round_bytes_to_gib(size):
return int(math.ceil(float(size) / units.Gi))
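    # For example, _round_bytes_to_gib(1) == 1 and
    # _round_bytes_to_gib(units.Gi + 1) == 2: sizes are always rounded up so
    # Cinder never under-reports the space a Purity object occupies.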
def _get_flasharray(self, san_ip, api_token, rest_version=None,
verify_https=None, ssl_cert_path=None):
array = purestorage.FlashArray(san_ip,
api_token=api_token,
rest_version=rest_version,
verify_https=verify_https,
ssl_cert=ssl_cert_path,
user_agent=self._user_agent)
array_info = array.get()
array.array_name = array_info["array_name"]
array.array_id = array_info["id"]
LOG.debug("connected to %(array_name)s with REST API %(api_version)s",
{"array_name": array.array_name,
"api_version": array._rest_version})
return array
    @staticmethod
    def _client_version_greater_than(version):
        module_version = [int(v) for v in purestorage.VERSION.split('.')]
        # Compare component-wise from the most significant part and stop at
        # the first difference, so a larger minor version cannot outweigh a
        # smaller major version (e.g. 0.9.0 is not greater than 1.4.0).
        for limit_version, actual_version in zip(version, module_version):
            if actual_version > limit_version:
                return True
            if actual_version < limit_version:
                return False
        return False
@staticmethod
def _get_vol_name(volume):
"""Return the name of the volume Purity will use."""
return volume["name"] + "-cinder"
@staticmethod
def _get_snap_name(snapshot):
"""Return the name of the snapshot that Purity will use."""
return "%s-cinder.%s" % (snapshot["volume_name"], snapshot["name"])
@staticmethod
def _get_pgroup_name_from_id(id):
return "consisgroup-%s-cinder" % id
@staticmethod
def _get_pgroup_snap_suffix(group_snapshot):
return "cgsnapshot-%s-cinder" % group_snapshot['id']
@staticmethod
def _get_group_id_from_snap(group_snap):
        # We don't really care what kind of group it is; if we are calling
        # this, look for a group_id and fall back to a consistencygroup_id.
id = None
try:
id = group_snap['group_id']
except AttributeError:
pass
if id is None:
try:
id = group_snap['consistencygroup_id']
except AttributeError:
pass
return id
@classmethod
def _get_pgroup_snap_name(cls, group_snapshot):
"""Return the name of the pgroup snapshot that Purity will use"""
group_id = cls._get_group_id_from_snap(group_snapshot)
return "%s.%s" % (cls._get_pgroup_name_from_id(group_id),
cls._get_pgroup_snap_suffix(group_snapshot))
@staticmethod
def _get_pgroup_vol_snap_name(pg_name, pgsnap_suffix, volume_name):
return "%(pgroup_name)s.%(pgsnap_suffix)s.%(volume_name)s" % {
'pgroup_name': pg_name,
'pgsnap_suffix': pgsnap_suffix,
'volume_name': volume_name,
}
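    # With hypothetical ids this yields names of the form
    # "consisgroup-<gid>-cinder.cgsnapshot-<sid>-cinder.volume-<vid>-cinder",
    # matching how Purity names the per-volume snapshots contained in a
    # protection group snapshot.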
def _get_pgroup_snap_name_from_snapshot(self, snapshot):
"""Return the name of the snapshot that Purity will use."""
group_snap = None
if snapshot.group_snapshot:
group_snap = snapshot.group_snapshot
elif snapshot.cgsnapshot:
group_snap = snapshot.cgsnapshot
pg_vol_snap_name = "%(group_snap)s.%(volume_name)s-cinder" % {
'group_snap': self._get_pgroup_snap_name(group_snap),
'volume_name': snapshot.volume_name
}
return pg_vol_snap_name
@staticmethod
def _generate_purity_host_name(name):
"""Return a valid Purity host name based on the name passed in."""
if len(name) > 23:
name = name[0:23]
name = INVALID_CHARACTERS.sub("-", name)
name = name.lstrip("-")
return "{name}-{uuid}-cinder".format(name=name, uuid=uuid.uuid4().hex)
@staticmethod
def _connect_host_to_vol(array, host_name, vol_name):
connection = None
try:
connection = array.connect_host(host_name, vol_name)
except purestorage.PureHTTPError as err:
if err.code == 400 and ERR_MSG_HOST_NOT_EXIST in err.text:
LOG.debug('Unable to attach volume to host: %s', err.text)
raise exception.PureRetryableException()
with excutils.save_and_reraise_exception() as ctxt:
if (err.code == 400 and
ERR_MSG_ALREADY_EXISTS in err.text):
# Happens if the volume is already connected to the host.
# Treat this as a success.
ctxt.reraise = False
LOG.debug("Volume connection already exists for Purity "
"host with message: %s", err.text)
# Get the info for the existing connection.
connected_hosts = (
array.list_volume_private_connections(vol_name))
for host_info in connected_hosts:
if host_info["host"] == host_name:
connection = host_info
break
if not connection:
raise exception.PureDriverException(
reason=_("Unable to connect or find connection to host"))
return connection
def retype(self, context, volume, new_type, diff, host):
"""Retype from one volume type to another on the same backend.
For a Pure Array there is currently no differentiation between types
of volumes other than some being part of a protection group to be
replicated.
"""
previous_vol_replicated = self._is_volume_replicated_type(volume)
new_vol_replicated = False
if new_type:
specs = new_type.get("extra_specs")
if specs and EXTRA_SPECS_REPL_ENABLED in specs:
replication_capability = specs[EXTRA_SPECS_REPL_ENABLED]
# Do not validate settings, ignore invalid.
new_vol_replicated = (replication_capability == "<is> True")
if previous_vol_replicated and not new_vol_replicated:
# Remove from protection group.
self._disable_replication(volume)
elif not previous_vol_replicated and new_vol_replicated:
# Add to protection group.
self._enable_replication(self._get_current_array(), volume)
return True, None
@pure_driver_debug_trace
def _disable_replication(self, volume):
"""Disable replication on the given volume."""
current_array = self._get_current_array()
LOG.debug("Disabling replication for volume %(id)s residing on "
"array %(backend_id)s." %
{"id": volume["id"],
"backend_id": current_array._backend_id})
try:
current_array.set_pgroup(self._replication_pg_name,
remvollist=([self._get_vol_name(volume)]))
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if (err.code == 400 and
ERR_MSG_COULD_NOT_BE_FOUND in err.text):
ctxt.reraise = False
LOG.warning("Disable replication on volume failed: "
"already disabled: %s", err.text)
else:
LOG.error("Disable replication on volume failed with "
"message: %s", err.text)
@pure_driver_debug_trace
def failover_host(self, context, volumes, secondary_id=None):
"""Failover backend to a secondary array
        This action will not affect the original volumes in any way; they
        will stay as they are. If a subsequent failover is performed we
will simply overwrite the original (now unmanaged) volumes.
"""
if secondary_id == 'default':
# We are going back to the 'original' driver config, just put
# our current array back to the primary.
if self._failed_over_primary_array:
self._set_current_array(self._failed_over_primary_array)
return secondary_id, []
else:
msg = _('Unable to failback to "default", this can only be '
'done after a failover has completed.')
raise exception.InvalidReplicationTarget(message=msg)
current_array = self._get_current_array()
LOG.debug("Failover replication for array %(primary)s to "
"%(secondary)s." % {
"primary": current_array._backend_id,
"secondary": secondary_id
})
if secondary_id == current_array._backend_id:
raise exception.InvalidReplicationTarget(
reason=_("Secondary id can not be the same as primary array, "
"backend_id = %(secondary)s.") %
{"secondary": secondary_id}
)
secondary_array, pg_snap = self._find_failover_target(secondary_id)
LOG.debug("Starting failover from %(primary)s to %(secondary)s",
{"primary": current_array.array_name,
"secondary": secondary_array.array_name})
# NOTE(patrickeast): This currently requires a call with REST API 1.3.
# If we need to, create a temporary FlashArray for this operation.
api_version = secondary_array.get_rest_version()
LOG.debug("Current REST API for array id %(id)s is %(api_version)s",
{"id": secondary_array.array_id, "api_version": api_version})
if api_version != '1.3':
target_array = self._get_flasharray(
secondary_array._target,
api_token=secondary_array._api_token,
rest_version='1.3',
verify_https=secondary_array._verify_https,
ssl_cert_path=secondary_array._ssl_cert
)
else:
target_array = secondary_array
volume_snaps = target_array.get_volume(pg_snap['name'],
snap=True,
pgroup=True)
# We only care about volumes that are in the list we are given.
vol_names = set()
for vol in volumes:
vol_names.add(self._get_vol_name(vol))
for snap in volume_snaps:
vol_name = snap['name'].split('.')[-1]
if vol_name in vol_names:
vol_names.remove(vol_name)
LOG.debug('Creating volume %(vol)s from replicated snapshot '
'%(snap)s', {'vol': vol_name, 'snap': snap['name']})
secondary_array.copy_volume(snap['name'],
vol_name,
overwrite=True)
else:
LOG.debug('Ignoring unmanaged volume %(vol)s from replicated '
'snapshot %(snap)s.', {'vol': vol_name,
'snap': snap['name']})
        # Any volumes still left in vol_names had no replicated snapshot on
        # the target and could not be failed over; report them to Cinder as
        # being in an error state.
model_updates = []
for vol in volumes:
if self._get_vol_name(vol) in vol_names:
model_updates.append({
'volume_id': vol['id'],
'updates': {
'status': 'error',
}
})
# After failover we want our current array to be swapped for the
# secondary array we just failed over to.
self._failed_over_primary_array = self._get_current_array()
self._set_current_array(secondary_array)
return secondary_array._backend_id, model_updates
def _does_pgroup_exist(self, array, pgroup_name):
"""Return True/False"""
try:
array.get_pgroup(pgroup_name)
return True
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400 and ERR_MSG_NOT_EXIST in err.text:
ctxt.reraise = False
return False
# Any unexpected exception to be handled by caller.
@pure_driver_debug_trace
@utils.retry(exception.PureDriverException,
REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL,
REPL_SETTINGS_PROPAGATE_MAX_RETRIES)
def _wait_until_target_group_setting_propagates(
self, target_array, pgroup_name_on_target):
# Wait for pgroup to show up on target array.
if self._does_pgroup_exist(target_array, pgroup_name_on_target):
return
else:
            raise exception.PureDriverException(
                message=_('Protection Group not ready.'))
@pure_driver_debug_trace
@utils.retry(exception.PureDriverException,
REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL,
REPL_SETTINGS_PROPAGATE_MAX_RETRIES)
def _wait_until_source_array_allowed(self, source_array, pgroup_name):
result = source_array.get_pgroup(pgroup_name)
if result["targets"][0]["allowed"]:
return
else:
raise exception.PureDriverException(message=_('Replication not '
'allowed yet.'))
def _get_pgroup_name_on_target(self, source_array_name, pgroup_name):
return "%s:%s" % (source_array_name, pgroup_name)
@pure_driver_debug_trace
def _setup_replicated_pgroups(self, primary, secondaries, pg_name,
replication_interval, retention_policy):
self._create_protection_group_if_not_exist(
primary, pg_name)
# Apply retention policies to a protection group.
# These retention policies will be applied on the replicated
# snapshots on the target array.
primary.set_pgroup(pg_name, **retention_policy)
# Configure replication propagation frequency on a
# protection group.
primary.set_pgroup(pg_name,
replicate_frequency=replication_interval)
for target_array in secondaries:
try:
# Configure PG to replicate to target_array.
primary.set_pgroup(pg_name,
addtargetlist=[target_array.array_name])
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400 and (
ERR_MSG_ALREADY_INCLUDES
in err.text):
ctxt.reraise = False
LOG.info("Skipping add target %(target_array)s"
" to protection group %(pgname)s"
" since it's already added.",
{"target_array": target_array.array_name,
"pgname": pg_name})
# Wait until "Target Group" setting propagates to target_array.
pgroup_name_on_target = self._get_pgroup_name_on_target(
primary.array_name, pg_name)
for target_array in secondaries:
self._wait_until_target_group_setting_propagates(
target_array,
pgroup_name_on_target)
try:
# Configure the target_array to allow replication from the
# PG on source_array.
target_array.set_pgroup(pgroup_name_on_target,
allowed=True)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if (err.code == 400 and
ERR_MSG_ALREADY_ALLOWED in err.text):
ctxt.reraise = False
LOG.info("Skipping allow pgroup %(pgname)s on "
"target array %(target_array)s since "
"it is already allowed.",
{"pgname": pg_name,
"target_array": target_array.array_name})
# Wait until source array acknowledges previous operation.
self._wait_until_source_array_allowed(primary, pg_name)
# Start replication on the PG.
primary.set_pgroup(pg_name, replicate_enabled=True)
@pure_driver_debug_trace
def _generate_replication_retention(self):
"""Generates replication retention settings in Purity compatible format
An example of the settings:
target_all_for = 14400 (i.e. 4 hours)
target_per_day = 6
target_days = 4
The settings above configure the target array to retain 4 hours of
the most recent snapshots.
        After the most recent 4 hours, the target will retain 6 snapshots
        per day for the next 4 days.
:return: a dictionary representing replication retention settings
"""
replication_retention = dict(
target_all_for=self._replication_retention_short_term,
target_per_day=self._replication_retention_long_term_per_day,
target_days=self._replication_retention_long_term
)
return replication_retention
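    # With the values sketched in the docstring (target_all_for=14400,
    # target_per_day=6, target_days=4) this returns:
    # {'target_all_for': 14400, 'target_per_day': 6, 'target_days': 4}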
@pure_driver_debug_trace
def _get_latest_replicated_pg_snap(self,
target_array,
source_array_name,
pgroup_name):
# Get all protection group snapshots.
snap_name = "%s:%s" % (source_array_name, pgroup_name)
LOG.debug("Looking for snap %(snap)s on array id %(array_id)s",
{"snap": snap_name, "array_id": target_array.array_id})
pg_snaps = target_array.get_pgroup(snap_name, snap=True, transfer=True)
LOG.debug("Retrieved snapshots on target %(pg_snaps)s",
{"pg_snaps": pg_snaps})
# Only use snapshots that are replicated completely.
pg_snaps_filtered = [s for s in pg_snaps if s["progress"] == 1]
LOG.debug("Filtered list of snapshots %(pg_snaps_filtered)s",
{"pg_snaps_filtered": pg_snaps_filtered})
# Go through the protection group snapshots, latest first ....
# stop when we find required volume snapshot.
pg_snaps_filtered.sort(key=lambda x: x["created"], reverse=True)
LOG.debug("Sorted list of snapshots %(pg_snaps_filtered)s",
{"pg_snaps_filtered": pg_snaps_filtered})
pg_snap = pg_snaps_filtered[0] if pg_snaps_filtered else None
LOG.debug("Selecting snapshot %(pg_snap)s for failover.",
{"pg_snap": pg_snap})
return pg_snap
@pure_driver_debug_trace
def _create_protection_group_if_not_exist(self, source_array, pgname):
try:
source_array.create_pgroup(pgname)
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400 and ERR_MSG_ALREADY_EXISTS in err.text:
# Happens if the PG already exists
ctxt.reraise = False
LOG.warning("Skipping creation of PG %s since it "
"already exists.", pgname)
# We assume PG has already been setup with correct
# replication settings.
return
if err.code == 400 and (
ERR_MSG_PENDING_ERADICATION in err.text):
ctxt.reraise = False
LOG.warning("Protection group %s is deleted but not"
" eradicated - will recreate.", pgname)
source_array.eradicate_pgroup(pgname)
source_array.create_pgroup(pgname)
def _is_volume_replicated_type(self, volume):
ctxt = context.get_admin_context()
replication_flag = False
if volume["volume_type_id"]:
volume_type = volume_types.get_volume_type(
ctxt, volume["volume_type_id"])
specs = volume_type.get("extra_specs")
if specs and EXTRA_SPECS_REPL_ENABLED in specs:
replication_capability = specs[EXTRA_SPECS_REPL_ENABLED]
# Do not validate settings, ignore invalid.
replication_flag = (replication_capability == "<is> True")
return replication_flag
def _find_failover_target(self, secondary):
if not self._replication_target_arrays:
raise exception.PureDriverException(
reason=_("Unable to find failover target, no "
"secondary targets configured."))
secondary_array = None
pg_snap = None
if secondary:
for array in self._replication_target_arrays:
if array._backend_id == secondary:
secondary_array = array
break
if not secondary_array:
raise exception.InvalidReplicationTarget(
reason=_("Unable to determine secondary_array from"
" supplied secondary: %(secondary)s.") %
{"secondary": secondary}
)
pg_snap = self._get_latest_replicated_pg_snap(
secondary_array,
self._get_current_array().array_name,
self._replication_pg_name
)
else:
LOG.debug('No secondary array id specified, checking all targets.')
for array in self._replication_target_arrays:
try:
secondary_array = array
pg_snap = self._get_latest_replicated_pg_snap(
secondary_array,
self._get_current_array().array_name,
self._replication_pg_name
)
if pg_snap:
break
except Exception:
LOG.exception('Error finding replicated pg snapshot '
'on %(secondary)s.',
{'secondary': array._backend_id})
if not secondary_array:
raise exception.PureDriverException(
reason=_("Unable to find viable secondary array from"
"configured targets: %(targets)s.") %
{"targets": six.text_type(self._replication_target_arrays)}
)
if not pg_snap:
raise exception.PureDriverException(
reason=_("Unable to find viable pg snapshot to use for"
"failover on selected secondary array: %(id)s.") %
{"id": secondary_array._backend_id}
)
return secondary_array, pg_snap
def _get_current_array(self):
return self._array
def _set_current_array(self, array):
self._array = array
@interface.volumedriver
class PureISCSIDriver(PureBaseVolumeDriver, san.SanISCSIDriver):
"""OpenStack Volume Driver to support Pure Storage FlashArray.
This version of the driver enables the use of iSCSI for
the underlying storage connectivity with the FlashArray.
"""
VERSION = "6.0.0"
def __init__(self, *args, **kwargs):
execute = kwargs.pop("execute", utils.execute)
super(PureISCSIDriver, self).__init__(execute=execute, *args, **kwargs)
self._storage_protocol = "iSCSI"
def _get_host(self, array, connector):
"""Return dict describing existing Purity host object or None."""
hosts = array.list_hosts()
for host in hosts:
if connector["initiator"] in host["iqn"]:
return host
return None
@pure_driver_debug_trace
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
connection = self._connect(volume, connector)
target_ports = self._get_target_iscsi_ports()
multipath = connector.get("multipath", False)
properties = self._build_connection_properties(connection,
target_ports,
multipath)
if self.configuration.use_chap_auth:
properties["data"]["auth_method"] = "CHAP"
properties["data"]["auth_username"] = connection["auth_username"]
properties["data"]["auth_password"] = connection["auth_password"]
initiator_update = connection.get("initiator_update", False)
if initiator_update:
properties["initiator_update"] = initiator_update
return properties
def _build_connection_properties(self, connection, target_ports,
multipath):
props = {
"driver_volume_type": "iscsi",
"data": {
"target_discovered": False,
"discard": True,
},
}
port_iter = iter(target_ports)
target_luns = []
target_iqns = []
target_portals = []
for port in port_iter:
target_luns.append(connection["lun"])
target_iqns.append(port["iqn"])
target_portals.append(port["portal"])
# If we have multiple ports always report them.
if target_luns and target_iqns and target_portals:
props["data"]["target_luns"] = target_luns
props["data"]["target_iqns"] = target_iqns
props["data"]["target_portals"] = target_portals
return props
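    # A hypothetical result for an array with two iSCSI ports:
    # {'driver_volume_type': 'iscsi',
    #  'data': {'target_discovered': False, 'discard': True,
    #           'target_luns': [1, 1],
    #           'target_iqns': ['iqn...a', 'iqn...b'],
    #           'target_portals': ['10.0.0.1:3260', '10.0.0.2:3260']}}
    # The LUN is repeated once per port so each portal/iqn/lun triple lines up.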
def _get_target_iscsi_ports(self):
"""Return list of iSCSI-enabled port descriptions."""
current_array = self._get_current_array()
ports = current_array.list_ports()
iscsi_ports = [port for port in ports if port["iqn"]]
if not iscsi_ports:
raise exception.PureDriverException(
reason=_("No iSCSI-enabled ports on target array."))
return iscsi_ports
@staticmethod
def _generate_chap_secret():
return volume_utils.generate_password()
def _get_chap_secret_from_init_data(self, initiator):
data = self.driver_utils.get_driver_initiator_data(initiator)
if data:
for d in data:
if d["key"] == CHAP_SECRET_KEY:
return d["value"]
return None
def _get_chap_credentials(self, host, initiator):
username = host
password = self._get_chap_secret_from_init_data(initiator)
if not password:
password = self._generate_chap_secret()
success = self.driver_utils.insert_driver_initiator_data(
initiator, CHAP_SECRET_KEY, password)
if not success:
# The only reason the save would have failed is if someone
# else (read: another thread/instance of the driver) set
# one before we did. In that case just do another query.
password = self._get_chap_secret_from_init_data(initiator)
return username, password
@utils.retry(exception.PureRetryableException,
retries=HOST_CREATE_MAX_RETRIES)
def _connect(self, volume, connector):
"""Connect the host and volume; return dict describing connection."""
iqn = connector["initiator"]
if self.configuration.use_chap_auth:
(chap_username, chap_password) = \
self._get_chap_credentials(connector['host'], iqn)
current_array = self._get_current_array()
vol_name = self._get_vol_name(volume)
host = self._get_host(current_array, connector)
if host:
host_name = host["name"]
LOG.info("Re-using existing purity host %(host_name)r",
{"host_name": host_name})
if self.configuration.use_chap_auth:
if not GENERATED_NAME.match(host_name):
LOG.error("Purity host %(host_name)s is not managed "
"by Cinder and can't have CHAP credentials "
"modified. Remove IQN %(iqn)s from the host "
"to resolve this issue.",
{"host_name": host_name,
"iqn": connector["initiator"]})
raise exception.PureDriverException(
reason=_("Unable to re-use a host that is not "
"managed by Cinder with use_chap_auth=True,"))
elif chap_username is None or chap_password is None:
LOG.error("Purity host %(host_name)s is managed by "
"Cinder but CHAP credentials could not be "
"retrieved from the Cinder database.",
{"host_name": host_name})
raise exception.PureDriverException(
reason=_("Unable to re-use host with unknown CHAP "
"credentials configured."))
else:
host_name = self._generate_purity_host_name(connector["host"])
LOG.info("Creating host object %(host_name)r with IQN:"
" %(iqn)s.", {"host_name": host_name, "iqn": iqn})
try:
current_array.create_host(host_name, iqnlist=[iqn])
except purestorage.PureHTTPError as err:
if (err.code == 400 and
(ERR_MSG_ALREADY_EXISTS in err.text or
ERR_MSG_ALREADY_IN_USE in err.text)):
# If someone created it before we could just retry, we will
# pick up the new host.
LOG.debug('Unable to create host: %s', err.text)
raise exception.PureRetryableException()
if self.configuration.use_chap_auth:
try:
current_array.set_host(host_name,
host_user=chap_username,
host_password=chap_password)
except purestorage.PureHTTPError as err:
if (err.code == 400 and
ERR_MSG_HOST_NOT_EXIST in err.text):
# If the host disappeared out from under us that's ok,
# we will just retry and snag a new host.
LOG.debug('Unable to set CHAP info: %s', err.text)
raise exception.PureRetryableException()
connection = self._connect_host_to_vol(current_array,
host_name,
vol_name)
if self.configuration.use_chap_auth:
connection["auth_username"] = chap_username
connection["auth_password"] = chap_password
return connection
@interface.volumedriver
class PureFCDriver(PureBaseVolumeDriver, driver.FibreChannelDriver):
"""OpenStack Volume Driver to support Pure Storage FlashArray.
This version of the driver enables the use of Fibre Channel for
the underlying storage connectivity with the FlashArray. It fully
supports the Cinder Fibre Channel Zone Manager.
"""
VERSION = "4.0.0"
def __init__(self, *args, **kwargs):
execute = kwargs.pop("execute", utils.execute)
super(PureFCDriver, self).__init__(execute=execute, *args, **kwargs)
self._storage_protocol = "FC"
self._lookup_service = fczm_utils.create_lookup_service()
def _get_host(self, array, connector):
"""Return dict describing existing Purity host object or None."""
hosts = array.list_hosts()
for host in hosts:
for wwn in connector["wwpns"]:
if wwn.lower() in str(host["wwn"]).lower():
return host
@staticmethod
def _get_array_wwns(array):
"""Return list of wwns from the array"""
ports = array.list_ports()
return [port["wwn"] for port in ports if port["wwn"]]
@fczm_utils.add_fc_zone
@pure_driver_debug_trace
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
current_array = self._get_current_array()
connection = self._connect(volume, connector)
target_wwns = self._get_array_wwns(current_array)
init_targ_map = self._build_initiator_target_map(target_wwns,
connector)
properties = {
"driver_volume_type": "fibre_channel",
"data": {
'target_discovered': True,
"target_lun": connection["lun"],
"target_wwn": target_wwns,
'initiator_target_map': init_targ_map,
"discard": True,
}
}
return properties
@utils.retry(exception.PureRetryableException,
retries=HOST_CREATE_MAX_RETRIES)
def _connect(self, volume, connector):
"""Connect the host and volume; return dict describing connection."""
wwns = connector["wwpns"]
current_array = self._get_current_array()
vol_name = self._get_vol_name(volume)
host = self._get_host(current_array, connector)
if host:
host_name = host["name"]
LOG.info("Re-using existing purity host %(host_name)r",
{"host_name": host_name})
else:
host_name = self._generate_purity_host_name(connector["host"])
LOG.info("Creating host object %(host_name)r with WWN:"
" %(wwn)s.", {"host_name": host_name, "wwn": wwns})
try:
current_array.create_host(host_name, wwnlist=wwns)
except purestorage.PureHTTPError as err:
if (err.code == 400 and
(ERR_MSG_ALREADY_EXISTS in err.text or
ERR_MSG_ALREADY_IN_USE in err.text)):
# If someone created it before we could just retry, we will
# pick up the new host.
LOG.debug('Unable to create host: %s', err.text)
raise exception.PureRetryableException()
return self._connect_host_to_vol(current_array, host_name, vol_name)
def _build_initiator_target_map(self, target_wwns, connector):
"""Build the target_wwns and the initiator target map."""
init_targ_map = {}
if self._lookup_service:
# use FC san lookup to determine which NSPs to use
# for the new VLUN.
dev_map = self._lookup_service.get_device_mapping_from_network(
connector['wwpns'],
target_wwns)
for fabric_name in dev_map:
fabric = dev_map[fabric_name]
for initiator in fabric['initiator_port_wwn_list']:
if initiator not in init_targ_map:
init_targ_map[initiator] = []
init_targ_map[initiator] += fabric['target_port_wwn_list']
init_targ_map[initiator] = list(set(
init_targ_map[initiator]))
else:
init_targ_map = dict.fromkeys(connector["wwpns"], target_wwns)
return init_targ_map
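    # Without a fabric lookup service this degenerates to a flat map in which
    # every initiator is zoned to every array target port, e.g. with
    # hypothetical WWNs:
    # {'21000024ff406cc3': ['524a9370b312e411', '524a9370b312e412']}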
@fczm_utils.remove_fc_zone
@pure_driver_debug_trace
def terminate_connection(self, volume, connector, **kwargs):
"""Terminate connection."""
current_array = self._get_current_array()
no_more_connections = self._disconnect(current_array, volume,
connector, **kwargs)
properties = {"driver_volume_type": "fibre_channel", "data": {}}
if no_more_connections:
target_wwns = self._get_array_wwns(current_array)
init_targ_map = self._build_initiator_target_map(target_wwns,
connector)
properties["data"] = {"target_wwn": target_wwns,
"initiator_target_map": init_targ_map}
return properties
# (dataset row metadata elided; next row follows)
# File: python/ccxt/async_support/gdax.py (repo justinchou/ccxt, MIT license)
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import base64
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
class gdax (Exchange):
def describe(self):
return self.deep_extend(super(gdax, self).describe(), {
'id': 'gdax',
'name': 'GDAX',
'countries': ['US'],
'rateLimit': 1000,
'userAgent': self.userAgents['chrome'],
'has': {
'cancelAllOrders': True,
'CORS': True,
'deposit': True,
'fetchAccounts': True,
'fetchClosedOrders': True,
'fetchDepositAddress': True,
'createDepositAddress': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderTrades': True,
'fetchOrders': True,
'fetchTransactions': True,
'withdraw': True,
},
'timeframes': {
'1m': 60,
'5m': 300,
'15m': 900,
'1h': 3600,
'6h': 21600,
'1d': 86400,
},
'urls': {
'test': 'https://api-public.sandbox.gdax.com',
'logo': 'https://user-images.githubusercontent.com/1294454/27766527-b1be41c6-5edb-11e7-95f6-5b496c469e2c.jpg',
'api': 'https://api.gdax.com',
'www': 'https://www.gdax.com',
'doc': 'https://docs.gdax.com',
'fees': [
'https://www.gdax.com/fees',
'https://support.gdax.com/customer/en/portal/topics/939402-depositing-and-withdrawing-funds/articles',
],
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
'api': {
'public': {
'get': [
'currencies',
'products',
'products/{id}/book',
'products/{id}/candles',
'products/{id}/stats',
'products/{id}/ticker',
'products/{id}/trades',
'time',
],
},
'private': {
'get': [
'accounts',
'accounts/{id}',
'accounts/{id}/holds',
'accounts/{id}/ledger',
'accounts/{id}/transfers',
'coinbase-accounts',
'fills',
'funding',
'orders',
'orders/{id}',
'otc/orders',
'payment-methods',
'position',
'reports/{id}',
'users/self/trailing-volume',
],
'post': [
'conversions',
'deposits/coinbase-account',
'deposits/payment-method',
'coinbase-accounts/{id}/addresses',
'funding/repay',
'orders',
'position/close',
'profiles/margin-transfer',
'reports',
'withdrawals/coinbase',
'withdrawals/crypto',
'withdrawals/payment-method',
],
'delete': [
'orders',
'orders/{id}',
],
},
},
'fees': {
'trading': {
'tierBased': True, # complicated tier system per coin
'percentage': True,
'maker': 0.15 / 100, # highest fee of all tiers
'taker': 0.25 / 100, # highest fee of all tiers
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BCH': 0,
'BTC': 0,
'LTC': 0,
'ETH': 0,
'EUR': 0.15,
'USD': 25,
},
'deposit': {
'BCH': 0,
'BTC': 0,
'LTC': 0,
'ETH': 0,
'EUR': 0.15,
'USD': 10,
},
},
},
'exceptions': {
'exact': {
'Insufficient funds': InsufficientFunds,
'NotFound': OrderNotFound,
'Invalid API Key': AuthenticationError,
'invalid signature': AuthenticationError,
'Invalid Passphrase': AuthenticationError,
'Invalid order id': InvalidOrder,
},
'broad': {
'Order already done': OrderNotFound,
'order not found': OrderNotFound,
'price too small': InvalidOrder,
'price too precise': InvalidOrder,
},
},
})
async def fetch_markets(self, params={}):
response = await self.publicGetProducts(params)
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
symbol = base + '/' + quote
priceLimits = {
'min': self.safe_float(market, 'quote_increment'),
'max': None,
}
precision = {
'amount': 8,
'price': self.precision_from_string(self.safe_string(market, 'quote_increment')),
}
taker = self.fees['trading']['taker'] # does not seem right
if (base == 'ETH') or (base == 'LTC'):
taker = 0.003
active = market['status'] == 'online'
result.append(self.extend(self.fees['trading'], {
'id': id,
'symbol': symbol,
'baseId': baseId,
'quoteId': quoteId,
'base': base,
'quote': quote,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(market, 'base_min_size'),
'max': self.safe_float(market, 'base_max_size'),
},
'price': priceLimits,
'cost': {
'min': self.safe_float(market, 'min_market_funds'),
'max': self.safe_float(market, 'max_market_funds'),
},
},
'taker': taker,
'active': active,
'info': market,
}))
return result
async def fetch_accounts(self, params={}):
response = await self.privateGetAccounts(params)
#
# [
# {
# id: '4aac9c60-cbda-4396-9da4-4aa71e95fba0',
# currency: 'BTC',
# balance: '0.0000000000000000',
# available: '0',
# hold: '0.0000000000000000',
# profile_id: 'b709263e-f42a-4c7d-949a-a95c83d065da'
# },
# {
# id: 'f75fa69a-1ad1-4a80-bd61-ee7faa6135a3',
# currency: 'USDC',
# balance: '0.0000000000000000',
# available: '0',
# hold: '0.0000000000000000',
# profile_id: 'b709263e-f42a-4c7d-949a-a95c83d065da'
# },
# ]
#
result = []
for i in range(0, len(response)):
account = response[i]
accountId = self.safe_string(account, 'id')
currencyId = self.safe_string(account, 'currency')
code = self.common_currency_code(currencyId)
result.append({
'id': accountId,
'type': None,
'currency': code,
'info': account,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
balances = await self.privateGetAccounts(params)
result = {'info': balances}
for b in range(0, len(balances)):
balance = balances[b]
currency = balance['currency']
account = {
'free': self.safe_float(balance, 'available'),
'used': self.safe_float(balance, 'hold'),
'total': self.safe_float(balance, 'balance'),
}
result[currency] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
orderbook = await self.publicGetProductsIdBook(self.extend({
'id': self.market_id(symbol),
            'level': 2,  # 1 = best bid/ask, 2 = aggregated, 3 = full order book
}, params))
return self.parse_order_book(orderbook)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = self.extend({
'id': market['id'],
}, params)
ticker = await self.publicGetProductsIdTicker(request)
timestamp = self.parse8601(self.safe_value(ticker, 'time'))
bid = None
ask = None
if 'bid' in ticker:
bid = self.safe_float(ticker, 'bid')
if 'ask' in ticker:
ask = self.safe_float(ticker, 'ask')
last = self.safe_float(ticker, 'price')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': bid,
'bidVolume': None,
'ask': ask,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': None,
'info': ticker,
}
def parse_trade(self, trade, market=None):
timestamp = self.parse8601(self.safe_string_2(trade, 'time', 'created_at'))
symbol = None
if market is None:
marketId = self.safe_string(trade, 'product_id')
market = self.safe_value(self.markets_by_id, marketId)
if market:
symbol = market['symbol']
feeRate = None
feeCurrency = None
takerOrMaker = None
if market is not None:
feeCurrency = market['quote']
if 'liquidity' in trade:
takerOrMaker = 'taker' if (trade['liquidity'] == 'T') else 'maker'
feeRate = market[takerOrMaker]
feeCost = self.safe_float(trade, 'fill_fees')
if feeCost is None:
feeCost = self.safe_float(trade, 'fee')
fee = {
'cost': feeCost,
'currency': feeCurrency,
'rate': feeRate,
}
type = None
id = self.safe_string(trade, 'trade_id')
side = 'sell' if (trade['side'] == 'buy') else 'buy'
orderId = self.safe_string(trade, 'order_id')
        # GDAX's public trades report the maker side, so fetchTrades inverts
        # it to the taker side; private fills (which carry an order_id)
        # already report the order's own side.
if orderId is not None:
side = 'buy' if (trade['side'] == 'buy') else 'sell'
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'size')
return {
'id': id,
'order': orderId,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'fee': fee,
'cost': price * amount,
}
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
# as of 2018-08-23
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'product_id': market['id'],
}
if limit is not None:
request['limit'] = limit
response = await self.privateGetFills(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.publicGetProductsIdTrades(self.extend({
'id': market['id'], # fixes issue #2
}, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv[0] * 1000,
ohlcv[3],
ohlcv[2],
ohlcv[1],
ohlcv[4],
ohlcv[5],
]
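    # GDAX candles arrive as [time, low, high, open, close, volume]; the
    # reordering above emits the ccxt convention
    # [timestamp(ms), open, high, low, close, volume]. For example, a raw
    # candle [1537212000, 6400.0, 6500.0, 6450.0, 6480.0, 12.3] becomes
    # [1537212000000, 6450.0, 6500.0, 6400.0, 6480.0, 12.3].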
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
granularity = self.timeframes[timeframe]
request = {
'id': market['id'],
'granularity': granularity,
}
if since is not None:
request['start'] = self.ymdhms(since)
if limit is None:
# https://docs.gdax.com/#get-historic-rates
limit = 300 # max = 300
request['end'] = self.ymdhms(self.sum(limit * granularity * 1000, since))
response = await self.publicGetProductsIdCandles(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
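    # Window arithmetic sketch: when a since timestamp is given with
    # timeframe '1m' (granularity 60s) and the default limit of 300, the
    # request covers since .. since + 300 * 60 * 1000 ms, a five-hour window
    # matching the API's 300-candle-per-request cap.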
async def fetch_time(self, params={}):
response = await self.publicGetTime(params)
        return self.parse8601(self.safe_string(response, 'iso'))
def parse_order_status(self, status):
statuses = {
'pending': 'open',
'active': 'open',
'open': 'open',
'done': 'closed',
'canceled': 'canceled',
'canceling': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
timestamp = self.parse8601(order['created_at'])
symbol = None
if market is None:
if order['product_id'] in self.markets_by_id:
market = self.markets_by_id[order['product_id']]
status = self.parse_order_status(self.safe_string(order, 'status'))
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'size')
if amount is None:
amount = self.safe_float(order, 'funds')
if amount is None:
amount = self.safe_float(order, 'specified_funds')
filled = self.safe_float(order, 'filled_size')
remaining = None
if amount is not None:
if filled is not None:
remaining = amount - filled
cost = self.safe_float(order, 'executed_value')
fee = {
'cost': self.safe_float(order, 'fill_fees'),
'currency': None,
'rate': None,
}
if market:
symbol = market['symbol']
return {
'id': order['id'],
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': order['type'],
'side': order['side'],
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'fee': fee,
}
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.privateGetOrdersId(self.extend({
'id': id,
}, params))
return self.parse_order(response)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'order_id': id,
}
response = await self.privateGetFills(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'status': 'all',
}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
response = await self.privateGetOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
response = await self.privateGetOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'status': 'done',
}
market = None
if symbol is not None:
market = self.market(symbol)
request['product_id'] = market['id']
response = await self.privateGetOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
# oid = str(self.nonce())
request = {
'product_id': self.market_id(symbol),
'side': side,
'size': self.amount_to_precision(symbol, amount),
'type': type,
}
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
response = await self.privatePostOrders(self.extend(request, params))
return self.parse_order(response)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
return await self.privateDeleteOrdersId({'id': id})
async def cancel_all_orders(self, symbol=None, params={}):
return await self.privateDeleteOrders(params)
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = amount * price
currency = market['quote']
return {
'type': takerOrMaker,
'currency': currency,
'rate': rate,
'cost': float(self.currency_to_precision(currency, rate * cost)),
}
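    # Worked example (hypothetical numbers): a taker order for 2 BTC at
    # 10000 USD with a taker rate of 0.0025 gives cost = 2 * 10000 = 20000
    # USD and a fee of 0.0025 * 20000 = 50 USD, rounded to the quote
    # currency's precision by currency_to_precision().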
async def get_payment_methods(self):
response = await self.privateGetPaymentMethods()
return response
async def deposit(self, code, amount, address, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': amount,
}
method = 'privatePostDeposits'
if 'payment_method_id' in params:
# deposit from a payment_method, like a bank account
method += 'PaymentMethod'
elif 'coinbase_account_id' in params:
# deposit into GDAX account from a Coinbase account
method += 'CoinbaseAccount'
else:
                # otherwise we did not receive a supported deposit location
# relevant docs link for the Googlers
# https://docs.gdax.com/#deposits
raise NotSupported(self.id + ' deposit() requires one of `coinbase_account_id` or `payment_method_id` extra params')
response = await getattr(self, method)(self.extend(request, params))
if not response:
raise ExchangeError(self.id + ' deposit() error: ' + self.json(response))
return {
'info': response,
'id': response['id'],
}
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
currency = self.currency(code)
await self.load_markets()
request = {
'currency': currency['id'],
'amount': amount,
}
method = 'privatePostWithdrawals'
if 'payment_method_id' in params:
method += 'PaymentMethod'
elif 'coinbase_account_id' in params:
method += 'CoinbaseAccount'
else:
method += 'Crypto'
request['crypto_address'] = address
response = await getattr(self, method)(self.extend(request, params))
if not response:
raise ExchangeError(self.id + ' withdraw() error: ' + self.json(response))
return {
'info': response,
'id': response['id'],
}
async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
await self.loadAccounts()
currency = None
id = self.safe_string(params, 'id') # account id
if id is None:
if code is None:
raise ArgumentsRequired(self.id + ' fetchTransactions() requires a currency code argument if no account id specified in params')
currency = self.currency(code)
accountsByCurrencyCode = self.index_by(self.accounts, 'currency')
account = self.safe_value(accountsByCurrencyCode, code)
if account is None:
raise ExchangeError(self.id + ' fetchTransactions() could not find account id for ' + code)
id = account['id']
request = {
'id': id,
}
if limit is not None:
request['limit'] = limit
response = await self.privateGetAccountsIdTransfers(self.extend(request, params))
for i in range(0, len(response)):
response[i]['currency'] = code
return self.parseTransactions(response, currency, since, limit)
def parse_transaction_status(self, transaction):
if 'canceled_at' in transaction and transaction['canceled_at']:
return 'canceled'
elif 'completed_at' in transaction and transaction['completed_at']:
return 'ok'
        elif ('canceled_at' in transaction and not transaction['canceled_at']) and \
                ('completed_at' in transaction and not transaction['completed_at']) and \
                ('processed_at' in transaction and not transaction['processed_at']):
            return 'pending'
elif 'processed_at' in transaction and transaction['processed_at']:
return 'pending'
else:
return 'failed'
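    # Status mapping sketch: a set canceled_at -> 'canceled'; a set
    # completed_at -> 'ok'; none of canceled/completed/processed set ->
    # 'pending'; processed_at set without completion -> still 'pending';
    # anything else -> 'failed'.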
def parse_transaction(self, transaction, currency=None):
details = self.safe_value(transaction, 'details', {})
id = self.safe_string(transaction, 'id')
txid = self.safe_string(details, 'crypto_transaction_hash')
timestamp = self.parse8601(self.safe_string(transaction, 'created_at'))
updated = self.parse8601(self.safe_string(transaction, 'processed_at'))
code = None
currencyId = self.safe_string(transaction, 'currency')
if currencyId in self.currencies_by_id:
currency = self.currencies_by_id[currencyId]
code = currency['code']
else:
code = self.common_currency_code(currencyId)
fee = None
status = self.parse_transaction_status(transaction)
amount = self.safe_float(transaction, 'amount')
type = self.safe_string(transaction, 'type')
address = self.safe_string(details, 'crypto_address')
tag = self.safe_string(details, 'destination_tag')
address = self.safe_string(transaction, 'crypto_address', address)
if type == 'withdraw':
type = 'withdrawal'
address = self.safe_string(details, 'sent_to_address', address)
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'tag': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if method == 'GET':
if query:
request += '?' + self.urlencode(query)
url = self.urls['api'] + request
if api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
payload = ''
if method != 'GET':
if query:
body = self.json(query)
payload = body
# payload = body if (body) else ''
what = nonce + method + request + payload
secret = base64.b64decode(self.secret)
signature = self.hmac(self.encode(what), secret, hashlib.sha256, 'base64')
headers = {
'CB-ACCESS-KEY': self.apiKey,
'CB-ACCESS-SIGN': self.decode(signature),
'CB-ACCESS-TIMESTAMP': nonce,
'CB-ACCESS-PASSPHRASE': self.password,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
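    # Signing sketch (illustrative values, not real credentials): for a
    # GET /orders request at nonce 1546300800 the prehash string is
    # '1546300800GET/orders' (nonce + method + request path + empty payload);
    # it is signed with HMAC-SHA256 using the base64-decoded API secret, and
    # the base64-encoded digest is sent as the CB-ACCESS-SIGN header.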
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
accounts = self.safe_value(self.options, 'coinbaseAccounts')
if accounts is None:
accounts = await self.privateGetCoinbaseAccounts()
self.options['coinbaseAccounts'] = accounts # cache it
self.options['coinbaseAccountsByCurrencyId'] = self.index_by(accounts, 'currency')
currencyId = currency['id']
account = self.safe_value(self.options['coinbaseAccountsByCurrencyId'], currencyId)
if account is None:
# eslint-disable-next-line quotes
raise InvalidAddress(self.id + " fetchDepositAddress() could not find currency code " + code + " with id = " + currencyId + " in self.options['coinbaseAccountsByCurrencyId']")
request = {
'id': account['id'],
}
response = await self.privateGetCoinbaseAccountsIdAddresses(self.extend(request, params))
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'destination_tag')
return {
'currency': code,
'address': self.check_address(address),
'tag': tag,
'info': response,
}
async def create_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
accounts = self.safe_value(self.options, 'coinbaseAccounts')
if accounts is None:
accounts = await self.privateGetCoinbaseAccounts()
self.options['coinbaseAccounts'] = accounts # cache it
self.options['coinbaseAccountsByCurrencyId'] = self.index_by(accounts, 'currency')
currencyId = currency['id']
account = self.safe_value(self.options['coinbaseAccountsByCurrencyId'], currencyId)
if account is None:
# eslint-disable-next-line quotes
raise InvalidAddress(self.id + " fetchDepositAddress() could not find currency code " + code + " with id = " + currencyId + " in self.options['coinbaseAccountsByCurrencyId']")
request = {
'id': account['id'],
}
response = await self.privatePostCoinbaseAccountsIdAddresses(self.extend(request, params))
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'destination_tag')
return {
'currency': code,
'address': self.check_address(address),
'tag': tag,
'info': response,
}
def handle_errors(self, code, reason, url, method, headers, body, response):
if (code == 400) or (code == 404):
            if body and body[0] == '{':
message = response['message']
feedback = self.id + ' ' + message
exact = self.exceptions['exact']
if message in exact:
raise exact[message](feedback)
broad = self.exceptions['broad']
broadKey = self.findBroadlyMatchedKey(broad, message)
if broadKey is not None:
raise broad[broadKey](feedback)
raise ExchangeError(feedback) # unknown message
raise ExchangeError(self.id + ' ' + body)
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'message' in response:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
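# Usage sketch (assumes this class is exposed as the exchange 'gdax' in
# ccxt's async build; the credentials below are placeholders):
#
#   import asyncio
#   import ccxt.async_support as ccxt
#
#   async def main():
#       exchange = ccxt.gdax({'apiKey': '...', 'secret': '...', 'password': '...'})
#       try:
#           print(await exchange.fetch_open_orders('BTC/USD'))
#       finally:
#           await exchange.close()
#
#   asyncio.get_event_loop().run_until_complete(main())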
| 39.891443 | 266 | 0.520634 |
a1fe9e3bcd932bcfcb990676cf74784e1cee977e | 1,240 | py | Python | src/crate/client/sqlalchemy/tests/__init__.py | mxm/crate-python | de13bf4a04e7c45864ebfdc144dffe1ddb53b88f | [
"Apache-2.0"
] | null | null | null | src/crate/client/sqlalchemy/tests/__init__.py | mxm/crate-python | de13bf4a04e7c45864ebfdc144dffe1ddb53b88f | [
"Apache-2.0"
] | null | null | null | src/crate/client/sqlalchemy/tests/__init__.py | mxm/crate-python | de13bf4a04e7c45864ebfdc144dffe1ddb53b88f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from unittest import TestSuite, makeSuite
from .connection_test import SqlAlchemyConnectionTest
from .dict_test import SqlAlchemyDictTypeTest
from .datetime_test import SqlAlchemyDateAndDateTimeTest
from .compiler_test import SqlAlchemyCompilerTest
from .update_test import SqlAlchemyUpdateTest
from .match_test import SqlAlchemyMatchTest
from .bulk_test import SqlAlchemyBulkTest
from .insert_from_select_test import SqlAlchemyInsertFromSelectTest
from .create_table_test import CreateTableTest
from .array_test import SqlAlchemyArrayTypeTest
from ..sa_version import SA_1_1, SA_VERSION
def test_suite():
tests = TestSuite()
tests.addTest(makeSuite(SqlAlchemyConnectionTest))
tests.addTest(makeSuite(SqlAlchemyDictTypeTest))
tests.addTest(makeSuite(SqlAlchemyDateAndDateTimeTest))
tests.addTest(makeSuite(SqlAlchemyCompilerTest))
tests.addTest(makeSuite(SqlAlchemyUpdateTest))
tests.addTest(makeSuite(SqlAlchemyMatchTest))
tests.addTest(makeSuite(CreateTableTest))
tests.addTest(makeSuite(SqlAlchemyBulkTest))
tests.addTest(makeSuite(SqlAlchemyInsertFromSelectTest))
if SA_VERSION >= SA_1_1:
tests.addTest(makeSuite(SqlAlchemyArrayTypeTest))
return tests
| 38.75 | 67 | 0.824194 |
8dfd6772f87993fdadb333ab313dbd4d485c8b21 | 37,086 | py | Python | Score/ProteomeTools.py | m1258218761/p-score | 6031d0352561ba3b5baa352645c6cfdf560224f2 | [
"MIT"
] | 7 | 2019-09-16T13:14:29.000Z | 2019-09-18T01:47:51.000Z | Score/ProteomeTools.py | m1258218761/p-score | 6031d0352561ba3b5baa352645c6cfdf560224f2 | [
"MIT"
] | null | null | null | Score/ProteomeTools.py | m1258218761/p-score | 6031d0352561ba3b5baa352645c6cfdf560224f2 | [
"MIT"
] | null | null | null | # coding=utf-8
import copy
import math
import torch
import numpy as np
from tqdm import tqdm
from sklearn.metrics import r2_score
from sklearn.metrics.pairwise import cosine_similarity
from Model.data_util import data
from Score.match_ions import MATCH
from Model.Resnet_model import ResNet18
'''
This is for the ProteomeTools2 dataset.
'''
class ProteomeTools(object):
def __init__(self, workpath='', nce=''):
self.workpath = workpath + '/NCE' + nce + '/'
self.nce = nce
    # Compare strings a and b position by position and return the characters that differ
def find_diff(self, a, b):
diff_index = np.array(list(a)) != np.array(list(b))
array_a = np.array(list(a))
diffa = list(array_a[diff_index])
result_stra = ""
for x in diffa:
result_stra += x
array_b = np.array(list(b))
diffb = list(array_b[diff_index])
result_strb = ""
for x in diffb:
result_strb += x
return result_stra, result_strb
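    # Example: find_diff('PEPTIDE', 'PEPTIME') compares the two equal-length
    # strings position by position and returns ('D', 'M').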
    # Delete the spectra at the specified indices (the hard-coded _index list below is used, not the index parameter)
def delmore(self, index=[]):
_index = [774, 1599, 1600, 4176]
count = 0
flag = 1
with open(self.workpath + 'selected_NCE' + self.nce + '.mgf', 'r') as r, open(
self.workpath + '_selected_NCE' + self.nce + '.mgf', 'a+') as w:
while True:
line = r.readline()
if not line.strip():
break
if 'BEGIN IONS' in line:
count += 1
if count in _index:
flag = 0
else:
flag = 1
if flag == 1:
w.write(line)
    # Remove entries containing the unconventional amino acid U from Comet identification results
def find_unkonwn_aa(self):
with open(self.workpath + 'selected_NCE' + self.nce + '_forcomet.txt', 'r') as r, open(
self.workpath + '_selected_NCE' + self.nce + '_forcomet.txt', 'a+') as w:
r.__next__()
r.__next__()
while True:
line = r.readline()
if not line.strip():
break
l = line.split('\t')
if 'U' in l[11]:
print(line)
else:
w.write(line)
    ###---Basic function---: read search engine identification results and return them
    ### Parameters: have_decoy: include decoy hits in the returned results;
    ###             have_score: None to omit scores; otherwise include one per hit (1 selects the e-value, any other value the xcorr score);
    ###             have_charge: also return the peptide charge per spectrum;
    ###             filename: identification result file
# Comet
    def read_comet_results(self, have_decoy=False, have_score=None, have_charge=False, filename=''):
with open(filename, 'r') as rf:
results = {}
CHARGE = {}
while True:
line = rf.readline().strip().split('\t')
if line == ['']:
break
Index = line[0]
sequence = line[11]
_charge = line[2]
                if have_score == 1:
                    _score = line[5]  ##evalue:5
                else:
                    _score = line[6]  ##xcorr:6
modif = line[17]
if 'DECOY_' in line[15].split(',')[0]:
if have_decoy:
sequence = 'DECOY-' + line[11]
else:
continue
if modif != '-':
modif = modif.split(',')
_modif = ''
_M = []
_C = ''
for one in modif:
_one_modif = one.split('_')
if _one_modif[1] == 'V':
_M.append('Oxidation@M' + _one_modif[0])
# _modif.append('Oxidation@M' + _one_modif[0]+';')
else:
if ';Carbamidomethyl@C' == _C:
continue
else:
_C = ';Carbamidomethyl@C'
if _C == '':
_C = ';'
_M = sorted(_M, key=lambda x: int(x.split('@M')[1]))
_modif = ';'.join(_M) + _C
modif = _modif
else:
modif = ';'
if results.get(Index) == None:
                    if have_score is not None:
results[Index] = [[sequence + '_' + modif, _score]]
else:
results[Index] = [sequence + '_' + modif]
if have_charge:
CHARGE[Index] = _charge
elif results.get(Index) != None:
                    if have_score is not None:
results[Index].append([sequence + '_' + modif, _score])
else:
results[Index].append(sequence + '_' + modif)
print('comet results number : ' + str(len(results)))
# print('decoy at first : ' + str(d_count))
if have_charge:
return results, CHARGE
else:
return results
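    # The returned dict maps each spectrum index (a string) to its candidate
    # list, e.g. {'1': ['PEPTIDEK_;', ...]} without scores or
    # {'1': [['PEPTIDEK_;', '3.25'], ...]} when have_score is supplied;
    # peptide and modification string are joined by '_'. (Illustrative values.)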
# MSGF+
    def read_msgf_results(self, have_decoy=False, have_score=None, have_charge=False, filename=''):
with open(filename, 'r') as rf:
_results = {}
CHARGE = {}
_flag = 0
_ = rf.readline()
while True:
line = rf.readline()
if line.strip() == '':
break
l = line.strip().split('\t')
_index = str(int(l[1].split('=')[1]) + 1)
_charge = l[8]
if int(_charge) > 6:
_charge = '6'
_score = l[12]
_evalue = l[14]
_seq = l[9][2:-2]
_M = []
_seq = _seq.replace('+15.995', 'm')
_seq = _seq.replace('+57.021', 'c')
if '+' in _seq or 'U' in _seq or 'X' in _seq:
continue
if 'c' in _seq:
_C = ';Carbamidomethyl@C'
_seq = _seq.replace('c', '')
else:
_C = ';'
while 'm' in _seq:
_m_index = _seq.index('m')
_M.append('Oxidation@M' + str(_m_index))
_seq = _seq.replace('m', '', 1)
if 'Decoy_' in l[10]:
_seq = 'DECOY-' + _seq
if not have_decoy:
continue
_modif = ';'.join(_M) + _C
if _results.get(_index) == None:
                    if have_score is not None:
_results[_index] = [[_seq + '_' + _modif, _evalue]]
else:
_results[_index] = [_seq + '_' + _modif]
if have_charge:
CHARGE[_index] = _charge
elif _results.get(_index) != None:
                    if have_score is not None:
for i in _results[_index]:
_s = _seq + '_' + _modif
if i[0] == _s:
_flag = 1
if _flag == 1:
_flag = 0
continue
_results[_index].append([_seq + '_' + _modif, _evalue])
else:
for i in _results[_index]:
_s = _seq + '_' + _modif
if i == _s:
_flag = 1
if _flag == 1:
_flag = 0
continue
_results[_index].append(_seq + '_' + _modif)
print('MSGF+ results number : ' + str(len(_results)))
if have_charge:
return _results, CHARGE
else:
return _results
    # ---Basic function---: get the correct peptides and their spectra
def read_correct_PSMs(self, filename=''):
with open(filename, 'r') as rf:
mgf_listcontent = []
content = []
while True:
line = rf.readline()
if not line:
break
_line = []
if 'BEGIN IONS' in line:
_line.append(line)
while True:
line = rf.readline()
if 'SQE=' in line:
_seq = line.strip().split('=')[1]
_temp = _seq
if 'Modifications=' in line:
_modeified = line.strip().split('=')[1]
if _modeified == 'NULL':
_modeified = ';'
else:
_a = _modeified.split(',')[0::2]
_b = _modeified.split(',')[1::2]
_modeified = ''
for i in range(len(_a)):
_modeified = _modeified + 'Oxidation@M' + _a[i] + ';'
_temp += '_' + _modeified
mgf_listcontent.append(_temp)
if 'SQE=' in line or 'Modifications=' in line or 'NCE=' in line or 'PIF=' in line or 'Score=' in line:
continue
else:
_line.append(line)
if 'END IONS' in line:
content.append(_line)
_line = []
break
print('correct results number : ' + str(len(mgf_listcontent)))
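            # mgf_listcontent holds the ground-truth 'sequence_modifications'
            # string per spectrum; content holds the matching raw MGF lines
            # with the SQE=/Modifications=/NCE=/PIF=/Score= metadata stripped.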
return mgf_listcontent, content
'''-------------------------------top1 hit rate--------------------------------'''
    # Evaluate Comet identification results and generate the related files, including the Comet top1 missed and unmissed sets
def get_different_peptide(self):
total_PSMs = 0
count = 0
unmissed_total_PSMs = 0
unmissed_count = 0
with open(self.workpath + 'selected_NCE' + self.nce + '_missed_peptide.txt', 'a+') as mtw, open(
self.workpath + 'selected_NCE' + self.nce + '_missed_PSMs.mgf', 'a+') as mgw, open(
self.workpath + 'selected_NCE' + self.nce + '_unmissed_PSMs.mgf', 'a+') as ugw, open(
self.workpath + 'selected_NCE' + self.nce + '_unmissed_peptide.txt', 'a+') as utw:
comet_results = self.read_comet_results(have_decoy=False,
filename=self.workpath + 'selected_NCE' + self.nce + '_forcomet.txt')
correcte_results, correcte_spectrum = self.read_correct_PSMs(
filename=self.workpath + 'selected_NCE' + self.nce + '.mgf')
for i in range(len(correcte_results)):
correcte_seq = correcte_results[i]
if comet_results.get(str(i + 1)) == None:
print('comet have no peptide index : ' + str(i + 1))
continue
comet_seq = comet_results[str(i + 1)]
c_index = 1000
for index in range(len(comet_seq)):
if comet_seq[index].replace(' ', '') == correcte_seq:
c_index = index
break
if c_index != 0:
mtw.write(str(i) + '\t' + correcte_seq + '\t' + '\t'.join(comet_seq) + '\n')
count += 1
comet_seq.append(correcte_seq)
total_PSMs += len(comet_seq)
for o in comet_seq:
seq = o.split('_')[0]
modif = o.split('_')[1]
_psm = copy.deepcopy(correcte_spectrum[i])
_psm.insert(2, 'Sequence=' + seq + '\n')
_psm.insert(4, 'Modified=' + modif + '\n')
mgw.write(''.join(_psm))
if c_index == 0:
utw.write(str(i) + '\t' + correcte_seq + '\t' + '\t'.join(comet_seq) + '\n')
unmissed_count += 1
unmissed_total_PSMs += len(comet_seq)
for o in comet_seq:
seq = o.split('_')[0]
modif = o.split('_')[1]
_psm = copy.deepcopy(correcte_spectrum[i])
_psm.insert(2, 'Sequence=' + seq + '\n')
_psm.insert(4, 'Modified=' + modif + '\n')
ugw.write(''.join(_psm))
print('missed peptide number : ' + str(count))
print('missed total PSMs : ' + str(total_PSMs))
print('unmissed peptide number : ' + str(unmissed_count))
print('unmissed total PSMs : ' + str(unmissed_total_PSMs))
    # Annotate regular ions (b1+, y1+, b2+, y2+) and generate files that can be scored by P-score
def get_byions(self):
m = MATCH(self.workpath, 'selected_NCE' + self.nce + '_missed_PSMs.mgf')
m.write_files()
um = MATCH(self.workpath, 'selected_NCE' + self.nce + '_unmissed_PSMs.mgf')
um.write_files()
    # Obtain the probability matrix from the model
def get_MatrixP(self):
file_mode = 'missed'
file = 'selected_' + self.nce + '_' + file_mode + '_PSMs_byions.txt'
##Model parameters
BATCH_SIZE = 16
Label_number = 4
features_size = 105
weights4_nce30 = [0.5381, 0.2366, 0.0912, 0.0448, 0.0261, 0.0162, 0.0109, 0.0078, 0.0055, 0.004, 0.0187]
weights4_nce35 = [0.6586, 0.1741, 0.0663, 0.0324, 0.0188, 0.012, 0.0083, 0.006, 0.0044, 0.0033, 0.0158]
# Run Testing
print('start...')
model = ResNet18(BATCH_SIZE, weight=weights4_nce30, feature_size=features_size)
model.load_state_dict(torch.load('./Model/model_2_bestacc.pkl'))
model.eval()
if torch.cuda.is_available():
model.cuda()
Data = data(self.workpath + 'FDR/splited_by_ions', Label_number, run_model='Test', test_file=file)
Test_data, Test_label, Test_length, _, _, _ = Data.GetData(BATCH_SIZE)
print('Test data number: ' + str(len(Test_length) * BATCH_SIZE))
with torch.no_grad():
Results = []
P = []
Matrix_P = []
for T_index, T_data in tqdm(enumerate(Test_data)):
t_data = T_data
t_label = Test_label[T_index]
t_length = Test_length[T_index]
t_input_features = torch.tensor(t_data).cuda()
t_ions_level = torch.tensor(t_label).cuda()
t_batch_length = torch.tensor(t_length).cuda()
y_true, y_pred, results, loss, _p = model(t_input_features.permute(0, 2, 1), t_ions_level,
t_batch_length)
Results.extend(results)
P.extend(_p[0])
Matrix_P.extend(_p[1])
start = 0
R = []
Mae = []
Mae_local = []
Cosine = []
Cosine_0_rate = []
print('[Score Info]start to write results...')
# with open( self.workpath+'/35_random10000/pep_credibility/sorted_by_pccandother/' + file_mode + '_score_pep_P.txt',
# 'a+') as fw:
with open(
self.workpath + 'FDR/selected_' + self.nce + '_score_pep_P_4label_humanmodel.txt',
'a+') as fw:
while start + 2 <= len(Results):
_p = P[int(start / 2)]
_matrix_p = Matrix_P[int(start / 2)]
_R = r2_score(Results[start], Results[start + 1])
R.append(_R)
_mae = sum(abs(np.array(Results[start]) - np.array(Results[start + 1]))) / len(Results[start])
Mae.append(_mae)
local_index = np.where((np.array(Results[start]) + np.array(Results[start + 1])) != 0)
try:
_mae_local = sum(abs(
np.array(Results[start])[local_index] - np.array(Results[start + 1])[local_index])) / len(
local_index[0])
except:
_mae_local = 0.0
Mae_local.append(_mae_local)
_Cosine = cosine_similarity([Results[start], Results[start + 1]])[0, 1]
Cosine.append(_Cosine)
_Cosine_0_rate = _Cosine * (1 - (Results[start].count(0) / len(Results[start])))
Cosine_0_rate.append(_Cosine_0_rate)
_true = ','.join(map(str, Results[start]))
_pred = ','.join(map(str, Results[start + 1]))
fw.write(
_true + '\t' + _pred + '\t' + str(_Cosine) + '\t' + str(_R) + '\t' + str(_mae) + '\t' + str(
_mae_local) + '\t' + str(_Cosine_0_rate) + '\t' + str(_p) + '\t' + str(_matrix_p) + '\n')
start += 2
    # Compare Comet and P-score top1 hit rates
def eval_prediction(self):
file_mode = 'unmissed'
all_pepscore = []
all_correct_pep = []
pre_index = []
org_index = []
with open(self.workpath + 'selected_NCE' + self.nce + '_' + file_mode + '_peptide.txt', 'r') as mr, open(
self.workpath + 'sorted_by_pccandother/' + file_mode + '_score_pep_P.txt', 'r') as fr:
score = []
print('start reading score...')
while True:
line = fr.readline() ##True,Pred,Cosine,R,mae,mae_local
if not line.strip():
break
line = line.strip().split('\t')
y_true = list(map(int, line[0].split(',')))
y_pred = list(map(int, line[1].split(',')))
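                # P-score sketch: start from 1.0, add the model's predicted
                # probability for each true ion-intensity label, then weight
                # by the share of non-zero labels, (matched + 1) / (total + 1).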
_score = 1.0
matrix_p = line[8].strip()[2:-2].replace(' ', '').split('],[')
for i in range(len(matrix_p)):
_p = list(map(float, matrix_p[i].split(',')))
_score = _score + _p[y_true[i]]
__score = float(_score) * ((len(y_true) - y_true.count(0) + 1) / (len(y_true) + 1))
score.append(__score)
start = 0
while True:
line = mr.readline().strip()
if not line:
break
l = line.split('\t')
correct_pep = l[1]
all_correct_pep.append(correct_pep)
l = l[2:]
                ## if the file mode is 'missed', append the correct peptide at the end
if file_mode == 'missed':
l.append(correct_pep)
_pep_score = {}
for one in l:
_pep_score[one] = score[start]
start += 1
all_pepscore.append(_pep_score)
count_len = [0] * 20
for iii in all_correct_pep:
count_len[math.ceil(len(iii.split('_')[0]) / 5) - 1] += 1
print('peptide length : ' + str(count_len))
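        # Reconstructed ranking step (assumption: the source omits the loop
        # that fills pre_index/org_index; this sketch takes the rank of the
        # correct peptide in the score-sorted candidates and in the original
        # candidate order):
        for i, _pep_score in enumerate(all_pepscore):
            _ranked = sorted(_pep_score, key=_pep_score.get, reverse=True)
            pre_index.append(_ranked.index(all_correct_pep[i]))
            org_index.append(list(_pep_score).index(all_correct_pep[i]))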
total_number = len(pre_index)
print('total : ' + str(total_number))
orginal_diss = []
predict_diss = []
for c in range(10):
on = org_index.count(c)
orate = on / total_number
pn = pre_index.count(c)
prate = pn / total_number
orginal_diss.append(on)
predict_diss.append(pn)
print('orginal rank ' + str(c + 1) + ' : ' + str(on) + ' || rate : ' + str(round(orate, 3)))
print('predict rank ' + str(c + 1) + ' : ' + str(pn) + ' || rate : ' + str(round(prate, 3)))
orginal_diss.append(total_number - sum(orginal_diss))
predict_diss.append(total_number - sum(predict_diss))
print('original : ' + str(orginal_diss))
print('predict : ' + str(predict_diss))
'''--------------------------------FDR ROC plot---------------------------------'''
    # Generate the all-PSMs file and annotate regular ions
def get_all_PSMs_and_byions(self):
comet_results = self.read_comet_results(have_decoy=True,
filename=self.workpath + 'selected_NCE' + self.nce + '_forcomet.txt')
correcte_pep, correcte_mgf = self.read_correct_PSMs(filename=self.workpath + 'selected_NCE' + self.nce + '.mgf')
spectrums_number = 0
index_missed = []
with open(self.workpath + 'FDR/selected_' + self.nce + '_all_PSMs.mgf', 'a+') as mgfw, open(
self.workpath + 'FDR/selected_' + self.nce + '_all_PSMs.txt', 'a+') as txtw:
for _index in tqdm(range(len(correcte_pep))):
_correcte_seq = correcte_pep[_index]
try:
_comet_seqs = comet_results[str(_index + 1)]
except:
index_missed.append(_index + 1)
continue
flag_index = 1000
for i in range(len(_comet_seqs)):
if _comet_seqs[i].replace(' ', '') == _correcte_seq:
flag_index = i
break
if flag_index == 1000:
_comet_seqs.append(_correcte_seq)
spectrums_number += len(_comet_seqs)
txtw.write(str(_index) + '\t' + _correcte_seq + '\t' + '\t'.join(_comet_seqs) + '\n')
for i in range(len(_comet_seqs)):
o = _comet_seqs[i]
if o.startswith('DECOY'):
seq = o.split('_')[0].split('-')[1]
else:
seq = o.split('_')[0]
modif = o.split('_')[1]
_psm = copy.deepcopy(correcte_mgf[_index])
_psm.insert(2, 'Sequence=' + seq + '\n')
_psm.insert(4, 'Modified=' + modif + '\n')
mgfw.write(''.join(_psm))
print('total spectrums number : ' + str(spectrums_number))
print(index_missed)
m = MATCH(self.workpath + 'FDR', 'selected_' + self.nce + '_all_PSMs.mgf')
m.write_files()
    # Split the annotated file for P-score scoring, because a single file takes up too much memory
def split_byions(self, each_number=100000):
with open(self.workpath + 'FDR/selected_' + self.nce + '_all_PSMs_byions.txt', 'r') as r:
count = 0
while True:
line = r.readline()
if not line.strip():
break
pep_length = len(line.split('\t')[0])
_line = []
_line.append(line)
for i in range(pep_length - 2):
line = r.readline()
_line.append(line)
_flag = int(count / each_number) + 1
with open(self.workpath + 'FDR/splited_by_ions/all_psms_spectrums_byions' + str(_flag) + '.txt',
'a+') as w:
w.write(''.join(_line))
_line = []
count += 1
print(count)
    # Get the FDR ROC plot data file for P-score
def get_pscore_FDR(self, split_by_charge=False):
split_CHARGE = []
all_charge = []
Length = []
with open(self.workpath + 'FDR/selected_' + self.nce + '_all_PSMs.mgf', 'r') as r:
line = r.readline()
last_title = ''
start = 0
while True:
if not line.strip():
break
if line.startswith('TITLE='):
_title = line.strip().split('=')[1]
if line.startswith('CHARGE='):
_charge = line.strip().split('=')[1]
all_charge.append(int(_charge))
if last_title != _title:
split_CHARGE.append(_charge)
last_title = _title
Length.append(start)
start = 0
start += 1
line = r.readline()
print(len(split_CHARGE))
print(len(all_charge))
print(len(Length))
candidate_peps = []
correcte_peps = []
with open(self.workpath + 'FDR/selected_' + self.nce + '_all_PSMs.txt', 'r') as r:
line = r.readline()
while True:
if not line.strip():
break
_candidate = line.strip().split('\t')[2:]
_correcte = line.strip().split('\t')[1]
candidate_peps.append(_candidate)
correcte_peps.append(_correcte)
line = r.readline()
print(len(candidate_peps))
with open(self.workpath + 'FDR/selected_' + self.nce + '_score_pep_P_4label_humanmodel.txt', 'r') as r:
line = r.readline()
score = []
Y = []
l1 = ''
l2 = ''
while True:
if not line.strip():
print('read score end!')
break
l1 = l2
l2 = line
line = line.strip().split('\t')
try:
y_true = list(map(int, line[0].split(',')))
except:
print(l1)
print(l2)
y_pred = list(map(int, line[1].split(',')))
Y.append([line[0], line[1]])
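                # P-score: 1.0 plus the summed predicted probabilities of the
                # true labels, scaled by (matched + 1) / (unmatched + 1) so
                # peptides explaining more of the observed ions score higher.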
_score = 1.0
matrix_p = line[8].strip()[2:-2].replace(' ', '').split('],[')
for i in range(len(matrix_p)):
_p = list(map(float, matrix_p[i].split(',')))
_score = _score + _p[y_true[i]]
_score = float(_score) * ((len(y_true) - y_true.count(0) + 1) / (y_true.count(0) + 1))
score.append(_score)
line = r.readline()
print(len(score))
start = 0
top_pep_charge_score = []
threshold_score = []
for i in range(len(candidate_peps)):
_candidate = candidate_peps[i]
_pep_charge_score = []
_charge = split_CHARGE[i]
_correcte = correcte_peps[i]
for one in _candidate:
_score = score[start]
_y = Y[start]
_pep_charge_score.append([one, _charge, _score] + _y + [_correcte])
start += 1
_c = [x for x in _pep_charge_score if x[0] == x[5]]
_pep_charge_score = sorted(_pep_charge_score, key=lambda x: x[2], reverse=True)
top_pep_charge_score.append(_pep_charge_score[0] + _c[0])
t = _pep_charge_score[0][2]
if t not in threshold_score:
threshold_score.append(t)
threshold_score = sorted(threshold_score)
print(threshold_score)
        # write top1, format: pep charge score y_true y_pred correct_pep
with open(
self.workpath + 'FDR/results/Decoy_score_P_4label_humanmodel_allcharge_changescore.txt',
'a+') as w, open(
self.workpath + 'FDR/results/missed_P_4label_humanmodel_allcharge_changescore.txt',
'a+') as mw:
for i in top_pep_charge_score:
_line = '\t'.join(list(map(str, i))) + '\n'
w.write(_line)
if i[0] != i[5]:
__line = '\t'.join(list(map(str, i))) + '\n'
mw.write(__line)
print('write top 1 end !')
if split_by_charge:
            ## get FDR split by charge
with open(self.workpath + 'FDR/results/FDR_results_P_4label_humanmodel.txt', 'a+') as w:
for _index in tqdm(range(len(threshold_score))):
t = threshold_score[_index]
target_hits = [0, 0, 0, 0, 0]
threshold_all = list(x for x in top_pep_charge_score if x[2] >= t)
for o in threshold_all:
if o[0] == o[5]:
target_hits[int(o[1]) - 2] += 1
for c in ['2', '3', '4', '5']:
threshold_seq_score = list(x for x in threshold_all if x[1] == c)
False_seq_score = list(x for x in threshold_seq_score if x[0].startswith('DECOY-'))
try:
False_Discover_Rate = len(False_seq_score) / (
len(threshold_seq_score) - len(False_seq_score))
except:
False_Discover_Rate = 0.0
_line = 'Threshold peptide score : ' + str(t) + '\thold number : ' + str(
len(threshold_seq_score)) + '\tFDR : ' + str(
False_Discover_Rate) + '\ttarget hits : ' + str(
target_hits[int(c) - 2]) + '\tcharge : ' + str(c)
w.write(_line + '\n')
else:
            ## get FDR without splitting by charge
with open(
self.workpath + 'FDR/results/FDR_results_P_4label_humanmodel_allcharge_changescore.txt',
'a+') as w:
for _index in tqdm(range(len(threshold_score))):
t = threshold_score[_index]
target_hits = 0
threshold_all = list(x for x in top_pep_charge_score if x[2] >= t)
for o in threshold_all:
if o[0] == o[5]:
target_hits += 1
False_seq_score = list(x for x in threshold_all if x[0].startswith('DECOY-'))
try:
False_Discover_Rate = len(False_seq_score) / (len(threshold_all) - len(False_seq_score))
except:
False_Discover_Rate = 0.0
_line = 'Threshold peptide score : ' + str(t) + '\thold number : ' + str(
len(threshold_all)) + '\tFDR : ' + str(False_Discover_Rate) + '\ttarget hits : ' + str(
target_hits)
w.write(_line + '\n')
    # Get the FDR ROC plot data file for Comet
def get_comet_FDR(self, split_by_charge=False):
score_type = 0 ##0 is xcorr,1 is evalue
split_CHARGE = []
all_charge = []
with open(self.workpath + 'selected_NCE' + self.nce + '.mgf', 'r') as r:
line = r.readline()
last_title = ''
while True:
if not line.strip():
break
if line.startswith('TITLE='):
_title = line.strip().split('=')[1]
if line.startswith('CHARGE='):
_charge = line.strip().split('=')[1]
all_charge.append(int(_charge))
if last_title != _title:
split_CHARGE.append(_charge)
last_title = _title
line = r.readline()
print(len(split_CHARGE))
print(len(all_charge))
comet_results = self.read_comet_results(have_decoy=True, have_score=score_type,
filename=self.workpath + 'selected_NCE' + self.nce + '_forcomet.txt')
print(len(comet_results))
correcte_results, correcte_spectrum = self.read_correct_PSMs(
filename=self.workpath + 'selected_NCE' + self.nce + '.mgf')
print(len(correcte_results))
threshold_score = []
for k, v in comet_results.items():
_correcte = correcte_results[int(k) - 1]
_charge = split_CHARGE[int(k) - 1]
_v = v[0]
_v.extend([_charge, _correcte]) ##[pep,xcorr,charge,correcte]
comet_results[k] = _v
t = round(float(v[0][1]), 4)
if t not in threshold_score:
threshold_score.append(t)
if score_type == 0:
threshold_score = sorted(threshold_score, reverse=False) ##Xcorr:False;E-value:True
elif score_type == 1:
threshold_score = sorted(threshold_score, reverse=True)
print(threshold_score)
print(comet_results)
        # write top1, format: pep xcorr charge correct_pep
with open(
self.workpath + 'FDR/results/comet_Decoy_score_xcorr_allcharge.txt',
'a+') as w, open(
self.workpath + 'FDR/results/comet_xcorr_missed_allcharge.txt',
'a+') as mw:
for key, value in comet_results.items():
_line = '\t'.join(value) + '\n'
w.write(_line)
if value[0] != value[3]:
mw.write(_line)
        ## get FDR split by charge
if split_by_charge:
with open(self.workpath + 'FDR/results/comet_FDR_results_xcorr.txt', 'a+') as w:
for t in tqdm(threshold_score):
target_hits = [0, 0, 0, 0, 0] # charge:2,3,4,5
FDR_count = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
threshold_seq_score = list(
(key, value) for key, value in comet_results.items() if
float(value[1]) >= t) ##Xcorr:>;E-value:<
False_seq_score = list(
(key, value) for key, value in threshold_seq_score if value[0].startswith('DECOY-'))
_c = list((key, value) for key, value in threshold_seq_score if value[0] == value[3])
for i in range(len(target_hits)):
FDR_count[i][0] += len(
list((key, value) for key, value in False_seq_score if int(value[2]) == (i + 2)))
FDR_count[i][1] += len(
list((key, value) for key, value in threshold_seq_score if int(value[2]) == (i + 2)))
target_hits[i] = len(list((key, value) for key, value in _c if int(value[2]) == (i + 2)))
for i in [2, 3, 4, 5]:
try:
False_Discover_Rate = FDR_count[i - 2][0] / (FDR_count[i - 2][1] - FDR_count[i - 2][0])
except:
False_Discover_Rate = 0.0
_line = 'Threshold peptide score : ' + str(t) + '\thold number : ' + str(
FDR_count[i - 2][1]) + '\tFDR : ' + str(False_Discover_Rate) + '\ttarget hits : ' + str(
target_hits[i - 2]) + '\tcharge : ' + str(i)
# print(_line)
w.write(_line + '\n')
else:
            ## get FDR without splitting by charge
with open(self.workpath + 'FDR/results/comet_FDR_results_xcorr_allcharge.txt', 'a+') as w:
for t in tqdm(threshold_score):
target_hits = 0
FDR_count = [0, 0]
threshold_seq_score = list((key, value) for key, value in comet_results.items() if
float(value[1]) >= t) ##Xcorr:>;E-value:<
False_seq_score = list(
(key, value) for key, value in threshold_seq_score if value[0].startswith('DECOY-'))
_c = list((key, value) for key, value in threshold_seq_score if value[0] == value[3])
FDR_count[0] += len(list((key, value) for key, value in False_seq_score))
FDR_count[1] += len(list((key, value) for key, value in threshold_seq_score))
target_hits = len(list((key, value) for key, value in _c))
try:
False_Discover_Rate = FDR_count[0] / (FDR_count[1] - FDR_count[0])
except:
False_Discover_Rate = 0.0
_line = 'Threshold peptide score : ' + str(t) + '\thold number : ' + str(
FDR_count[1]) + '\tFDR : ' + str(False_Discover_Rate) + '\ttarget hits : ' + str(target_hits)
w.write(_line + '\n')
if __name__ == '__main__':
proteometools = ProteomeTools(workpath='E:/data/1/get_ions/ProteomeTools2/selected_mgf2', nce='30')
proteometools.find_unkonwn_aa()
##top1 hits rate
proteometools.get_different_peptide()
proteometools.get_byions()
proteometools.get_MatrixP()
proteometools.eval_prediction()
##FDR ROC plot
proteometools.get_all_PSMs_and_byions()
proteometools.split_byions()
proteometools.get_pscore_FDR()
proteometools.get_comet_FDR()
| 47.123253 | 129 | 0.463328 |
66ac0d8ab76890c54c147b8f460a78a4317714d9 | 61 | py | Python | tests/__init__.py | mbhall88/npSimulate | 03d9e184428ef36fae19a91b339cff81fcf73d73 | [
"MIT"
] | 14 | 2018-04-08T17:24:38.000Z | 2021-06-19T08:05:37.000Z | tests/__init__.py | mbhall88/npSimulate | 03d9e184428ef36fae19a91b339cff81fcf73d73 | [
"MIT"
] | 2 | 2017-08-25T05:18:15.000Z | 2017-09-18T03:45:55.000Z | tests/__init__.py | mbhall88/taeper | 03d9e184428ef36fae19a91b339cff81fcf73d73 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Unit test package for taeper."""
| 15.25 | 35 | 0.557377 |
a9331ce1dffc4e92f7c53258d0a563baa7ef8d26 | 3,134 | py | Python | bot/utils/functions.py | rqinflow/cloudy-bot | 2f659f6258556f11a91c934c1aeecab35fed945d | [
"MIT"
] | null | null | null | bot/utils/functions.py | rqinflow/cloudy-bot | 2f659f6258556f11a91c934c1aeecab35fed945d | [
"MIT"
] | null | null | null | bot/utils/functions.py | rqinflow/cloudy-bot | 2f659f6258556f11a91c934c1aeecab35fed945d | [
"MIT"
] | null | null | null | import discord
import datetime
from firebase_admin import db
from discord.ext import commands
async def embedAttributes(embed_info, avatar):
content = embed_info.split(" && ")
title = content[0]
description = content[1]
color = content[2]
embed = discord.Embed(title=title, description=description, color=int(color, 16))
my_channel = None
    if len(content) > 3:
new_content = content[3:]
for number, item in enumerate(new_content):
if "FOOTER: " in new_content[number]:
footer = new_content[number].replace("FOOTER: ", "")
if "IMG: " in footer:
newfooter = footer.split("IMG: ")
footer_text = newfooter[0].replace("FOOTER: ", "")
if newfooter[1] == "AVATAR":
embed.set_footer(text=footer_text, icon_url=avatar)
else:
embed.set_footer(text=footer_text, icon_url=newfooter[1])
else:
embed.set_footer(text=footer)
elif "AUTHOR: " in new_content[number]:
author = new_content[number].replace("AUTHOR: ", "")
if "IMG: " in author:
newauthor = author.split("IMG: ")
author_text = newauthor[0].replace("IMG: ", "")
if newauthor[1] == "AVATAR":
embed.set_author(name=author_text, icon_url=avatar)
else:
embed.set_author(name=author_text, icon_url=newauthor[1])
else:
embed.set_author(name=author)
elif "THUMBNAIL" in new_content[number]:
thumbnail = new_content[number].replace("THUMBNAIL: ", "")
if thumbnail == "AVATAR":
embed.set_thumbnail(url=avatar)
else:
embed.set_thumbnail(url=thumbnail)
elif "IMAGE" in new_content[number]:
image = new_content[number].replace("IMAGE: ", "")
if image == "AVATAR":
embed.set_image(url=avatar)
else:
embed.set_image(url=image)
elif "CHANNEL" in new_content[number]:
my_channel = new_content[number].replace("CHANNEL: ", "")
elif "TIMESTAMP" in new_content[number]:
embed.timestamp = datetime.datetime.utcnow()
return embed, my_channel
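# Example embed_info string (illustrative): fields are joined by ' && ', e.g.
#   "Welcome && Enjoy your stay! && 303136 && FOOTER: bot IMG: AVATAR"
# yields an embed titled 'Welcome' with color 0x303136 and a footer whose
# icon is set to the invoking user's avatar.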
async def gif_embed(gifdata, query=None):
    if query is None:
embed = discord.Embed(title="random gif", color=0x303136)
embed.set_image(url=gifdata["images"]["original"]["url"])
embed.timestamp = datetime.datetime.utcnow()
embed.set_footer(text=gifdata["title"].lower())
else:
embed = discord.Embed(title=f"{query.lower()} gif", color=0x303136)
embed.set_image(url=gifdata["images"]["original"]["url"])
embed.timestamp = datetime.datetime.utcnow()
embed.set_footer(text=gifdata["title"].lower())
return embed | 46.776119 | 85 | 0.541481 |
047b509e39312ad29f2eaef45da035e84b695157 | 10,822 | py | Python | tests/test_external_list.py | arielmorelli/server_core | b34e3b334c5255bd60df0dc68ed16473e5b43ad7 | [
"Apache-2.0"
] | null | null | null | tests/test_external_list.py | arielmorelli/server_core | b34e3b334c5255bd60df0dc68ed16473e5b43ad7 | [
"Apache-2.0"
] | null | null | null | tests/test_external_list.py | arielmorelli/server_core | b34e3b334c5255bd60df0dc68ed16473e5b43ad7 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
import datetime
from nose.tools import (
assert_raises,
assert_raises_regexp,
eq_,
set_trace,
)
from . import (
DatabaseTest,
DummyMetadataClient,
)
from ..model import (
DataSource,
Edition,
Identifier,
Subject,
)
from ..external_list import (
CustomListFromCSV,
MembershipManager,
ClassificationBasedMembershipManager,
)
class TestCustomListFromCSV(DatabaseTest):
def setup(self):
super(TestCustomListFromCSV, self).setup()
self.data_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)
self.metadata = DummyMetadataClient()
self.metadata.lookups['Octavia Butler'] = 'Butler, Octavia'
self.l = CustomListFromCSV(self.data_source.name, "Test list",
metadata_client = self.metadata,
display_author_field='author',
identifier_fields={Identifier.ISBN: "isbn"})
self.custom_list, ignore = self._customlist(
data_source_name=self.data_source.name, num_entries=0)
self.now = datetime.datetime.utcnow()
DATE_FORMAT = "%Y/%m/%d %H:%M:%S"
def create_row(self, display_author=None, sort_author=None):
"""Create a dummy row for this tests's custom list."""
l = self.l
row = dict()
for scalarkey in (l.title_field, l.annotation_field,
l.annotation_author_name_field,
l.annotation_author_affiliation_field):
row[scalarkey] = self._str
display_author = display_author or self._str
fn = l.sort_author_field
if isinstance(fn, list):
fn = fn[0]
row[fn] = sort_author
row['isbn'] = self._isbn
for key in l.subject_fields.keys():
row[key] = ", ".join([self._str, self._str])
for timekey in (l.first_appearance_field,
l.published_field):
if isinstance(timekey, list):
timekey = timekey[0]
row[timekey] = self._time.strftime(self.DATE_FORMAT)
row[self.l.display_author_field] = display_author
return row
def test_annotation_citation(self):
m = self.l.annotation_citation
row = dict()
eq_(None, m(row))
row[self.l.annotation_author_name_field] = "Alice"
eq_(u" —Alice", m(row))
row[self.l.annotation_author_affiliation_field] = "2nd Street Branch"
eq_(u" —Alice, 2nd Street Branch", m(row))
del row[self.l.annotation_author_name_field]
eq_(None, m(row))
def test_row_to_metadata_complete_success(self):
row = self.create_row()
metadata = self.l.row_to_metadata(row)
eq_(row[self.l.title_field], metadata.title)
eq_(row['author'], metadata.contributors[0].display_name)
eq_(row['isbn'], metadata.identifiers[0].identifier)
expect_pub = datetime.datetime.strptime(
row['published'], self.DATE_FORMAT)
eq_(expect_pub, metadata.published)
eq_(self.l.default_language, metadata.language)
def test_metadata_to_list_entry_complete_success(self):
row = self.create_row(display_author="Octavia Butler")
metadata = self.l.row_to_metadata(row)
list_entry = self.l.metadata_to_list_entry(
self.custom_list, self.data_source, self.now, metadata)
e = list_entry.edition
eq_(row[self.l.title_field], e.title)
eq_("Octavia Butler", e.author)
eq_("Butler, Octavia", e.sort_author)
i = e.primary_identifier
eq_(Identifier.ISBN, i.type)
eq_(row['isbn'], i.identifier)
# There should be one description.
expect = row[self.l.annotation_field] + self.l.annotation_citation(row)
eq_(expect, list_entry.annotation)
classifications = i.classifications
# There should be six classifications, two of type 'tag', two
# of type 'schema:audience', and two of type
# 'schema:typicalAgeRange'
eq_(6, len(classifications))
tags = [x for x in classifications if x.subject.type==Subject.TAG]
eq_(2, len(tags))
audiences = [x for x in classifications
if x.subject.type==Subject.FREEFORM_AUDIENCE]
eq_(2, len(audiences))
age_ranges = [x for x in classifications
if x.subject.type==Subject.AGE_RANGE]
eq_(2, len(age_ranges))
expect_first = datetime.datetime.strptime(
row[self.l.first_appearance_field], self.DATE_FORMAT)
eq_(expect_first, list_entry.first_appearance)
eq_(self.now, list_entry.most_recent_appearance)
def test_row_to_item_matching_work_found(self):
row = self.create_row(display_author="Octavia Butler")
work = self._work(title=row[self.l.title_field],
authors=['Butler, Octavia'])
self._db.commit()
metadata = self.l.row_to_metadata(row)
list_entry = self.l.metadata_to_list_entry(
self.custom_list, self.data_source, self.now, metadata)
e = list_entry.edition
eq_(row[self.l.title_field], e.title)
eq_("Octavia Butler", e.author)
eq_("Butler, Octavia", e.sort_author)
def test_non_default_language(self):
row = self.create_row()
row[self.l.language_field] = 'Spanish'
metadata = self.l.row_to_metadata(row)
list_entry = self.l.metadata_to_list_entry(
self.custom_list, self.data_source, self.now, metadata)
eq_('spa', list_entry.edition.language)
def test_overwrite_old_data(self):
self.l.overwrite_old_data = True
row1 = self.create_row()
row2 = self.create_row()
row3 = self.create_row()
for f in self.l.title_field, self.l.sort_author_field, self.l.display_author_field, 'isbn':
row2[f] = row1[f]
row3[f] = row1[f]
metadata = self.l.row_to_metadata(row1)
list_entry_1 = self.l.metadata_to_list_entry(
self.custom_list, self.data_source, self.now, metadata)
# Import from the second row, and (e.g.) the new annotation
# will overwrite the old annotation.
metadata2 = self.l.row_to_metadata(row2)
list_entry_2 = self.l.metadata_to_list_entry(
self.custom_list, self.data_source, self.now, metadata2)
eq_(list_entry_1, list_entry_2)
eq_(list_entry_1.annotation, list_entry_2.annotation)
# There are still six classifications.
i = list_entry_1.edition.primary_identifier
eq_(6, len(i.classifications))
# Now import from the third row, but with
# overwrite_old_data set to False.
self.l.overwrite_old_data = False
metadata3 = self.l.row_to_metadata(row3)
list_entry_3 = self.l.metadata_to_list_entry(
self.custom_list, self.data_source, self.now, metadata3)
eq_(list_entry_3, list_entry_1)
# Now there are 12 classifications.
eq_(12, len(i.classifications))
class BooksInSeries(MembershipManager):
"""A sample implementation of MembershipManager that makes a CustomList
out of all books that are in some series.
"""
@property
def new_membership(self):
"""Only books that are part of a series should be in this list."""
return self._db.query(Edition).filter(Edition.series != None)
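    # Usage sketch: BooksInSeries(custom_list).update(some_datetime) syncs
    # the list so it holds exactly one entry per edition whose `series`
    # attribute is set (exercised by TestMembershipManager.test_update below).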
class TestMembershipManager(DatabaseTest):
def test_update(self):
# Create two books that are part of series, and one book that
# is not.
series1 = self._edition()
series1.series = "Series 1"
series2 = self._edition()
series2.series = "Series Two"
no_series = self._edition()
eq_(None, no_series.series)
update_time = datetime.datetime(2015, 1, 1)
        # To create the necessary mocked objects, _customlist calls _work,
        # which calls _edition; _edition makes an edition and a pool (through
        # _licensepool), and the work is then created via get_one_or_create.
custom_list, ignore = self._customlist()
manager = BooksInSeries(custom_list)
manager.update(update_time)
[entry1] = [x for x in custom_list.entries if x.edition.series == "Series 1"]
[entry2] = [x for x in custom_list.entries if x.edition.series == "Series Two"]
eq_(update_time, entry1.first_appearance)
eq_(update_time, entry1.most_recent_appearance)
# In a shocking twist, one of the entries turns out not to
# have a series, while the entry previously thought not to
# have a series actually does.
series2.series = None
no_series.series = "Actually I do have a series."
self._db.commit()
new_update_time = datetime.datetime(2016, 1,1)
manager.update(new_update_time)
# Entry #2 has been removed from the list, and a new entry added.
[old_entry] = [x for x in custom_list.entries if x.edition.series == "Series 1"]
[new_entry] = [x for x in custom_list.entries if x.edition.series == "Actually I do have a series."]
eq_(update_time, old_entry.first_appearance)
eq_(new_update_time, old_entry.most_recent_appearance)
eq_(new_update_time, new_entry.first_appearance)
eq_(new_update_time, new_entry.most_recent_appearance)
def test_classification_based_membership_manager(self):
e1 = self._edition()
e2 = self._edition()
e3 = self._edition()
source = e1.data_source
e1.primary_identifier.classify(source, Subject.TAG, "GOOD FOOD")
e2.primary_identifier.classify(source, Subject.TAG, "barflies")
e3.primary_identifier.classify(source, Subject.TAG, "irrelevant")
custom_list, ignore = self._customlist()
fragments = ["foo", "bar"]
manager = ClassificationBasedMembershipManager(custom_list, fragments)
members = list(manager.new_membership)
eq_(2, len(members))
# e1 is a member of the list because its primary identifier is
# classified under a subject that matches %foo%.
#
# e2 is a member of the list because its primary identifier is
# classified under a subject that matches %bar%.
#
# e3 is not a member of the list.
assert e1 in members
assert e2 in members
| 36.684746 | 108 | 0.641009 |
61a884e9da6ed51c71d5a499675212b55a862ea6 | 4,342 | py | Python | tests/test_unique_entries.py | uk-gov-mirror/ONSdigital.companies-house-big-data-project | be74293b4398976696d07c6b2329d6121c9e5c6a | [
"MIT"
] | null | null | null | tests/test_unique_entries.py | uk-gov-mirror/ONSdigital.companies-house-big-data-project | be74293b4398976696d07c6b2329d6121c9e5c6a | [
"MIT"
] | null | null | null | tests/test_unique_entries.py | uk-gov-mirror/ONSdigital.companies-house-big-data-project | be74293b4398976696d07c6b2329d6121c9e5c6a | [
"MIT"
] | null | null | null | import unittest
import pandas as pd
from pandas.testing import assert_frame_equal
# Custom import
from src.data_processing.xbrl_pd_methods import XbrlSubsets
class TestUniqueEntries(unittest.TestCase):
"""
"""
def test_unique_entries_pos(self):
"""
Positive test case for the unique_entries function.
"""
# Dataframe that we create.
df1 = pd.DataFrame([[1, 6, 2, 3, 19],
[4, 5, 8, 6, 30],
[4, 5, 12, 8, 22],
[4, 7, 9, 5, 21],
[7, 8, 9, 12, 5]],
columns=['A', 'B', 'C', 'D', 'E'])
# The dataframe the function should return (tp_unique_entries1)
df2 = pd.DataFrame([[1, 6, 2, 3, 19],
[4, 5, 8, 6, 30],
[7, 8, 9, 12, 5]],
columns=['A', 'B', 'C', 'D', 'E'])
# The list the function should return (tp_unique_entries2)
list1 = [1, 4, 7]
# Assume
subsets = XbrlSubsets()
# Assume 1
tp_unique_entries1 = subsets.unique_entries(df1, 'A', False)
# Assume 2
tp_unique_entries2 = subsets.unique_entries(df1, 'A', True)
# Assert 1
assert_frame_equal(tp_unique_entries1.reset_index(drop=True),
df2.reset_index(drop=True))
# Assert 2
self.assertListEqual(tp_unique_entries2, list1)
def test_unique_entries_neg(self):
"""
Negative test case for the unique_entries function.
"""
# Dataframe that we create.
df1 = pd.DataFrame([[1, 6, 2, 3, 19],
[4, 5, 8, 6, 30],
[4, 5, 12, 8, 22],
[4, 7, 9, 5, 21],
[7, 8, 9, 12, 5]],
columns=['A', 'B', 'C', 'D', 'E'])
# Dataframe that is NOT the same as the one the function should return.
df2 = pd.DataFrame([[1, 6, 2, 3, 19],
[4, 5, 12, 8, 22],
[7, 8, 9, 12, 5]],
columns=['A', 'B', 'C', 'D', 'E'])
# List that is NOT the same as the one the function should return.
list1 = [1, 4, 4, 4, 7]
# Assume
subsets = XbrlSubsets()
# Assume 1
tn_unique_entries1 = subsets.unique_entries(df1, 'A', False)
# Assume 2
tn_unique_entries2 = subsets.unique_entries(df1, 'A', True)
# Assert 1
self.assertNotEqual(tn_unique_entries1.reset_index(drop=True).equals(df2.reset_index(drop=True)), True)
# Assert 2
self.assertNotEqual(tn_unique_entries2 == list1, True)
def test_types(self):
"""
        Check that unique_entries raises TypeError for invalid argument types.
"""
# Assume
df1 = pd.DataFrame([[1, 6, 2, 3, 19],
[4, 5, 8, 6, 30],
[4, 5, 12, 8, 22],
[4, 7, 9, 5, 21],
[7, 8, 9, 12, 5]],
columns=['A', 'B', 'C', 'D', 'E'])
# Assume
subsets = XbrlSubsets()
# Assert
with self.assertRaises(TypeError):
subsets.unique_entries(1.0, 'A', False)
with self.assertRaises(TypeError):
subsets.unique_entries(df1, None, False)
with self.assertRaises(TypeError):
subsets.unique_entries(df1, ['A', 'B'], True)
with self.assertRaises(TypeError):
subsets.unique_entries(df1, 'A', 'False')
def test_values(self):
"""
        Check that unique_entries raises ValueError for invalid column values.
"""
# Assume
df1 = pd.DataFrame([[1, 6, 2, 3, 19],
[4, 5, 8, 6, 30],
[4, 5, 12, 8, 22],
[4, 7, 9, 5, 21],
[7, 8, 9, 12, 5]],
columns=['A', 'B', 'C', 'D', 'E'])
# Assume
subsets = XbrlSubsets()
# Assert
with self.assertRaises(ValueError):
subsets.unique_entries(df1, 'I', False)
with self.assertRaises(ValueError):
subsets.unique_entries(df1, 'A,B', True)
| 32.893939 | 111 | 0.461999 |
10027b0aecff7c4351eadeeded63d255041d31d9 | 2,730 | py | Python | src/test/python/apache/aurora/client/hooks/test_hooked_api.py | jeremyvdw/aurora | fa9d83a7ef3a96c522884089a471bbb0bef74c48 | [
"Apache-2.0"
] | 479 | 2015-03-27T22:59:49.000Z | 2022-03-09T08:40:49.000Z | src/test/python/apache/aurora/client/hooks/test_hooked_api.py | jeremyvdw/aurora | fa9d83a7ef3a96c522884089a471bbb0bef74c48 | [
"Apache-2.0"
] | 69 | 2015-05-26T20:06:29.000Z | 2020-01-13T19:18:59.000Z | src/test/python/apache/aurora/client/hooks/test_hooked_api.py | jeremyvdw/aurora | fa9d83a7ef3a96c522884089a471bbb0bef74c48 | [
"Apache-2.0"
] | 226 | 2015-03-27T20:02:59.000Z | 2022-03-09T08:40:53.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from inspect import getargspec
from mock import Mock, create_autospec
from apache.aurora.client.api import AuroraClientAPI
from apache.aurora.client.hooks.hooked_api import HookedAuroraClientAPI, NonHookedAuroraClientAPI
from apache.aurora.common.cluster import Cluster
API_METHODS = ('add_instances', 'create_job', 'kill_job', 'restart',
'start_cronjob', 'start_job_update')
API_METHODS_WITH_CONFIG_PARAM_ADDED = ('kill_job', 'restart', 'start_cronjob')
def pytest_generate_tests(metafunc):
if 'method_name' in metafunc.funcargnames:
metafunc.parametrize('method_name', API_METHODS)
def test_api_methods_exist(method_name):
api = Mock(spec=AuroraClientAPI)
method = getattr(api, method_name)
method() # is callable
method.assert_called_once_with()
def test_api_methods_params(method_name):
cluster = create_autospec(spec=Cluster, instance=True)
# cant use mock here; need to inspect methods
api = HookedAuroraClientAPI(cluster=cluster, user_agent="test-client")
hooked_method = getattr(api, method_name)
nonhooked_method = getattr(super(HookedAuroraClientAPI, api), method_name)
api_method = getattr(super(NonHookedAuroraClientAPI, api), method_name)
if method_name in API_METHODS_WITH_CONFIG_PARAM_ADDED:
assert api_method != nonhooked_method
assert nonhooked_method != hooked_method
api_argspec = getargspec(api_method)
hooked_argspec = getargspec(hooked_method)
nonhooked_argspec = getargspec(nonhooked_method)
if method_name in API_METHODS_WITH_CONFIG_PARAM_ADDED:
assert api_argspec.varargs == nonhooked_argspec.varargs
assert api_argspec.keywords == nonhooked_argspec.keywords
assert len(api_argspec.args) + 1 == len(nonhooked_argspec.args)
assert 'config' in nonhooked_argspec.args
if api_argspec.defaults is None:
assert len(nonhooked_argspec.defaults) == 1
assert nonhooked_argspec.defaults[0] is None
else:
assert len(api_argspec.defaults) + 1 == len(nonhooked_argspec.defaults)
assert nonhooked_argspec.defaults[len(api_argspec.defaults)] is None
else:
assert nonhooked_argspec == hooked_argspec
assert nonhooked_argspec == nonhooked_argspec
| 38.450704 | 97 | 0.779487 |
44a876c62b721e016efc9e184218bf9d20f0db24 | 8,205 | py | Python | darts_search_space/imagenet/rlnas/train_supernet/train.py | megvii-model/RLNAS | a7e2ef9debcd06a93b075181a027b806b737b106 | [
"MIT"
] | 17 | 2021-05-17T04:54:17.000Z | 2022-01-23T09:59:02.000Z | darts_search_space/imagenet/rlnas/train_supernet/train.py | megvii-model/RLNAS | a7e2ef9debcd06a93b075181a027b806b737b106 | [
"MIT"
] | 2 | 2021-07-09T05:14:29.000Z | 2022-02-05T10:15:31.000Z | darts_search_space/imagenet/rlnas/train_supernet/train.py | megvii-model/RLNAS | a7e2ef9debcd06a93b075181a027b806b737b106 | [
"MIT"
] | 8 | 2021-05-28T00:04:20.000Z | 2021-10-18T02:41:34.000Z | import os
import sys
import time
import glob
import numpy as np
import torch
from utils import *
import argparse
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from config import config
import shutil
import functools
print = functools.partial(print, flush=True)
from super_model import NetworkImageNet
import logging
import utils
import torchvision.transforms as transforms
import torchvision.datasets as datasets
parser = argparse.ArgumentParser("Prtorch RLNAS ImageNet")
parser.add_argument('--local_rank', type=int, default=None, help='local rank for distributed training')
parser.add_argument('--batch_size', type=int, default=512, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.25, help='init learning rate')
parser.add_argument('--min_lr', type=float, default=5e-4, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=4e-5, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=50, help='num of training epochs')
parser.add_argument('--classes', type=int, default=1000, help='number of classes')
parser.add_argument('--seed', type=int, default=5, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--save', type=str, default='models', help='experiment name')
parser.add_argument('--data', metavar='DIR', default='./data/', help='path to dataset')
parser.add_argument('--workers', type=int, default=32, help='number of workers to load dataset')
args = parser.parse_args()
if args.local_rank == 0 and not os.path.exists(args.save):
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
time.sleep(1)
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
IMAGENET_TRAINING_SET_SIZE = 1281167
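# Optimizer steps per epoch: dataset size divided by the (global) batch size.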
train_iters = IMAGENET_TRAINING_SET_SIZE // args.batch_size
def main():
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
num_gpus = torch.cuda.device_count()
np.random.seed(args.seed)
args.gpu = args.local_rank % num_gpus
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
cudnn.deterministic = True
torch.manual_seed(args.seed)
cudnn.enabled=True
torch.cuda.manual_seed(args.seed)
group_name = 'darts_imagenet_supernet_training'
logging.info('gpu device = %d' % args.gpu)
logging.info("args = %s", args)
torch.distributed.init_process_group(backend='nccl', init_method='env://', group_name = group_name)
args.world_size = torch.distributed.get_world_size()
args.distributed = args.world_size > 1
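# Convert the global batch size into a per-GPU batch size.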
args.batch_size = args.batch_size // args.world_size
criterion_smooth = utils.CrossEntropyLabelSmooth(args.classes, args.label_smooth).cuda()
total_iters = args.epochs * train_iters
# Prepare data
traindir = os.path.join(args.data, 'train')
train_transform = utils.get_train_transform()
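# RLNAS trains the supernet on randomly assigned labels; candidate architectures are later
# ranked by the angle between initial and trained weights, not by accuracy on these labels.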
train_dataset = utils.ImageNetWithRandomLabels(
root=traindir,
transform=train_transform
)
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers//args.world_size, pin_memory=True, sampler=train_sampler)
operations = []
for _ in range(config.edges):
operations.append(list(range(config.op_num)))
logging.info('operations={}'.format(operations))
# Prepare model
model, seed = NetworkImageNet(), args.seed
model = model.cuda(args.gpu)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
logging.info('arch = {}'.format(model.module.architecture()))
optimizer, scheduler = utils.get_optimizer_schedule(model, args, total_iters)
start_epoch = 0
checkpoint_tar = os.path.join(args.save, 'checkpoint.pth.tar')
if os.path.exists(checkpoint_tar):
checkpoint = torch.load(checkpoint_tar, map_location={'cuda:0':'cuda:{}'.format(args.local_rank)})
start_epoch = checkpoint['epoch'] + 1
seed = checkpoint['seed']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
now = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
logging.info('{} load checkpoint..., epoch = {}, operations={}'.format(now, start_epoch, operations))
# Reset the scheduler
for _ in range(start_epoch):
for _ in range(train_iters):
if scheduler.get_lr()[0] > args.min_lr:
scheduler.step()
# Save the base weights for computing angle
if args.local_rank == 0:
utils.save_checkpoint({'epoch':-1,
'state_dict': model.state_dict(),
'seed': seed
}, args.save)
for epoch in range(start_epoch, args.epochs):
# Supernet training
seed = train(train_loader, optimizer, scheduler, model, criterion_smooth, operations, epoch, train_iters, seed, args)
if args.local_rank==0 and (epoch+1)%5==0:
utils.save_checkpoint( { 'epoch':epoch,
'state_dict': model.state_dict(),
'seed':seed}, args.save)
def train(train_loader, optimizer, scheduler, model, criterion, operations, epoch, train_iters, seed, args):
objs, top1 = utils.AvgrageMeter(), utils.AvgrageMeter()
model.train()
for step, (image, target) in enumerate(train_loader):
t0 = time.time()
n = image.size(0)
image = image.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
datatime = time.time() - t0
# Uniform Sampling
normal_cell, seed = get_random_cand(seed, operations)
reduction_cell, seed = get_random_cand(seed, operations)
# Make sure each node has only two predecessor nodes
normal_cell = utils.check_cand(normal_cell, operations)
reduction_cell = utils.check_cand(reduction_cell, operations)
logits = model(image, normal_cell, reduction_cell)
optimizer.zero_grad()
loss = criterion(logits, target)
loss.backward()
nn.utils.clip_grad_value_(model.parameters(), args.grad_clip)
optimizer.step()
prec1, _ = utils.accuracy(logits, target, topk=(1, 5))
objs.update(loss.data.item(), n)
top1.update(prec1.data.item(), n)
if step % args.report_freq == 0 and args.local_rank == 0:
now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
logging.info('{} |=> Epoch={}, train: {} / {}, loss={:.2f}, acc={:.2f}, lr={}, datatime={:.2f}, seed={}' \
.format(now, epoch, step, train_iters, objs.avg, top1.avg, scheduler.get_lr()[0], float(datatime),
seed))
if scheduler.get_last_lr()[0] > args.min_lr:
scheduler.step()
return seed
def get_random_cand(seed, operations):
# Uniform Sampling
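# Re-seeding numpy before every draw makes the sampled path deterministic;
# the caller threads the incremented seed through successive calls.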
cell = []
for op in operations:
np.random.seed(seed)
k = np.random.randint(len(op))
select_op = op[k]
cell.append(select_op)
seed += 1
return cell, seed
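# Hypothetical usage sketch (illustration only, not part of the original script):
#   ops = [list(range(config.op_num)) for _ in range(config.edges)]
#   normal_cell, seed = get_random_cand(seed=0, operations=ops)
#   normal_cell = utils.check_cand(normal_cell, ops)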
if __name__ == '__main__':
main()
| 41.231156 | 150 | 0.67532 |
9bc4e08dbfca0e7ad238329f7e217ecf9e0e9614 | 5,634 | py | Python | alien_invasion.py | juntaow0/alien_invasion_pygame | fe81afc4865a1bd931f067479af112d46e21db38 | [ "MIT" ] | null | null | null | alien_invasion.py | juntaow0/alien_invasion_pygame | fe81afc4865a1bd931f067479af112d46e21db38 | [ "MIT" ] | null | null | null | alien_invasion.py | juntaow0/alien_invasion_pygame | fe81afc4865a1bd931f067479af112d46e21db38 | [ "MIT" ] | null | null | null |
import sys
import pygame
from settings import Settings
from ship import Ship
from bullet import Bullet
from alien import Alien
class AlienInvasion:
"""Overall class to manage game assets and behavior"""
def __init__(self) -> None:
"""initialize the game, and create game resources"""
pygame.init()
self.settings = Settings()
#self.screen = pygame.display.set_mode((0,0),pygame.FULLSCREEN)
#self.settings.screen_width = self.screen.get_rect().width
#self.settings.screen_height = self.screen.get_rect().height
self.screen = pygame.display.set_mode((self.settings.screen_width,self.settings.screen_height))
pygame.display.set_caption("Alien Invasion")
self.ship = Ship(self)
self.bullets = pygame.sprite.Group()
self.aliens = pygame.sprite.Group()
self._create_fleet()
def run_game(self):
"""Start the main loop for the game"""
while True:
self._check_events()
self.ship.update()
self._update_bullets()
self._update_aliens()
self._update_screen()
# make the most recently drawn screen visible
pygame.display.flip()
def _check_events(self):
# watch for keyboard and mouse events
for event in pygame.event.get():
if event.type==pygame.QUIT:
sys.exit()
elif event.type==pygame.KEYDOWN:
self._check_keydown_events(event)
elif event.type==pygame.KEYUP:
self._check_keyup_events(event)
def _check_keydown_events(self,event):
"""respond to key press events"""
if event.key == pygame.K_RIGHT:
# move ship to the right
self.ship.moving_right = True
elif event.key == pygame.K_LEFT:
self.ship.moving_left = True
elif event.key==pygame.K_q:
sys.exit()
elif event.key==pygame.K_SPACE:
self._fire_bullet()
def _check_keyup_events(self,event):
"""respond to key up events"""
if event.key==pygame.K_RIGHT:
self.ship.moving_right = False
elif event.key==pygame.K_LEFT:
self.ship.moving_left = False
def _fire_bullet(self):
"""create a new bullet and add it to the bullets group"""
if len(self.bullets) < self.settings.bullets_allowed:
new_bullet = Bullet(self)
self.bullets.add(new_bullet)
def _update_bullets(self):
"""update bullet position and remove old bullets"""
# update bullet positions
self.bullets.update()
# get rid of bullets that have disappeared
for bullet in self.bullets.copy():
if bullet.rect.bottom <=0:
self.bullets.remove(bullet)
# check for any bullet that have hit aliens and remove it
self._check_bullet_alien_collisions()
def _check_bullet_alien_collisions(self):
"""respond to bullet-alien collisions"""
# remove any bullet and alien that have collided
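# groupcollide(bullets, aliens, True, True): the two True flags delete colliding sprites from both groups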
collisions = pygame.sprite.groupcollide(self.bullets, self.aliens, True, True)
if not self.aliens:
# destroy existing bullets and create new fleet
self.bullets.empty()
self._create_fleet()
def _create_alien(self, alien_number, row_number):
"""create am alien and place it in the row"""
alien = Alien(self)
alien_width, alien_height = alien.rect.size
alien.x = alien_width + 2*alien_width*alien_number
alien.rect.x = alien.x
alien.rect.y = alien_height + 2*alien_height*row_number
self.aliens.add(alien)
def _create_fleet(self):
"""create the fleet of aliens"""
# create an alien and find the number of aliens in a row
# space = one alien width
alien = Alien(self)
alien_width, alien_height = alien.rect.size
available_space_x = self.settings.screen_width - (2*alien_width)
num_aliens_x = available_space_x//(2*alien_width)
#determine the number of rows of aliens that fit on the screen
ship_height = self.ship.rect.height
available_space_y = (self.settings.screen_height-3*alien_height-ship_height)
num_rows = available_space_y//(2*alien_height)
# create the fleet of aliens
for row_number in range(num_rows):
for alien_number in range(num_aliens_x):
self._create_alien(alien_number, row_number)
def _check_fleet_edges(self):
"""respond appropriately if any alien have reached an edge"""
for alien in self.aliens.sprites():
if alien.check_edges():
self._change_fleet_direction()
break
def _change_fleet_direction(self):
"""drop the entire fleet and change the fleet's direction"""
for alien in self.aliens.sprites():
alien.rect.y += self.settings.fleet_drop_speed
self.settings.fleet_direction*=-1
def _update_aliens(self):
"""update the positions of all aliens in the fleet"""
self._check_fleet_edges()
self.aliens.update()
def _update_screen(self):
# Redraw screen
self.screen.fill(self.settings.bg_color)
self.ship.blitme()
for bullet in self.bullets.sprites():
bullet.draw_bullet()
self.aliens.draw(self.screen)
if __name__ == '__main__':
# make game instance and run game
ai = AlienInvasion()
ai.run_game()
| 36.823529 | 103 | 0.621583 |
9ba0d05b44a20f3b492078a3b963f73de6e16227 | 62,929 | py | Python | ietf/utils/draft.py | omunroe-com/ietfdb2 | aeaae292fbd55aca1b6043227ec105e67d73367f | [ "BSD-3-Clause" ] | 2 | 2021-11-20T03:40:56.000Z | 2021-11-20T03:40:59.000Z | ietf/utils/draft.py | omunroe-com/ietfdb2 | aeaae292fbd55aca1b6043227ec105e67d73367f | [ "BSD-3-Clause" ] | null | null | null | ietf/utils/draft.py | omunroe-com/ietfdb2 | aeaae292fbd55aca1b6043227ec105e67d73367f | [ "BSD-3-Clause" ] | null | null | null |
#!/usr/bin/python
# -*- python -*-
from __future__ import unicode_literals
"""
NAME
%(program)s - Extract meta-information from an IETF draft.
SYNOPSIS
%(program)s [OPTIONS] DRAFTLIST_FILE
DESCRIPTION
Extract information about authors' names and email addresses,
intended status and number of pages from Internet Drafts.
The information is emitted in the form of a line containing
xml-style attributes, prefixed with the name of the draft.
%(options)s
AUTHOR
Written by Henrik Levkowetz, <henrik@levkowetz.com>
COPYRIGHT
Copyright 2008 Henrik Levkowetz
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version. There is NO WARRANTY; not even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
"""
import datetime
import getopt
import os
import os.path
import re
import stat
import six
import sys
import time
version = "0.35"
program = os.path.basename(sys.argv[0])
progdir = os.path.dirname(sys.argv[0])
# ----------------------------------------------------------------------
# Data
# ----------------------------------------------------------------------
opt_debug = False
opt_timestamp = False
opt_trace = False
opt_authorinfo = False
opt_getauthors = False
opt_attributes = False
# Don't forget to add the option variable to the globals list in _main below
# The following is an alias list for short forms which starts with a
# different letter than the long form.
longform = {
"Beth": "Elizabeth",
"Bill": "William",
"Bob": "Robert",
"Dick": "Richard",
"Fred": "Alfred",
"Jerry": "Gerald",
"Liz": "Elizabeth",
"Lynn": "Carolyn",
"Ned": "Edward",
"Ted":"Edward",
}
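# Add trailing spaces to keys and values so plain substring replacement only matches whole first names.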
longform = dict([ (short+" ", longform[short]+" ") for short in longform ])
month_names = [ 'january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december' ]
month_names_abbrev3 = [ n[:3] for n in month_names ]
month_names_abbrev4 = [ n[:4] for n in month_names ]
# ----------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------
def _debug(string):
if opt_debug:
sys.stderr.write("%s\n" % (string))
# ----------------------------------------------------------------------
def _note(string):
sys.stdout.write("%s: %s\n" % (program, string))
# ----------------------------------------------------------------------
def _warn(string):
sys.stderr.write("%s: Warning: %s\n" % (program, string))
# ----------------------------------------------------------------------
def _err(string):
sys.stderr.write("%s: Error: %s\n" % (program, string))
sys.exit(1)
# ----------------------------------------------------------------------
def _gettext(file):
file = open(file)
text = file.read()
file.close()
text = re.sub(".\x08", "", text) # Get rid of inkribbon backspace-emphasis
text = text.replace("\r\n", "\n") # Convert DOS to unix
text = text.replace("\r", "\n") # Convert MAC to unix
text = text.expandtabs()
text = text.strip()
return text
def acronym_match(s, l):
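# True when s equals the capital letters of l, e.g. s="IETF" matches l="Internet Engineering Task Force".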
acronym = re.sub("[^A-Z]", "", l)
#_debug(" s:%s; l:%s => %s; %s" % (s, l, acronym, s==acronym))
return s == acronym
# ----------------------------------------------------------------------
class Draft():
def __init__(self, text, source, name_from_source=False):
assert isinstance(text, six.text_type)
self.source = source
self.rawtext = text
self.name_from_source = name_from_source
text = re.sub(".\x08", "", text) # Get rid of inkribbon backspace-emphasis
text = text.replace("\r\n", "\n") # Convert DOS to unix
text = text.replace("\r", "\n") # Convert MAC to unix
text = text.strip()
self.text = text
self.errors = {}
self.rawlines = self.text.split("\n")
self.lines, self.pages = self._stripheaders()
# Some things (such as the filename) has to be on the first page. If
# we didn't get back a set of pages, only one single page with the
# whole document, then we need to do an enforced page split in order
# to limit later searches to the first page.
if len(self.pages) <= 1:
self.pages = []
for pagestart in range(0, len(self.lines), 56):
self.pages += [ "\n".join(self.lines[pagestart:pagestart+56]) ]
self.filename, self.revision = self._parse_draftname()
self._authors = None
self._authors_with_firm = None
self._author_info = None
self._abstract = None
self._pagecount = None
self._status = None
self._creation_date = None
self._title = None
# ------------------------------------------------------------------
def _parse_draftname(self):
draftname_regex = r"(draft-[a-z0-9-]*)-(\d\d)(\w|\.txt|\n|$)"
draftname_match = re.search(draftname_regex, self.pages[0])
if not draftname_match and self.name_from_source:
draftname_match = re.search(draftname_regex, self.source)
rfcnum_regex = r"(Re[qg]uests? [Ff]or Commm?ents?:? +|Request for Comments: RFC |RFC-|RFC )((# ?)?[0-9]+)( |,|\n|$)"
rfcnum_match = re.search(rfcnum_regex, self.pages[0])
if not rfcnum_match and self.name_from_source:
rfcnum_match = re.search(rfcnum_regex, self.source)
if draftname_match:
return (draftname_match.group(1), draftname_match.group(2) )
elif rfcnum_match:
return ("rfc"+rfcnum_match.group(2), "")
else:
self.errors["draftname"] = "Could not find the draft name and revision on the first page."
filename = ""
revision = ""
try:
__, base = self.source.rsplit("/", 1)
except ValueError:
base = self.source
if base.startswith("draft-"):
if '.' in base:
name, __ = base.split(".", 1)
else:
name = base
revmatch = re.search("\d\d$", name)
if revmatch:
filename = name[:-3]
revision = name[-2:]
else:
filename = name
return filename, revision
# ----------------------------------------------------------------------
def _stripheaders(self):
stripped = []
pages = []
page = []
line = ""
newpage = False
sentence = False
shortprev = False
blankcount = 0
linecount = 0
# two functions with side effects
def striplines(p):
beg = end = 0
for i in range(len(p)):
l = p[i]
if l.strip() == "":
continue
else:
beg = i
break
for i in range(len(p)-1,0,-1):
l = p[i]
if l.strip() == "":
continue
else:
end = i
break
return p[beg:end]
def endpage(pages, page, newpage, line):
if line:
page += [ line ]
return begpage(pages, page, newpage)
def begpage(pages, page, newpage, line=None):
if page and len(striplines(page)) > 5:
pages += [ "\n".join(page) ]
page = []
newpage = True
if line:
page += [ line ]
return pages, page, newpage
for line in self.rawlines:
linecount += 1
line = line.rstrip()
if re.search("\[?page [0-9ivx]+\]?[ \t\f]*$", line, re.I):
pages, page, newpage = endpage(pages, page, newpage, line)
continue
if re.search("\f", line, re.I):
pages, page, newpage = begpage(pages, page, newpage)
continue
if re.search("^ *Internet.Draft.+ .+[12][0-9][0-9][0-9] *$", line, re.I):
pages, page, newpage = begpage(pages, page, newpage, line)
continue
# if re.search("^ *Internet.Draft +", line, re.I):
# newpage = True
# continue
if re.search("^ *Draft.+[12][0-9][0-9][0-9] *$", line, re.I):
pages, page, newpage = begpage(pages, page, newpage, line)
continue
if re.search("^RFC[ -]?[0-9]+.*( +)[12][0-9][0-9][0-9]$", line, re.I):
pages, page, newpage = begpage(pages, page, newpage, line)
continue
if re.search("^draft-[-a-z0-9_.]+.*[0-9][0-9][0-9][0-9]$", line, re.I):
pages, page, newpage = endpage(pages, page, newpage, line)
continue
if linecount > 15 and re.search(".{58,}(Jan|Feb|Mar|March|Apr|April|May|Jun|June|Jul|July|Aug|Sep|Oct|Nov|Dec) (19[89][0-9]|20[0-9][0-9]) *$", line, re.I):
pages, page, newpage = begpage(pages, page, newpage, line)
continue
if newpage and re.search("^ *draft-[-a-z0-9_.]+ *$", line, re.I):
pages, page, newpage = begpage(pages, page, newpage, line)
continue
if re.search("^[^ \t]+", line):
sentence = True
if re.search("[^ \t]", line):
if newpage:
# 36 is a somewhat arbitrary count for a 'short' line
shortthis = len(line.strip()) < 36
if sentence or (shortprev and not shortthis):
stripped += [""]
else:
if blankcount:
stripped += [""]*blankcount
blankcount = 0
sentence = False
newpage = False
shortprev = len(line.strip()) < 36 # 36 is a somewhat arbitrary count for a 'short' line
if re.search("[.:]$", line):
sentence = True
if re.search("^[ \t]*$", line):
blankcount += 1
page += [ line ]
continue
page += [ line ]
stripped += [ line ]
pages, page, newpage = begpage(pages, page, newpage)
_debug('pages: %s' % len(pages))
return stripped, pages
# ----------------------------------------------------------------------
def get_pagecount(self):
if self._pagecount == None:
label_pages = len(re.findall("\[page [0-9ixldv]+\]", self.text, re.I))
count_pages = len(self.pages)
if label_pages > count_pages/2:
self._pagecount = label_pages
else:
self._pagecount = count_pages
return self._pagecount
# ------------------------------------------------------------------
def get_wordcount(self):
count = 0
# match any sequence of non-white-space characters like the Unix command "wc"
word_re = re.compile(r'\S+', re.UNICODE)
for l in self.lines:
count += sum(1 for _ in word_re.finditer(l))
return count
# ------------------------------------------------------------------
def get_formal_languages(self):
language_regexps = [
("abnf", [re.compile(r"\bABNF"), re.compile(r" +[a-zA-Z][a-zA-Z0-9_-]* +=[/ ]")]),
("asn1", [re.compile(r'DEFINITIONS +::= +BEGIN')]),
("cbor", [re.compile(r'\b(?:CBOR|CDDL)\b'), re.compile(r" +[a-zA-Z][a-zA-Z0-9_-]* += +[\{\[\(]")]),
("ccode", [re.compile(r"(?:\+\+\))|(?:for \(i)|(?: [!=]= 0\) \{)|(?: struct [a-zA-Z_0-9]+ \{)")]),
("json", [re.compile(r'\bJSON\b'), re.compile(r" \"[^\"]+\" ?: [a-zA-Z0-9\.\"\{\[]")]),
("xml", [re.compile(r"<\?xml")]),
]
already_matched = set()
for l in self.lines:
for lang_name, patterns in language_regexps:
for p in patterns:
if p not in already_matched and p.search(l):
already_matched.add(p)
return [
lang_name
for lang_name, patterns in language_regexps
if all(p in already_matched for p in patterns)
]
# ----------------------------------------------------------------------
def get_status(self):
if self._status == None:
for line in self.lines[:10]:
status_match = re.search("^\s*Intended [Ss]tatus:\s*(.*?) ", line)
if status_match:
self._status = status_match.group(1)
break
return self._status
# ------------------------------------------------------------------
def get_creation_date(self):
if self._creation_date:
return self._creation_date
date_regexes = [
r'^(?P<month>\w+)\s(?P<day>\d{1,2})(,|\s)+(?P<year>\d{4})',
r'^(?P<day>\d{1,2})(,|\s)+(?P<month>\w+)\s(?P<year>\d{4})',
r'^(?P<day>\d{1,2})-(?P<month>\w+)-(?P<year>\d{4})',
r'^(?P<month>\w+)\s(?P<year>\d{4})',
r'\s{3,}(?P<month>\w+)\s(?P<day>\d{1,2})(,|\s)+(?P<year>\d{4})',
r'\s{3,}(?P<day>\d{1,2})(,|\s)+(?P<month>\w+)\s(?P<year>\d{4})',
r'\s{3,}(?P<day>\d{1,2})-(?P<month>\w+)-(?P<year>\d{4})',
# RFC 3339 date (also ISO date)
r'\s{3,}(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})',
# 'October 2008' - default day to today's.
r'\s{3,}(?P<month>\w+)\s(?P<year>\d{4})',
]
dates = []
text = self.pages[0]
for regex in date_regexes:
match = re.search(regex, text, re.MULTILINE)
if match:
start = match.start()
if not "expires" in text[start-10:start].lower():
dates += [(start, match)]
dates.sort()
for start, match in dates:
md = match.groupdict()
mon = md['month'].lower()
day = int( md.get( 'day', 0 ) )
year = int( md['year'] )
try:
if mon in month_names:
month = month_names.index( mon ) + 1
elif mon in month_names_abbrev3:
month = month_names_abbrev3.index( mon ) + 1
elif mon in month_names_abbrev4:
month = month_names_abbrev4.index( mon ) + 1
elif mon.isdigit() and int(mon) in range(1,13):
month = int(mon)
else:
continue
today = datetime.date.today()
if day==0:
# if the date was given with only month and year, use
# today's date if month and year is today's month and
# year, otherwise pick the middle of the month.
# Don't use today's day for month and year in the past
if month==today.month and year==today.year:
day = today.day
else:
day = 15
self._creation_date = datetime.date(year, month, day)
return self._creation_date
except ValueError:
# mon abbreviation not in _MONTH_NAMES
# or month or day out of range
pass
self.errors['creation_date'] = 'Creation Date field is empty or the creation date is not in a proper format.'
return self._creation_date
# ------------------------------------------------------------------
def get_abstract(self):
if self._abstract:
return self._abstract
abstract_re = re.compile('^(\s*)abstract', re.I)
header_re = re.compile("^(\s*)([0-9]+\.? |Appendix|Status of|Table of|Full Copyright|Copyright|Intellectual Property|Acknowled|Author|Index|Disclaimer).*", re.I)
begin = False
abstract = []
abstract_indent = 0
look_for_header = False
for line in self.lines:
if not begin:
if abstract_re.match(line):
begin=True
abstract_indent = len(abstract_re.match(line).group(0))
continue
if begin:
if not line and not abstract:
continue
if not line:
look_for_header=True
abstract.append(line)
continue
if look_for_header and header_re.match(line):
break
look_for_header = False
abstract.append(line)
abstract = '\n'.join(abstract)
abstract = self._clean_abstract(abstract)
self._abstract = self._check_abstract_indent(abstract, abstract_indent)
return self._abstract
def _check_abstract_indent(self, abstract, indent):
indentation_re = re.compile('^(\s)*')
indent_lines = []
for line in abstract.split('\n'):
if line:
indent = len(indentation_re.match(line).group(0))
indent_lines.append(indent)
percents = {}
total = float(len(indent_lines))
formated = False
for indent in set(indent_lines):
count = indent_lines.count(indent)/total
percents[indent] = count
if count > 0.9:
formated = True
if not formated:
return abstract
new_abstract = []
for line in abstract.split('\n'):
if line:
indent = len(indentation_re.match(line).group(0))
if percents[indent] < 0.9:
break
new_abstract.append(line)
return '\n'.join(new_abstract)
def _clean_abstract(self, text):
text = re.sub("(?s)(Conventions [Uu]sed in this [Dd]ocument|Requirements [Ll]anguage)?[\n ]*The key words \"MUST\", \"MUST NOT\",.*$", "", text)
# Get rid of status/copyright boilerplate
text = re.sub("(?s)\nStatus of [tT]his Memo\n.*$", "", text)
# wrap long lines without messing up formatting of Ok paragraphs:
while re.match("([^\n]{72,}?) +", text):
text = re.sub("([^\n]{72,}?) +([^\n ]*)(\n|$)", "\\1\n\\2 ", text)
return text
# ------------------------------------------------------------------
def get_authors(self):
"""Returns a list of strings with author name and email within angle brackets"""
if self._authors == None:
self.extract_authors()
return self._authors
def get_authors_with_firm(self):
"""Returns a list of strings with author name and email within angle brackets"""
if self._authors_with_firm == None:
self.extract_authors()
return self._authors_with_firm
def get_author_list(self):
"""Returns a list of tuples, with each tuple containing (given_names,
surname, email, company). Email will be None if unknown.
"""
if self._author_info == None:
self.extract_authors()
return self._author_info
def extract_authors(self):
"""Extract author information from draft text.
"""
aux = {
"honor" : r"(?:[A-Z]\.|Dr\.?|Dr\.-Ing\.|Prof(?:\.?|essor)|Sir|Lady|Dame|Sri)",
"prefix": r"([Dd]e|Hadi|van|van de|van der|Ver|von|[Ee]l)",
"suffix": r"(jr.?|Jr.?|II|2nd|III|3rd|IV|4th)",
"first" : r"([A-Z][-A-Za-z'`~]*)(( ?\([A-Z][-A-Za-z'`~]*\))?(\.?[- ]{1,2}[A-Za-z'`~]+)*)",
"last" : r"([-A-Za-z'`~]{2,})",
"months": r"(January|February|March|April|May|June|July|August|September|October|November|December)",
"mabbr" : r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\.?",
}
authcompanyformats = [
r" {6}(?P<author>(%(first)s[ \.]{1,3})+((%(prefix)s )?%(last)s)( %(suffix)s)?), (?P<company>[^.]+\.?)$" % aux,
r" {6}(?P<author>(%(first)s[ \.]{1,3})+((%(prefix)s )?%(last)s)( %(suffix)s)?) *\((?P<company>[^.]+\.?)\)$" % aux,
]
authformats = [
r" {6}((%(first)s[ \.]{1,3})+((%(prefix)s )?%(last)s)( %(suffix)s)?)(, ([^.]+\.?|\([^.]+\.?|\)))?,?$" % aux,
r" {6}(((%(prefix)s )?%(last)s)( %(suffix)s)?, %(first)s)?$" % aux,
r" {6}(%(last)s)$" % aux,
]
multiauthformats = [
(
r" {6}(%(first)s[ \.]{1,3}((%(prefix)s )?%(last)s)( %(suffix)s)?)(, ?%(first)s[ \.]{1,3}((%(prefix)s )?%(last)s)( %(suffix)s)?)+$" % aux,
r"(%(first)s[ \.]{1,3}((%(prefix)s )?%(last)s)( %(suffix)s)?)" % aux
),
]
editorformats = [
r"(?:, | )([Ee]d\.?|\([Ee]d\.?\)|[Ee]ditor)$",
]
companyformats = [
r" {6}(([A-Za-z'][-A-Za-z0-9.& ']+)(,? ?(Inc|Ltd|AB|S\.A)\.?))$",
r" {6}(([A-Za-z'][-A-Za-z0-9.& ']+)(/([A-Za-z'][-A-Za-z0-9.& ']+))+)$",
r" {6}([a-z0-9.-]+)$",
r" {6}(([A-Za-z'][-A-Za-z0-9.&']+)( [A-Za-z'][-A-Za-z0-9.&']+)*)$",
r" {6}(([A-Za-z'][-A-Za-z0-9.']+)( & [A-Za-z'][-A-Za-z0-9.']+)*)$",
r" {6}\((.+)\)$",
r" {6}(\w+\s?\(.+\))$",
]
dateformat = r"(((%(months)s|%(mabbr)s) \d+, |\d+ (%(months)s|%(mabbr)s),? |\d+/\d+/)\d\d\d\d|\d\d\d\d-\d\d-\d\d)$" % aux
address_section = r"^ *([0-9]+\.)? *(Author|Editor)('s|s'|s|\(s\)) (Address|Addresses|Information)"
ignore = [
"Standards Track", "Current Practice", "Internet Draft", "Working Group",
"Expiration Date",
]
def make_authpat(hon, first, last, suffix):
def dotexp(s):
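# Expand abbreviation dots into \w* wildcards so e.g. "J. Smith" can match "John Smith".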
s = re.sub(r"\. ", r"\w* ", s)
s = re.sub(r"\.$", r"\w*", s)
s = re.sub(r"\.(\w)", r"\w* \1", s)
return s
first = dotexp(first)
last = dotexp(last)
first = re.sub("[()]", " ", first)
if " " in first:
# if there's a middle part, let it be optional
first, middle = first.split(" ", 1)
first = "%s( +%s)?" % (first, middle)
# Double names (e.g., Jean-Michel) are abbreviated as two letter
# connected by a dash -- let this expand appropriately
first = re.sub(r"^([A-Z])-([A-Z])\\w\*", r"\1.*-\2.*", first)
# Some chinese names are shown with double-letter(latin) abbreviated given names, rather than
# a single-letter(latin) abbreviation:
first = re.sub(r"^([A-Z])[A-Z]+\\w\*", r"\1[-\w]+", first)
# permit insertion of middle names between first and last, and
# add possible honorific and suffix information
authpat = r"(?:^| and )(?:%(hon)s ?)?(['`]*%(first)s\S*( +[^ ]+)* +%(last)s)( *\(.*|,( [A-Z][-A-Za-z0-9]*)?| %(suffix)s| [A-Z][a-z]+)?" % {"hon":hon, "first":first, "last":last, "suffix":suffix,}
return authpat
authors = []
companies = []
companies_seen = []
self._docheader = ""
# Collect first-page author information first
have_blankline = False
have_draftline = False
prev_blankline = False
for line in self.lines[:30]:
self._docheader += line+"\n"
author_on_line = False
_debug( " ** " + line)
leading_space = len(re.findall("^ *", line)[0])
line_len = len(line.rstrip())
trailing_space = line_len <= 72 and 72 - line_len or 0
# Truncate long lines at the first space past column 80:
trunc_space = line.find(" ", 80)
if line_len > 80 and trunc_space > -1:
line = line[:trunc_space]
if line_len > 60:
# Look for centered title, break if found:
if (leading_space > 5 and abs(leading_space - trailing_space) < 5):
_debug("Breaking for centered line")
break
if re.search(dateformat, line):
if authors:
_debug("Breaking for dateformat after author name")
for editorformat in editorformats:
if re.search(editorformat, line):
line = re.sub(editorformat, "", line)
break
for lineformat, authformat in multiauthformats:
match = re.search(lineformat, line)
if match:
_debug("a. Multiauth format: '%s'" % lineformat)
author_list = re.findall(authformat, line)
authors += [ a[0] for a in author_list ]
companies += [ None for a in author_list ]
author_on_line = True
#_debug("\nLine: " + line)
#_debug("Format: " + authformat)
for author in author_list:
_debug("Author: '%s'" % author[0])
break
if not author_on_line:
for lineformat in authcompanyformats:
match = re.search(lineformat, line)
if match:
_debug("b. Line format: '%s'" % lineformat)
maybe_company = match.group("company").strip(" ,.")
# is the putative company name just a partial name, i.e., a part
# that commonly occurs after a comma as part of a company name,
# as in "Foo Bar, Inc."? If so, skip; else assume there's a
# company name after the comma.
if not maybe_company in ["Inc", "Ltd", "S.A", "AG", "AB", "N.V", ]:
author = match.group("author")
company = match.group("company")
authors += [ author, '']
companies += [ None, company ]
#_debug("\nLine: " + line)
#_debug("Format: " + authformat)
_debug("Author: '%s'" % author)
_debug("Company: '%s'" % company)
author_on_line = True
break
if not author_on_line:
for authformat in authformats:
match = re.search(authformat, line)
if match:
_debug("c. Auth format: '%s'" % authformat)
author = match.group(1)
authors += [ author ]
companies += [ None ]
#_debug("\nLine: " + line)
#_debug("Format: " + authformat)
_debug("Author: '%s'" % author)
author_on_line = True
break
if not author_on_line:
for authformat in companyformats:
match = re.search(authformat, line)
if match:
_debug("d. Company format: '%s'" % authformat)
company = match.group(1)
authors += [ "" ]
companies += [ company ]
#_debug("\nLine: " + line)
#_debug("Format: " + authformat)
_debug("Company: '%s'" % company)
break
if authors and not author_on_line:
# Retain information about blank lines in author list
authors += [""]
companies += [ "" ]
if line.strip() == "":
if prev_blankline and authors:
_debug("Breaking, having found consecutive blank lines after author name")
break
if authors:
have_blankline = True
prev_blankline = True
else:
prev_blankline = False
if "draft-" in line:
have_draftline = True
if have_blankline and have_draftline:
_debug("Breaking, having found both blank line and draft-name line")
break
# remove trailing blank entries in the author list:
for i in range(len(authors)-1,-1,-1):
if authors[i] == "" and companies[i] == "":
del authors[i]
del companies[i]
else:
break
_debug("A:companies : %s" % str(companies))
#companies = [ None if a else '' for a in authors ]
#_debug("B:companies : %s" % str(companies))
#find authors' addresses section if it exists
_debug("B:authors : %s" % str(authors))
last_line = len(self.lines)-1
address_section_pos = last_line/2
for i in range(last_line/2,last_line):
line = self.lines[i]
if re.search(address_section, line):
address_section_pos = i
break
found_pos = []
company_or_author = None
for i in range(len(authors)):
_debug("1: authors[%s]: %s" % (i, authors[i]))
_debug(" company[%s]: %s" % (i, companies[i]))
author = authors[i]
if i+1 < len(authors):
company_or_author = authors[i+1]
else:
company_or_author = None
if author in [ None, '', ]:
continue
suffix_match = re.search(" %(suffix)s$" % aux, author)
if suffix_match:
suffix = suffix_match.group(1)
author = author[:-len(suffix)].strip()
else:
suffix = None
if "," in author:
last, first = author.split(",",1)
author = "%s %s" % (first.strip(), last.strip())
if not " " in author:
if "." in author:
first, last = author.rsplit(".", 1)
first += "."
else:
author = "[A-Z].+ " + author
first, last = author.rsplit(" ", 1)
else:
if "." in author:
first, last = author.rsplit(".", 1)
first += "."
else:
first, last = author.rsplit(" ", 1)
if "." in first and not ". " in first:
first = first.replace(".", ". ").strip()
first = first.strip()
last = last.strip()
prefix_match = re.search(" %(prefix)s$" % aux, first)
if prefix_match:
prefix = prefix_match.group(1)
first = first[:-len(prefix)].strip()
last = prefix+" "+last
_debug("First, Last: '%s' '%s'" % (first, last))
for firstname, surname, casefixname in [ (first,last,last), (last,first,first), (first,last,last.upper()), (last,first,first.upper()), ]:
for left, right in [(firstname, casefixname), (casefixname, firstname)]:
author = "%s %s" % (left, right)
_debug("\nAuthors: "+str(authors))
_debug("Author: "+author)
# Pattern for full author information search, based on first page author name:
authpat = make_authpat(aux['honor'], left, right, aux['suffix'])
_debug("Authpat: " + authpat)
start = 0
col = None
# Find start of author info for this author (if any).
# Scan towards the front from the end of the file, looking for a match to authpath
for j in range(last_line, address_section_pos, -1):
line = self.lines[j]
_debug( "Line: " + line)
forms = [ line ] + [ line.replace(short, longform[short]) for short in longform if short in line ]
for form in forms:
try:
if re.search(authpat, form.strip()) and not j in found_pos:
_debug( "Match")
start = j
found_pos += [ start ]
_debug( " ==> start %s, normalized '%s'" % (start, form.strip()))
# The author info could be formatted in multiple columns...
columns = re.split("( +| and )", form)
# _debug( "Columns:" + str(columns))
# Find which column:
# _debug( "Col range:" + str(range(len(columns))))
cols = [ c for c in range(len(columns)) if re.search(authpat+r"( and |, |$)", columns[c].strip()) ]
if cols:
col = cols[0]
if not (start, col) in found_pos:
found_pos += [ (start, col) ]
_debug( "Col: %d" % col)
beg = len("".join(columns[:col]))
_debug( "Beg: %d '%s'" % (beg, "".join(columns[:col])))
_debug( "Len: %d" % len(columns))
if col == len(columns) or col == len(columns)-1:
end = None
_debug( "End1: %s" % end)
else:
end = beg + len("".join(columns[col:col+2]))
_debug( "End2: %d '%s'" % (end, "".join(columns[col:col+2])))
_debug( "Cut: '%s'" % form[beg:end])
author_match = re.search(authpat, columns[col].strip()).group(1)
_debug( "AuthMatch: '%s'" % (author_match,))
if re.search('\(.*\)$', author_match.strip()):
author_match = author_match.rsplit('(',1)[0].strip()
if author_match in companies_seen:
companies[i] = authors[i]
authors[i] = None
else:
fullname = author_match
#if casefixname in author_match:
# fullname = author_match.replace(casefixname, surname)
#else:
# fullname = author_match
fullname = re.sub(" +", " ", fullname)
if left == firstname:
given_names, surname = fullname.rsplit(None, 1)
else:
surname, given_names = fullname.split(None, 1)
if " " in given_names:
first, middle = given_names.split(None, 1)
else:
first = given_names
middle = None
names = (first, middle, surname, suffix)
if suffix:
fullname = fullname+" "+suffix
for names in [
(first, middle, surname, suffix),
(first, surname, middle, suffix),
(middle, first, surname, suffix),
(middle, surname, first, suffix),
(surname, first, middle, suffix),
(surname, middle, first, suffix),
]:
parts = [ n for n in names if n ]
if (" ".join(parts) == fullname):
authors[i] = (fullname, first, middle, surname, suffix)
companies[i] = None
break
else:
_warn("Author tuple doesn't match text in draft: %s, %s" % (authors[i], fullname))
authors[i] = None
break
except AssertionError:
sys.stderr.write("filename: "+self.filename+"\n")
sys.stderr.write("authpat: "+authpat+"\n")
raise
if start and col != None:
break
if start and col != None:
break
if start and col != None:
break
# End for:
if not authors[i]:
continue
_debug("2: authors[%s]: %s" % (i, authors[i]))
if start and col != None:
_debug("\n * %s" % (authors[i], ))
nonblank_count = 0
blanklines = 0
email = None
country = None
for line_offset, line in enumerate(self.lines[start+1:]):
_debug( " " + line.strip())
# Break on the second blank line
if not line:
blanklines += 1
if blanklines >= 3:
_debug( " - Break on blanklines")
break
else:
continue
else:
nonblank_count += 1
# Maybe break on author name
# _debug("Line: %s"%line.strip())
# for a in authors:
# if a and a not in companies_seen:
# _debug("Search for: %s"%(r"(^|\W)"+re.sub("\.? ", ".* ", a)+"(\W|$)"))
authmatch = [ a for a in authors[i+1:] if a and not a.lower() in companies_seen and (re.search((r"(?i)(^|\W)"+re.sub("[. ]+", ".*", a)+"(\W|$)"), line.strip()) or acronym_match(a, line.strip()) )]
if authmatch:
_debug(" ? Other author or company ? : %s" % authmatch)
_debug(" Line: "+line.strip())
_debug(" C or A: %s"%company_or_author)
if nonblank_count == 1 or (nonblank_count == 2 and not blanklines) or (company_or_author==line.strip() and not blanklines):
# First line after an author -- this is a company
companies_seen += [ c.lower() for c in authmatch ]
companies_seen += [ line.strip().lower() ] # XXX fix this for columnized author list
companies_seen = list(set(companies_seen))
_debug(" -- Companies: " + ", ".join(companies_seen))
for k in range(i+1, len(authors)):
if authors[k] and authors[k].lower() in companies_seen:
companies[k] = authors[k]
authors[k] = None
elif blanklines and not "@" in line:
# Break on an author name
_debug( " - Break on other author name")
break
else:
pass
def columnify(l):
try:
column = l.replace('\t', 8 * ' ')[max(0, beg - 1):end].strip()
except:
column = l
column = re.sub(" *(?:\(at\)| <at> | at ) *", "@", column)
column = re.sub(" *(?:\(dot\)| <dot> | dot ) *", ".", column)
column = re.sub("&cisco.com", "@cisco.com", column)
column = column.replace("\xa0", " ")
return column
column = columnify(line)
# if re.search("^\w+: \w+", column):
# keyword = True
# else:
# if keyword:
# # Break on transition from keyword line to something else
# _debug( " - Break on end of keywords")
# break
#_debug( " Column text :: " + column)
if nonblank_count >= 2 and blanklines == 0:
# Usually, the contact info lines will look
# like this: "Email: someone@example.com" or
# "Tel: +1 (412)-2390 23123", but sometimes
# the : is left out. That's okay for things we
# can't misinterpret, but "tel" may match "Tel
# Aviv 69710, Israel" so match
# - misc contact info
# - tel/fax [number]
# - [phone number]
# - [email]
other_contact_info_regex = re.compile(r'^(((contact )?e|\(e|e-|m|electronic )?mail|email_id|mailto|e-main|(tele)?phone|voice|mobile|work|uri|url|tel:)\b|^((ph|tel\.?|telefax|fax) *[:.]? *\(?( ?\+ ?)?[0-9]+)|^(\++[0-9]+|\(\+*[0-9]+\)|\(dsn\)|[0-9]+)([ -.]*\b|\b[ -.]*)(([0-9]{2,}|\([0-9]{2,}\)|(\([0-9]\)|[0-9])[ -][0-9]{2,}|\([0-9]\)[0-9]+)([ -.]+([0-9]+|\([0-9]+\)))+|([0-9]{7,}|\([0-9]{7,}\)))|^(<?[-a-z0-9._+]+|{([-a-z0-9._+]+, ?)+[-a-z0-9._+]+})@[-a-z0-9._]+>?|^https?://|^www\.')
next_line_index = start + 1 + line_offset + 1
if (not country
and not other_contact_info_regex.search(column.lower())
and next_line_index < len(self.lines)):
next_line_lower = columnify(self.lines[next_line_index]).lower().strip()
if not next_line_lower or other_contact_info_regex.search(next_line_lower):
# country should be here, as the last
# part of the address, right before an
# empty line or other contact info
country = column.strip() or None
_debug(" Country: %s" % country)
_debug("3: authors[%s]: %s" % (i, authors[i]))
emailmatch = re.search("[-A-Za-z0-9_.+]+@[-A-Za-z0-9_.]+", column)
if emailmatch and not "@" in author:
email = emailmatch.group(0).lower()
break
authors[i] = authors[i] + ( email, country)
else:
if not author in ignore:
companies[i] = authors[i]
_debug("Not an author? '%s'" % (author))
authors[i] = None
assert(len(authors) == len(companies))
_debug('Author list: %s' % authors)
_debug('Company list: %s' % companies)
for i in range(len(authors)):
if authors[i]:
_debug('authors[%s]: %s' % (i, authors[i]))
company = ''
for k in range(i+1, len(companies)):
_debug('companies[%s]: %s' % (k, companies[k]))
if companies[k] != None:
company = companies[k]
break
authors[i] = authors[i] + ( company, )
authors = [ a for a in authors if a ]
_debug(" * Final author tuples: %s" % (authors,))
_debug(" * Final company list: %s" % (companies,))
_debug(" * Final companies_seen: %s" % (companies_seen,))
self._author_info = authors
self._authors_with_firm = [ "%s <%s> (%s)"%(full,email,company) for full,first,middle,last,suffix,email,country,company in authors ] # pyflakes:ignore
self._authors = [ "%s <%s>"%(full,email) if email else full for full,first,middle,last,suffix,email,country,company in authors ]
self._authors.sort()
_debug(" * Final author list: " + ", ".join(self._authors))
_debug("-"*72)
# ------------------------------------------------------------------
def get_title(self):
if self._title:
return self._title
match = re.search('(?:\n\s*\n\s*)((.+\n){0,2}(.+\n*))(\s+<?draft-\S+\s*\n)\s*\n', self.pages[0])
if not match:
match = re.search('(?:\n\s*\n\s*)<?draft-\S+\s*\n*((.+\n){1,3})\s*\n', self.pages[0])
if not match:
match = re.search('(?:\n\s*\n\s*)((.+\n){0,2}(.+\n*))(\s*\n){2}', self.pages[0])
if not match:
match = re.search('(?i)(.+\n|.+\n.+\n)(\s*status of this memo\s*\n)', self.pages[0])
if match:
title = match.group(1)
title = title.strip()
title = re.sub(r'(?s)\n\s*\<?draft-.*$','', title)
title = re.sub(r'\s*\n\s*', ' ', title)
title = re.sub(r' +', ' ', title)
self._title = title
return self._title
self.errors["title"] = "Could not find the title on the first page."
# ------------------------------------------------------------------
def get_refs(self):
# Bill's horrible "references section" regexps, built up over lots of years
# of fine tuning for different formats.
# Examples:
# Appendix A. References:
# A.1. Informative References:
sectionre = re.compile( r'(?i)(?:Appendix\s+)?(?:(?:[A-Z]\.)?[0-9.]*\s+)?(?:(\S+)\s*)?references:?$' )
# 9.1 Normative
sectionre2 = re.compile( r'(?i)(?:(?:[A-Z]\.)?[0-9.]*\s+)?(\S+ormative)$' )
# One other reference section type seen:
sectionre3 = re.compile( r'(?i)References \((\S+ormative)\)$' )
# An Internet-Draft reference.
idref = re.compile( r'(?i)\b(draft-(?:[-\w]+(?=-\d\d)|[-\w]+))(-\d\d)?\b' )
# An RFC-and-other-series reference.
rfcref = re.compile( r'(?i)\b(rfc|std|bcp|fyi)[- ]?(\d+)\b' )
# False positives for std
not_our_std_ref = re.compile( r'(?i)((\b(n?csc|fed|mil|is-j)-std\b)|(\bieee\s*std\d*\b)|(\bstd\s+802\b))' )
# An Internet-Draft or series reference hyphenated by a well-meaning line break.
eol = re.compile( r'(?i)\b(draft[-\w]*-|rfc|std|bcp|fyi)$' )
# std at the front of a line can hide things like IEEE STD or MIL-STD
std_start = re.compile( r'(?i)std\n*\b' )
refs = {}
in_ref_sect = False
in_norm_ref_sect = False
refType = 'unk'
for i in range( 15, len( self.lines ) ):
line = self.lines[ i ].strip()
# skip over lines until we find the start of the reference section
if not in_ref_sect:
m = sectionre.match( line )
if not m:
m = sectionre2.match( line )
if not m:
m = sectionre3.match( line )
if m:
in_ref_sect = True
refType = 'info'
if line.lower().find("normative") > 1:
in_norm_ref_sect = True
refType = 'norm'
# might be subsections within a references section
if in_ref_sect and not in_norm_ref_sect:
m = sectionre.match( line )
if not m:
m = sectionre2.match( line )
if not m:
m = sectionre3.match( line )
if m:
in_ref_sect = True
if line.lower().find("normative") > 1:
in_norm_ref_sect = True
refType = 'norm'
# look for the end of the normative reference section
if in_norm_ref_sect:
m = sectionre.match( line )
if not m:
m = sectionre2.match( line )
if not m:
m = sectionre3.match( line )
if m and line.lower().find("normative") < 0:
in_norm_ref_sect = False
refType = 'info'
# find references within the section
if in_ref_sect:
# If something got split badly, rejoin it.
if eol.search( line ) and i < len( self.lines ) - 1:
line += self.lines[ i + 1 ].lstrip()
m = idref.search( line )
if m:
draft = m.group( 1 )
if draft not in refs:
refs[ draft ] = refType
m = rfcref.search( line )
if m:
( series, number ) = m.groups()
if series.lower()=='std' and std_start.search(line) and i > 15:
line = self.lines[i-1].rstrip()+line
if series.lower()!='std' or not not_our_std_ref.search( line ):
name = series.lower() + number.lstrip( '0' )
if name not in refs:
refs[ name ] = refType
# Don't add any references that point back into this doc
if self.filename in refs:
del refs[self.filename]
return refs
def old_get_refs( self ):
refs = []
normrefs = []
rfcrefs = []
draftrefs = []
refline = None
for i in range(len(self.lines)-1, 15, -1):
if re.search(r"(?i)^ *[0-9.]+ *(((normative|informative|informational|non-normative) )?references|references\W+(normative|informative))", self.lines[i]):
if not '. . .' in self.lines[i] and not '...' in self.lines[i]:
refline = i
if refline:
for i in range(refline, len(self.lines)):
line = self.lines[i].strip()
ref_match = re.search(r"(?i)^\[[a-z0-9.-]+( [a-z0-9.-]+)?\].+", line)
if ref_match:
para = line
while True:
i += 1
if i >= len(self.lines):
break
line = self.lines[i].strip()
if not line:
break
if para[-1] not in ["-", "/"]:
para += " "
para += line
refs += [ para ]
rfc_match = re.search("(?i)rfc ?\d+", para)
if rfc_match:
rfcrefs += [ rfc_match.group(0).replace(" ","").lower() ]
draft_match = re.search("draft-[a-z0-9-]+", para)
if draft_match:
draft = draft_match.group(0).lower()
if not draft in draftrefs:
draftrefs += [ draft ]
normrefs = list(set(normrefs))
normrefs.sort()
rfcrefs = list(set(rfcrefs))
rfcrefs.sort()
refs = list(set(refs))
refs.sort()
return normrefs, rfcrefs, draftrefs, refs
# ----------------------------------------------------------------------
def getmeta(fn):
# Initial values
fields = {}
fields["eventsource"] = "draft"
if " " in fn or not fn.endswith(".txt"):
_warn("Skipping unexpected draft name: '%s'" % (fn))
return {}
if os.path.exists(fn):
filename = fn
fn = os.path.basename(fn)
else:
if fn.lower().startswith('rfc'):
filename = os.path.join("/www/tools.ietf.org/rfc", fn)
elif not "/" in fn:
filename = os.path.join("/www/tools.ietf.org/id", fn)
if not os.path.exists(filename):
fn = filename
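# If the requested revision does not exist, step back one revision at a time
# until an existing file (or -00) is reached.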
while not "-00." in fn:
revmatch = re.search("-(\d\d)\.", fn)
if revmatch:
rev = revmatch.group(1)
prev = "%02d" % (int(rev)-1)
fn = fn.replace("-%s."%rev, "-%s."%prev)
if os.path.exists(fn):
_warn("Using rev %s instead: '%s'" % (prev, filename))
filename = fn
fn = os.path.basename(fn)
break
else:
break
else:
filename = fn
if not os.path.exists(filename):
_warn("Could not find file: '%s'" % (filename))
return
timestamp = time.strftime("%Y-%m-%dT%H:%M:%S+00:00", time.gmtime(os.stat(filename)[stat.ST_MTIME]))
with open(filename, 'rb') as file:
try:
draft = Draft(file.read().decode('utf8'), filename)
except UnicodeDecodeError:
draft = Draft(file.read().decode('latin1'), filename)
#_debug("\n".join(draft.lines))
fields["eventdate"] = timestamp
if draft.filename:
fields["doctag"] = draft.filename
fields["docrev"] = draft.revision
fields["doctitle"] = draft.get_title()
fields["docpages"] = str(draft.get_pagecount())
fields["docauthors"] = ", ".join(draft.get_authors())
fields["_authorlist"] = draft.get_author_list()
fields["docaffiliations"] = ", ".join(draft.get_authors_with_firm())
if opt_debug:
fields["docheader"] = draft._docheader
normrefs, rfcrefs, draftrefs, refs = draft.old_get_refs()
fields["docrfcrefs"] = ", ".join(rfcrefs)
fields["docdraftrefs"] = ", ".join(draftrefs)
fields["doccreationdate"] = str(draft.get_creation_date())
deststatus = draft.get_status()
if deststatus:
fields["docdeststatus"] = deststatus
abstract = draft.get_abstract()
if abstract:
fields["docabstract"] = abstract
return fields
# ----------------------------------------------------------------------
def _output(docname, fields, outfile=sys.stdout):
global company_domain
if opt_getauthors:
# Output an (incomplete!) getauthors-compatible format.
# Information about security and iana sections presence is
# missing.
for full,first,middle,last,suffix,email,country,company in fields["_authorlist"]:
if company in company_domain:
company = company_domain[company]
else:
if email and '@' in email:
company = email.split('@')[1]
if company.endswith(".com"):
company = company[:-4]
fields["name"] = full
fields["email"] = email
fields["company"] = company
fields["country"] = country or "UNKNOWN"
try:
year, month, day = fields["doccreationdate"].split("-")
except ValueError:
year, month, day = "UNKNOWN", "UNKNOWN", "UNKNOWN"
fields["day"] = day
fields["month"] = month_names[int(month)] if month != "UNKNOWN" else "UNKNOWN"
fields["year"] = year
print "%(doctag)s:%(name)s:%(company)s:%(email)s:%(country)s:%(docpages)s:%(month)s:%(year)s:%(day)s:" % fields
else:
if opt_attributes:
def outputkey(key, fields):
field = fields[key]
if "\n" in field:
field = "\n" + field.rstrip()
else:
field = field.strip()
outfile.write("%-24s: %s\n" % ( key, field.replace("\\", "\\\\" ).replace("'", "\\x27" )))
else:
def outputkey(key, fields):
outfile.write(" %s='%s'" % ( key.lower(), fields[key].strip().replace("\\", "\\\\" ).replace("'", "\\x27" ).replace("\n", "\\n")))
if opt_timestamp:
outfile.write("%s " % (fields["eventdate"]))
outfile.write("%s" % (os.path.basename(docname.strip())))
keys = fields.keys()
keys.sort()
for key in keys:
if fields[key] and not key in ["eventdate", ] and not key.startswith("_"):
outputkey(key, fields)
outfile.write("\n")
# ----------------------------------------------------------------------
def _printmeta(fn, outfile=sys.stdout):
if opt_trace:
t = time.time()
sys.stderr.write("%-58s" % fn[:-4])
fields = getmeta(fn)
if fields:
_output(fields.get("doctag", fn[:-7]), fields, outfile)
if opt_trace:
sys.stderr.write("%5.1f\n" % ((time.time() - t)))
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
company_domain = {}
def _main(outfile=sys.stdout):
global opt_debug, opt_timestamp, opt_trace, opt_authorinfo, opt_getauthors, files, company_domain, opt_attributes
# set default values, if any
# ----------------------------------------------------------------------
# Option processing
# ----------------------------------------------------------------------
options = ""
for line in re.findall("\n +(if|elif) +opt in \[(.+)\]:\s+#(.+)\n", open(sys.argv[0]).read()):
if not options:
options += "OPTIONS\n"
options += " %-16s %s\n" % (line[1].replace('"', ''), line[2])
options = options.strip()
# with ' < 1:' on the next line, this is a no-op:
if len(sys.argv) < 1:
vars = globals()
vars.update(locals())
print __doc__ % vars
sys.exit(1)
try:
opts, files = getopt.gnu_getopt(sys.argv[1:], "dhatTv", ["debug", "getauthors", "attribs", "attributes", "help", "timestamp", "notimestamp", "trace", "version",])
except Exception, e:
print "%s: %s" % (program, e)
sys.exit(1)
# parse options
for opt, value in opts:
if opt in ["-d", "--debug"]: # Output debug information
opt_debug = True
elif opt in ["-h", "--help"]: # Output this help text, then exit
vars = globals()
vars.update(locals())
print __doc__ % vars
sys.exit(1)
elif opt in ["-v", "--version"]: # Output version information, then exit
print program, version
sys.exit(0)
elif opt in ["--getauthors"]: # Output an (incomplete) getauthors-compatible format
opt_getauthors = True
elif opt in ["-a", "--attribs"]: # Output key-value attribute pairs
opt_attributes = True
elif opt in ["-t", ]: # Toggle leading timestamp information
opt_timestamp = not opt_timestamp
elif opt in ["--timestamp"]: # Emit leading timestamp information
opt_timestamp = True
elif opt in ["--notimestamp"]: # Omit leading timestamp information
opt_timestamp = False
elif opt in ["-T", "--trace"]: # Emit trace information while working
opt_trace = True
company_domain = {}
if opt_getauthors:
gadata = open("/www/tools.ietf.org/tools/getauthors/getauthors.data")
for line in gadata:
if line.startswith("company:"):
try:
kword, name, abbrev = line.strip().split(':')
company_domain[name] = abbrev
except ValueError:
pass
if not files:
files = [ "-" ]
for file in files:
_debug( "Reading drafts from '%s'" % file)
if file == "-":
file = sys.stdin
elif file.endswith(".gz"):
import gzip
file = gzip.open(file)
else:
file = open(file)
basename = os.path.basename(file.name)
if basename.startswith("draft-"):
draft = basename
_debug( "** Processing '%s'" % draft)
_printmeta(file.name, outfile)
else:
for line in file:
draft = line.strip()
if draft.startswith("#"):
continue
if draft:
_debug( "** Processing '%s'" % draft)
_printmeta(draft, outfile)
if __name__ == "__main__":
try:
_main()
except KeyboardInterrupt:
raise
except Exception, e:
if opt_debug:
raise
else:
_err(e)
| 44.821225 | 508 | 0.440449 |
f35119ef802c7adcfa01a2cf0cd5eb74e4028f4a | 16,896 | py | Python | landlab/components/pet/potential_evapotranspiration_field.py | AndresQuichimbo/landlab | 39fee962ec962a389ae4522a55a17f53a0d37a6e | [ "MIT" ] | null | null | null | landlab/components/pet/potential_evapotranspiration_field.py | AndresQuichimbo/landlab | 39fee962ec962a389ae4522a55a17f53a0d37a6e | [ "MIT" ] | null | null | null | landlab/components/pet/potential_evapotranspiration_field.py | AndresQuichimbo/landlab | 39fee962ec962a389ae4522a55a17f53a0d37a6e | [ "MIT" ] | null | null | null |
import numpy as np
from landlab import Component
_VALID_METHODS = set(["Constant", "PriestleyTaylor", "MeasuredRadiationPT", "Cosine"])
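# 'Cosine' follows Zhou et al. (2013); 'PriestleyTaylor' follows the ASCE-EWRI (2005) report
# (see References in the class docstring below).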
def _assert_method_is_valid(method):
if method not in _VALID_METHODS:
raise ValueError("%s: Invalid method name" % method)
class PotentialEvapotranspiration(Component):
"""
Potential Evapotranspiration Component calculates spatially distributed
potential evapotranspiration based on an input radiation factor (the spatial
distribution of incoming radiation) using a chosen method such as 'Constant'
or 'PriestleyTaylor'. See Zhou et al. (2013) for the 'Cosine' method and the
ASCE-EWRI Task Committee Report (Jan 2005) for the 'PriestleyTaylor' method.
Note: Calling 'PriestleyTaylor' method would generate/overwrite shortwave &
longwave radiation fields.
.. codeauthor:: Sai Nudurupati and Erkan Istanbulluoglu
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.components.pet import PotentialEvapotranspiration
>>> grid = RasterModelGrid((5, 4), xy_spacing=(0.2, 0.2))
>>> grid['cell']['radiation__ratio_to_flat_surface'] = np.array([
... 0.38488566, 0.38488566,
... 0.33309785, 0.33309785,
... 0.37381705, 0.37381705])
>>> PET = PotentialEvapotranspiration(grid)
>>> PET.name
'PotentialEvapotranspiration'
>>> PET.input_var_names
('radiation__ratio_to_flat_surface',)
>>> sorted(PET.output_var_names)
['radiation__incoming_shortwave_flux',
'radiation__net_flux',
'radiation__net_longwave_flux',
'radiation__net_shortwave_flux',
'surface__potential_evapotranspiration_rate']
>>> sorted(PET.units) # doctest: +NORMALIZE_WHITESPACE
[('radiation__incoming_shortwave_flux', 'W/m^2'),
('radiation__net_flux', 'W/m^2'),
('radiation__net_longwave_flux', 'W/m^2'),
('radiation__net_shortwave_flux', 'W/m^2'),
('radiation__ratio_to_flat_surface', 'None'),
('surface__potential_evapotranspiration_rate', 'mm')]
>>> PET.grid.number_of_cell_rows
3
>>> PET.grid.number_of_cell_columns
2
>>> PET.grid is grid
True
>>> pet_rate = grid.at_cell['surface__potential_evapotranspiration_rate']
>>> np.allclose(pet_rate, 0.)
True
>>> PET.current_time = 0.5
>>> PET.update()
>>> np.allclose(pet_rate, 0.)
False
References
----------
**Required Software Citation(s) Specific to this Component**
None Listed
**Additional References**
ASCE-EWRI: The ASCE standardized reference evapotranspiration equation, in:
Standardization of Reference Evapotranspiration Task Committee Final Report,
edited by: Allen, R. G., Walter, I. A., Elliot, R. L., Howell, T. A.,
Itenfisu, D., Jensen, M. E., and Snyder, R. L., Technical Committee report
to the Environmental and Water Resources Institute of the American Society
of Civil Engineers from the Task Committee on Standardization of Reference
Evapotranspiration, Reston, VA, USA, 2005.
Zhou, X., Istanbulluoglu, E., and Vivoni, E. R.: Modeling the
ecohydrological role of aspect-controlled radiation on tree-grass-shrub
    coexistence in a semiarid climate, Water Resour. Res., 49, 2872–2895,
doi:10.1002/wrcr.20259, 2013.
"""
_name = "PotentialEvapotranspiration"
_info = {
"radiation__incoming_shortwave_flux": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "W/m^2",
"mapping": "cell",
"doc": "total incident shortwave radiation over the time step",
},
"radiation__net_flux": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "W/m^2",
"mapping": "cell",
"doc": "net total radiation over the time step",
},
"radiation__net_longwave_flux": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "W/m^2",
"mapping": "cell",
"doc": "net incident longwave radiation over the time step",
},
"radiation__net_shortwave_flux": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "W/m^2",
"mapping": "cell",
"doc": "net incident shortwave radiation over the time step",
},
"radiation__ratio_to_flat_surface": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "None",
"mapping": "cell",
"doc": "ratio of total incident shortwave radiation on sloped surface to flat surface",
},
"surface__potential_evapotranspiration_rate": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "mm",
"mapping": "cell",
"doc": "potential sum of evaporation and potential transpiration",
},
}
def __init__(
self,
grid,
method="Cosine",
priestley_taylor_const=1.26,
albedo=0.6,
latent_heat_of_vaporization=28.34,
psychometric_const=0.066,
stefan_boltzmann_const=0.0000000567,
solar_const=1366.67,
latitude=34.0,
elevation_of_measurement=300,
adjustment_coeff=0.18,
lt=0.0,
nd=365.0,
MeanTmaxF=12.0,
delta_d=5.0,
current_time=None,
const_potential_evapotranspiration=12.0,
Tmin=0.0,
Tmax=1.0,
Tavg=0.5,
obs_radiation=350.0,
):
"""
Parameters
----------
grid: RasterModelGrid
A grid.
method: {'Constant', 'PriestleyTaylor', 'MeasuredRadiationPT', 'Cosine'}, optional
Priestley Taylor method will spit out radiation outputs too.
        priestley_taylor_const: float, optional
Alpha used in Priestley Taylor method.
albedo: float, optional
Albedo.
latent_heat_of_vaporization: float, optional
Latent heat of vaporization for water Pwhv (Wd/(m*mm^2)).
psychometric_const: float, optional
            Psychrometric constant (kPa (deg C)^-1).
stefan_boltzmann_const: float, optional
Stefan Boltzmann's constant (W/(m^2K^-4)).
solar_const: float, optional
Solar constant (W/m^2).
latitude: float, optional
Latitude (radians).
elevation_of_measurement: float, optional
Elevation at which measurement was taken (m).
adjustment_coeff: float, optional
adjustment coeff to predict Rs from air temperature (deg C)^-0.5.
lt: float, optional
lag between peak TmaxF and solar forcing (days).
nd: float, optional
Number of days in year (days).
MeanTmaxF: float, optional
Mean annual rate of TmaxF (mm/d).
delta_d: float, optional
Calibrated difference between max & min daily TmaxF (mm/d).
current_time: float, required only for 'Cosine' method
Current time (Years)
const_potential_evapotranspiration: float, optional for
'Constant' method
Constant PET value to be spatially distributed.
Tmin: float, required for 'Priestley Taylor' method
Minimum temperature of the day (deg C)
Tmax: float, required for 'Priestley Taylor' method
Maximum temperature of the day (deg C)
Tavg: float, required for 'Priestley Taylor' and 'MeasuredRadiationPT'
methods
Average temperature of the day (deg C)
        obs_radiation: float, required for 'MeasuredRadiationPT' method
Observed radiation (W/m^2)
"""
super(PotentialEvapotranspiration, self).__init__(grid)
self.current_time = current_time
self.const_potential_evapotranspiration = const_potential_evapotranspiration
self.Tmin = Tmin
self.Tmax = Tmax
self.Tavg = Tavg
self.obs_radiation = obs_radiation
self._method = method
# For Priestley Taylor
self._alpha = priestley_taylor_const
self._a = albedo
self._pwhv = latent_heat_of_vaporization
self._y = psychometric_const
self._sigma = stefan_boltzmann_const
self._Gsc = solar_const
self._phi = (np.pi / 180.0) * latitude
self._z = elevation_of_measurement
self._Krs = adjustment_coeff
self._LT = lt
self._ND = nd
self._TmaxF_mean = MeanTmaxF
self._DeltaD = delta_d
_assert_method_is_valid(self._method)
self.initialize_output_fields()
self._cell_values = self._grid["cell"]
@property
def const_potential_evapotranspiration(self):
"""Constant PET value to be spatially distributed.
Used by 'Constant' method.
"""
return self._const_potential_evapotranspiration
@const_potential_evapotranspiration.setter
def const_potential_evapotranspiration(self, const_potential_evapotranspiration):
self._const_potential_evapotranspiration = const_potential_evapotranspiration
@property
def obs_radiation(self):
"""Observed radiation (W/m^2)
        obs_radiation: float, required for 'MeasuredRadiationPT' method.
"""
return self._obs_radiation
@obs_radiation.setter
def obs_radiation(self, obs_radiation):
self._obs_radiation = obs_radiation
@property
def Tmin(self):
"""Minimum temperature of the day (deg C)
Tmin: float, required for 'Priestley Taylor' method.
"""
return self._Tmin
@Tmin.setter
def Tmin(self, Tmin):
self._Tmin = Tmin
@property
def Tmax(self):
"""Maximum temperature of the day (deg C)
Tmax: float, required for 'Priestley Taylor' method.
"""
return self._Tmax
@Tmax.setter
def Tmax(self, Tmax):
self._Tmax = Tmax
@property
def Tavg(self):
"""Average temperature of the day (deg C)
Tavg: float, required for 'Priestley Taylor' and 'MeasuredRadiationPT'
methods.
"""
return self._Tavg
@Tavg.setter
def Tavg(self, Tavg):
self._Tavg = Tavg
def update(self):
"""Update fields with current conditions.
If the 'Constant' method is used, this method looks to the value of
the ``const_potential_evapotranspiration`` property.
If the 'PriestleyTaylor' method is used, this method looks to the
values of the ``Tmin``, ``Tmax``, and ``Tavg`` properties.
If the 'MeasuredRadiationPT' method is use this method looks to the
values of the ``Tavg`` and ``obs_radiation`` property.
"""
if self._method == "Constant":
self._PET_value = self._const_potential_evapotranspiration
elif self._method == "PriestleyTaylor":
self._PET_value = self._PriestleyTaylor(
self._current_time, self._Tmax, self._Tmin, self._Tavg
)
self._cell_values["radiation__incoming_shortwave_flux"] = (
self._Rs * self._cell_values["radiation__ratio_to_flat_surface"]
)
self._cell_values["radiation__net_shortwave_flux"] = (
self._Rns * self._cell_values["radiation__ratio_to_flat_surface"]
)
self._cell_values["radiation__net_longwave_flux"] = (
self._Rnl * self._cell_values["radiation__ratio_to_flat_surface"]
)
self._cell_values["radiation__net_flux"] = (
self._Rn * self._cell_values["radiation__ratio_to_flat_surface"]
)
elif self._method == "MeasuredRadiationPT":
Robs = self._obs_radiation
self._PET_value = self._MeasuredRadPT(self._Tavg, (1 - self._a) * Robs)
elif self._method == "Cosine":
self._J = np.floor(
(self._current_time - np.floor(self._current_time)) * 365.0
)
self._PET_value = max(
(
self._TmaxF_mean
+ self._DeltaD
/ 2.0
* np.cos(
(2 * np.pi) * (self._J - self._LT - self._ND / 2) / self._ND
)
),
0.0,
)
self._PET = (
self._PET_value * self._cell_values["radiation__ratio_to_flat_surface"]
)
self._cell_values["surface__potential_evapotranspiration_rate"][:] = self._PET
def _PriestleyTaylor(self, current_time, Tmax, Tmin, Tavg):
# Julian Day - ASCE-EWRI Task Committee Report, Jan-2005 - Eqn 25, (52)
self._J = np.floor((current_time - np.floor(current_time)) * 365)
# Saturation Vapor Pressure - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 6, (37)
        self._es = 0.6108 * np.exp((17.27 * Tavg) / (237.3 + Tavg))
# Actual Vapor Pressure - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 8, (38)
        self._ea = 0.6108 * np.exp((17.27 * Tmin) / (237.3 + Tmin))
# Slope of Saturation Vapor Pressure - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 5, (36)
self._delta = (4098.0 * self._es) / ((237.3 + Tavg) ** 2.0)
# Solar Declination Angle - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 24,(51)
self._sdecl = 0.409 * np.sin(((np.pi / 180.0) * self._J) - 1.39)
# Inverse Relative Distance Factor - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 23,(50)
self._dr = 1 + (0.033 * np.cos(np.pi / 180.0 * self._J))
# To calculate ws - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 29,(61)
self._x = 1.0 - (((np.tan(self._phi)) ** 2.0) * (np.tan(self._sdecl) ** 2.0))
if self._x <= 0:
self._x = 0.00001
# Sunset Hour Angle - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 28,(60)
self._ws = (np.pi / 2.0) - np.arctan(
            (-1 * np.tan(self._phi) * np.tan(self._sdecl)) / (self._x ** 0.5)
)
# Extraterrestrial radmodel.docx - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 21, (48)
# 11.57 converts 1 MJ/m^2/day to W/m^2
self._Ra = (
11.57
* (24.0 / np.pi)
* 4.92
* self._dr
* (
(self._ws * np.sin(self._phi) * np.sin(self._sdecl))
+ (np.cos(self._phi) * np.cos(self._sdecl) * (np.sin(self._ws)))
)
)
# Clear-sky Solar Radiation - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 19, (47)
self._Rso = (0.75 + ((2.0 * (10 ** (-5.0))) * self._z)) * self._Ra
self._Rs = min(self._Krs * self._Ra * np.sqrt(Tmax - Tmin), self._Rso)
# Net Short Wave Radiation - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 16, (43)
self._Rns = self._Rs * (1 - self._a)
# Relative Cloudiness - ASCE-EWRI Task Committee Report,
# Jan-2005 - Page 20,35
if self._Rso > 0:
self._u = self._Rs / self._Rso
else:
self._u = 0
if self._u < 0.3:
self._u = 0.3
elif self._u > 1:
self._u = 1.0
# Cloudiness Function - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 18, (45)
self._fcd = (1.35 * self._u) - 0.35
# Net Long Wave Radiation - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 17, (44)
self._Rnl = (
self._sigma
* self._fcd
            * (0.34 - (0.14 * np.sqrt(self._ea)))
            * (((Tmax + 273.16) ** 4.0 + (Tmin + 273.16) ** 4.0) / 2.0)
)
# Net Radiation - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 15, (42)
self._Rn = self._Rns - self._Rnl
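        # Priestley-Taylor potential ET - ASCE-EWRI Task Committee Report:
        # ETp = alpha * (delta / (delta + gamma)) * (Rn / lambda_v), where
        # delta is the slope of the saturation vapor pressure curve, gamma
        # the psychrometric constant, Rn the net radiation and lambda_v the
        # latent heat of vaporization; negative values are clipped to zero.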
self._ETp = max(
self._alpha
* (self._delta / (self._delta + self._y))
* (self._Rn / self._pwhv),
0,
)
return self._ETp
def _MeasuredRadPT(self, Tavg, Rnobs):
# Saturation Vapor Pressure - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 6, (37)
        self._es = 0.6108 * np.exp((17.27 * Tavg) / (237.3 + Tavg))
# Slope of Saturation Vapor Pressure - ASCE-EWRI Task Committee Report,
# Jan-2005 - Eqn 5, (36)
self._delta = (4098.0 * self._es) / ((237.3 + Tavg) ** 2.0)
self._ETp = max(
self._alpha
* (self._delta / (self._delta + self._y))
* (Rnobs / self._pwhv),
0,
)
return self._ETp
| 35.495798 | 99 | 0.583629 |
05dad70634ec11ff88759aa73b73361879c5e7c2 | 437 | py | Python | foppl/runtime.py | Tobias-Kohn/PyFOPPL-2 | 88122db0e689725543512080aab8dff76a6f7e9c | [
"MIT"
] | 4 | 2018-01-22T17:20:48.000Z | 2021-11-06T17:27:46.000Z | pyfo/foppl/runtime.py | bradleygramhansen/pyfo | 559678080f27e7d9f3f194a0c28e9e8bfe71a7f3 | [
"MIT"
] | 8 | 2018-01-22T10:12:12.000Z | 2018-01-30T15:47:37.000Z | pyfo/foppl/runtime.py | bradleygramhansen/pyfo | 559678080f27e7d9f3f194a0c28e9e8bfe71a7f3 | [
"MIT"
] | 4 | 2018-01-25T14:20:08.000Z | 2021-11-06T17:28:03.000Z | #
# This file is part of PyFOPPL, an implementation of a First Order Probabilistic Programming Language in Python.
#
# License: MIT (see LICENSE.txt)
#
# 08. Jan 2018, Tobias Kohn
# 23. Jan 2018, Tobias Kohn
#
__all__ = ['conj']
def conj(seq, *items):
return seq + list(items)
def index(idx):
if type(idx) is int:
return idx
if hasattr(idx, 'data'):
return int(idx.data[0])
else:
return int(idx) | 21.85 | 112 | 0.640732 |
823c404de82a7d9f2c591c8005f854da4b81c8b4 | 548 | py | Python | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/laundriemepleaz-33179 | 889c173629d902227cb4596151a7c8182e7b0dcc | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/laundriemepleaz-33179 | 889c173629d902227cb4596151a7c8182e7b0dcc | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/laundriemepleaz-33179 | 889c173629d902227cb4596151a7c8182e7b0dcc | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "laundriemepleaz-33179.botics.co"
site_params = {
"name": "LaundrieMePleaz",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| 21.076923 | 61 | 0.664234 |
5508ad4ed233345c9e7ce2507da9dbe418fc38fd | 1,020 | py | Python | .github/workflows/deployment-scripts/ls_latest_csv.py | COVIDAnalytics/website | 482f29bfd3064af2c0b0c839624eff0800e04623 | [
"MIT"
] | 12 | 2020-04-07T03:30:13.000Z | 2020-09-06T05:45:32.000Z | .github/workflows/deployment-scripts/ls_latest_csv.py | COVIDAnalytics/website | 482f29bfd3064af2c0b0c839624eff0800e04623 | [
"MIT"
] | 58 | 2020-04-06T21:25:34.000Z | 2020-11-19T18:50:06.000Z | .github/workflows/deployment-scripts/ls_latest_csv.py | COVIDAnalytics/website | 482f29bfd3064af2c0b0c839624eff0800e04623 | [
"MIT"
] | 11 | 2020-04-14T11:38:21.000Z | 2021-09-06T13:00:18.000Z | # Usage: lsLatestCSV prefix dir
# Finds the latest CSV in current directory with prefix prefix using
# DELPHI team's dating convention
import os
import sys
import datetime
from datetime import timedelta
prefix = sys.argv[1]
new_dir = sys.argv[2]
os.chdir(new_dir)
print("[*] Looking for latest CSV with prefix: " + prefix)
targets = []
for fname in os.listdir():
if fname.startswith(prefix):
targets.append(fname)
print("[*] Candidates: " + str(targets))
date = datetime.datetime.now()
delta = timedelta(days=1)
target = None
while target is None and date.year >= 2020:
match = date.strftime("%Y%m%d")
candidate = prefix + match + ".csv"
if candidate in targets:
target = prefix + match + ".csv"
date = date - delta
if target is None:
print("[*] Could not find latest CSV with prefix " + prefix)
sys.exit(1)
print("[*] Found latest CSV: " + target + "...")
# This last print statement can get fed into bash through tail pipe
print(new_dir + "/" + target)
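# Example invocation (hypothetical prefix and directory):
#     python ls_latest_csv.py Global_V2_ /home/user/data
# walks back one day at a time from today and prints the newest
# Global_V2_YYYYMMDD.csv found; callers grab the last printed line.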
| 23.72093 | 71 | 0.668627 |
336037e4cf7fe868e910b1600ec3cd3dcd9517a3 | 3,618 | py | Python | applications/easysvm/scripts/datagen.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 2,753 | 2015-01-02T11:34:13.000Z | 2022-03-25T07:04:27.000Z | applications/easysvm/scripts/datagen.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 2,404 | 2015-01-02T19:31:41.000Z | 2022-03-09T10:58:22.000Z | applications/easysvm/scripts/datagen.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 1,156 | 2015-01-03T01:57:21.000Z | 2022-03-26T01:06:28.000Z | #!/usr/bin/env python
# This software is distributed under BSD 3-clause license (see LICENSE file).
#
# Authors: Soeren Sonnenburg
import sys
import random
from numpy import array
import esvm.parse
import esvm.plots
from esvm.datafuncs import MotifDataDef, fastawrite_sequence, arffwrite_sequence, arffwrite_real
from esvm.mldata import init_datasetfile
if __name__ == '__main__':
if len(sys.argv)<3 or (sys.argv[1]=='motif' and sys.argv[2]!='arff' and sys.argv[2]!='fasta') \
or (sys.argv[1]=='motif' and sys.argv[2]=='fasta' and len(sys.argv)<9) \
or (sys.argv[1]=='motif' and sys.argv[2]=='arff' and len(sys.argv)<14) \
or (sys.argv[1]=='cloud' and len(sys.argv)<7) or (sys.argv[1]!='motif') \
and (sys.argv[1]!='cloud'):
sys.stderr.write( "usage: %s motif fasta MOTIF numSeq seqLenRange"+\
"positionRange mutationRate output.fa\n"+\
"or: %s motif arff MOTIFPOS numSeq-pos seqLenRange-pos "+\
"positionRange-pos mutationRate-pos \\\n"+\
"motif-neg numSeq-neg seqLenRange-neg positionRange-neg "+\
"mutationRange-neg output.arff\n"+\
"or: %s cloud numpoints dimensions fractionOfPositives "+\
"cloudWidth output.arff\n" % (sys.argv[0],sys.argv[0],sys.argv[0]) )
sys.exit(-1)
random.seed()
if sys.argv[1] == 'motif':
if sys.argv[2]=='fasta':
# generate sequences in FASTA format
p = MotifDataDef()
p.motif = sys.argv[3]
p.numseq = int(sys.argv[4])
(p.seqlenmin,p.seqlenmax) = esvm.parse.parse_range(sys.argv[5])
(p.posstart,p.posend) = esvm.parse.parse_range(sys.argv[6])
p.mutrate = float(sys.argv[7])
filename = sys.argv[8]
fastawrite_sequence(filename, p)
else:
# generate sequences in ARFF format
assert(sys.argv[2]=='arff')
p = MotifDataDef()
p.motif = sys.argv[3]
p.numseq = int(sys.argv[4])
(p.seqlenmin,p.seqlenmax) = esvm.parse.parse_range(sys.argv[5])
(p.posstart,p.posend) = esvm.parse.parse_range(sys.argv[6])
p.mutrate = float(sys.argv[7])
n = MotifDataDef()
n.motif = sys.argv[8]
n.numseq = int(sys.argv[9])
(n.seqlenmin,n.seqlenmax) = esvm.parse.parse_range(sys.argv[10])
(n.posstart,n.posend) = esvm.parse.parse_range(sys.argv[11])
n.mutrate = float(sys.argv[12])
filename = sys.argv[13]
arffwrite_sequence(filename, p, n)
elif sys.argv[1] == 'cloud':
# generate a data cloud in ARFF format
numpoint = int(sys.argv[2])
numfeat = int(sys.argv[3])
fracpos = float(sys.argv[4])
width = float(sys.argv[5])
filename = sys.argv[6]
arffwrite_real(filename, numpoint, numfeat, fracpos, width)
if len(sys.argv)>=8:
fp = init_datasetfile(filename,'vec')
(examples,labels) = fp.readlines()
pointcloud = []
for ix in xrange(numpoint):
pointcloud.append(array([labels[ix],examples[0,ix],examples[1,ix]]))
esvm.plots.plotcloud(pointcloud,sys.argv[7],'Pointcloud')
#(examples,labels,metadata)=arffwrite_real(filename, numpoint, numfeat, fracpos, width)
#if len(sys.argv)>=8:
# plots.plotcloud(pointcloud,sys.argv[7],metadata)
else:
print 'Unknown option %s\n' % sys.argv[1]
| 40.651685 | 99 | 0.57435 |
a8e1af00d096d8c79749d8fb18305c76ac2beb4e | 602 | py | Python | setup.py | arangaraju/graph-stix | 635d94f81e1651ccba0cea89b8be0fbaf80779dc | [
"MIT"
] | null | null | null | setup.py | arangaraju/graph-stix | 635d94f81e1651ccba0cea89b8be0fbaf80779dc | [
"MIT"
] | null | null | null | setup.py | arangaraju/graph-stix | 635d94f81e1651ccba0cea89b8be0fbaf80779dc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for graph_stix.
This file was generated with PyScaffold 2.5.6, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import sys
from setuptools import setup
def setup_package():
needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)
sphinx = ['sphinx'] if needs_sphinx else []
setup(setup_requires=['six', 'pyscaffold>=2.5a0,<2.6a0'] + sphinx,
use_pyscaffold=True)
if __name__ == "__main__":
setup_package()
| 25.083333 | 73 | 0.677741 |
1b4ea7a04714652f4a0a5f4366ae0ed9537205b8 | 1,806 | py | Python | src/cogent3/parse/gcg.py | StephenRogers1/cogent3 | 1116a0ab14d9c29a560297205546714e2db1896c | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/parse/gcg.py | StephenRogers1/cogent3 | 1116a0ab14d9c29a560297205546714e2db1896c | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/parse/gcg.py | StephenRogers1/cogent3 | 1116a0ab14d9c29a560297205546714e2db1896c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
__author__ = "Matthew Wakefield"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Matthew Wakefield", "Peter Maxwell", "Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.12.21a"
__maintainer__ = "Matthew Wakefield"
__email__ = "wakefield@wehi.edu.au"
__status__ = "Production"
import warnings
def MsfParser(f):
"""Read sequences from a msf format file"""
alignmentdict = {}
# parse optional header
# parse optional text information
    # file header and sequence header are separated by a line ending in '..'
line = f.readline().strip()
for line in f:
line = line.strip()
if line.endswith(".."):
break
# parse sequence info
seqinfo = {}
for line in f:
line = line.strip()
if line.startswith("//"):
break
line = line.split()
if line and line[0] == "Name:":
seqinfo[line[1]] = int(line[3])
# parse sequences
sequences = {}
for line in f:
line = line.strip().split()
if line and line[0] in sequences:
sequences[line[0]] += "".join(line[1:])
elif line and line[0] in seqinfo:
sequences[line[0]] = "".join(line[1:])
# consistency check
if len(sequences) != len(seqinfo):
warnings.warn(
"Number of loaded seqs[%s] not same as "
"expected[%s]." % (len(sequences), len(seqinfo))
)
for name in sequences:
if len(sequences[name]) != seqinfo[name]:
warnings.warn(
"Length of loaded seqs [%s] is [%s] not "
"[%s] as expected." % (name, len(sequences[name]), seqinfo[name])
)
# yield sequences
for name in sequences:
yield (name, sequences[name])
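# A minimal usage sketch (assuming 'example.msf' is a GCG/MSF alignment file):
#
#     with open('example.msf') as f:
#         for name, seq in MsfParser(f):
#             print(name, len(seq))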
| 30.610169 | 81 | 0.575305 |
6730afe28e59cf0320435f1d3b58a458c5083e2c | 17,962 | py | Python | train.py | guxd/DialoGPT | 4f042c9682f11e3143c585e75071a9038d00f273 | [
"MIT"
] | null | null | null | train.py | guxd/DialoGPT | 4f042c9682f11e3143c585e75071a9038d00f273 | [
"MIT"
] | null | null | null | train.py | guxd/DialoGPT | 4f042c9682f11e3143c585e75071a9038d00f273 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
'''
* @Desc: train GPT2 from scratch or fine-tune it. Modified based on the Huggingface GPT-2 implementation.
'''
import json
import os
import sys
import argparse
import logging
import time
import tqdm
import datetime
import torch
from collections import defaultdict
import numpy as np
from os.path import join
from transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config, AdamW
from transformers import get_linear_schedule_with_warmup
from data_loader import END_OF_TEXT_TOKEN
from data_loader import BucketingDataLoader, DynamicBatchingLoader, DistributedBucketingDataLoader
from data_loader import (InputFeatures, InputFeatures_train, RedditExample)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
logger = logging.getLogger(__name__)
INF = 100000000
EVAL_STEP = 100000
########################################################################################################
###### Train Utils ###################
SEQ_LENGTH_SHRINK_PROP = 0.9
def boolean_string(s):
if s.lower() not in {'false', 'true'}: raise ValueError('Not a valid boolean string')
return s.lower() == 'true'
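# Illustrative only: boolean_string is meant to be used as an argparse 'type'
# so that "--fp16 false" parses to False, e.g.
#
#     parser.add_argument("--fp16", type=boolean_string, default=True)
#
# A plain 'type=bool' would turn any non-empty string, even "false", into True.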
def get_eval_list_same_length(input_file, tokenizer, max_batch_size, norm=True):
examples = []
with open(input_file, 'r', encoding="utf-8") as f:
content = [l.split('\t') for l in f.read().splitlines()]
context, response = [c[0] for c in content], [c[1:] for c in content]
i = 0
for src, tgt_all in zip(context, response):
for tgt in tgt_all:
if norm:
src_line = ' '.join(src.strip().split())
tgt_line = ' '.join(tgt.strip().split())
else:
src_line = src.strip()
tgt_line = tgt.strip()
examples.append(RedditExample(i, src_line, tgt_line))
i += 1
def featurize(example):
conv_id = example.conv_id
context_id = tokenizer.encode(example.context)
end_of_text_id = tokenizer.encoder[END_OF_TEXT_TOKEN]
response_id = tokenizer.encode(example.response)
input_ids = context_id + [end_of_text_id]
lm_labels = response_id
position_ids = list(range(len(input_ids)))
token_type_id = [0] * len(input_ids)
return InputFeatures(conv_id, input_ids, position_ids, token_type_id, lm_labels, len(context_id), len(response_id))
def batch_feature_same_len(features):
input_ids = torch.stack([torch.tensor(f.choices_features['input_ids'], dtype=torch.long) for f in features])
position_ids = torch.stack([torch.tensor(f.choices_features['position_ids'], dtype=torch.long) for f in features])
token_type_ids = torch.stack([torch.tensor(f.choices_features['token_type_ids'], dtype=torch.long) for f in features])
labels = torch.nn.utils.rnn.pad_sequence([torch.tensor(f.lm_labels, dtype=torch.long) for f in features],
batch_first=True, padding_value=-1)
context_len = torch.tensor([f.context_len for f in features], dtype=torch.long)
response_len = torch.tensor([f.response_len for f in features], dtype=torch.long)
return (input_ids, position_ids, token_type_ids, labels, context_len, response_len)
features = [featurize(e) for e in examples]
dataloader_pre = defaultdict(list)
for f in features:
dataloader_pre[f.context_len].append(f)
dataloader = []
for l in sorted(dataloader_pre):
f = batch_feature_same_len(dataloader_pre[l])
if len(f[0]) <= max_batch_size:
dataloader.append(f)
else:
start_index = 0
while True:
dataloader.append([ff[start_index:start_index + max_batch_size] for ff in f])
start_index += max_batch_size
if start_index >= len(f[0]): break
return dataloader
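# Sketch of the structure returned above (hypothetical eval file): every
# element of the list batches examples whose contexts share one length, e.g.
#
#     eval_list = get_eval_list_same_length('valid.tsv', tokenizer, 32)
#     input_ids, position_ids, token_type_ids, labels, ctx_len, rsp_len = eval_list[0]
#
# so the context side never needs padding within a batch.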
#### Eval Utils ######
#from pycocoevalcap.bleu.bleu import Bleu  # needed by cal_BLEU_4 below; install pycocoevalcap and uncomment to use it
EOS_ID = 50256
def cal_BLEU_4(generated, reference, is_corpus=False):
BLEUscore = [0.0, 0.0, 0.0, 0.0]
for idx, g in enumerate(generated):
if is_corpus:
score, scores = Bleu(4).compute_score(reference, {0: [g]})
else:
score, scores = Bleu(4).compute_score({0: [reference[0][idx]]}, {0: [g]})
for i, s in zip([0, 1, 2, 3], score): BLEUscore[i] += s
BLEUscore[0] = BLEUscore[0]/len(generated)
BLEUscore[1] = BLEUscore[1]/len(generated)
BLEUscore[2] = BLEUscore[2]/len(generated)
BLEUscore[3] = BLEUscore[3]/len(generated)
return BLEUscore
def cal_entropy(generated):
etp_score = [0.0, 0.0, 0.0, 0.0]
div_score = [0.0, 0.0, 0.0, 0.0]
counter = [defaultdict(int), defaultdict(int), defaultdict(int), defaultdict(int)]
for gg in generated:
g = gg.rstrip().split()
for n in range(4):
for idx in range(len(g)-n):
ngram = ' '.join(g[idx:idx+n+1])
counter[n][ngram] += 1
for n in range(4):
total = sum(counter[n].values()) + 1e-10
for v in counter[n].values():
etp_score[n] += - (v+0.0) / total * (np.log(v+0.0) - np.log(total))
div_score[n] = (len(counter[n].values())+0.0) / total
return etp_score, div_score
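# A tiny illustrative check of the n-gram statistics above (toy strings, not
# real model output):
#
#     etp, div = cal_entropy(["the cat sat", "the dog sat"])
#     # etp[0] is the unigram entropy, div[0] the distinct-unigram ratio, ...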
#######################################################################################################################
def train(args, train_dataloader, model, tokenizer, train_logger, eval_logger):
t_total = args.num_optim_steps
no_decay = ['bias', 'ln'] # no decay for bias and LayerNorm (ln)
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
global_step = 0
step = 0
epoch = 0
if args.continue_from:
global_step = args.continue_from
step = global_step*2 - 1
if args.local_rank != -1: n_gpu = 1
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
pbar = tqdm.tqdm(total=args.num_optim_steps, desc=f"training") if args.pbar else None
while True:
model.train()
(tr_loss, nb_tr_examples, nb_tr_steps) = 0.0, 0, 0
n_token_real, n_token_total = 0, 0
train_start_time_epoch = time.time()
for batch in train_dataloader:
batch = tuple(t.to(args.device) for t in batch)
input_ids, position_ids, token_ids, label_ids, *_ = batch
if args.no_token_id: token_ids = None
loss, *_ = model(input_ids, None, None, token_ids, position_ids, None, None, label_ids)
if args.n_gpu > 1:
loss = loss.mean()
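            # scale the loss so that gradients summed over the
            # train_batch_size / input_ids.shape[0] accumulation micro-steps
            # average out to one effective batch of size train_batch_size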
loss = loss / (args.train_batch_size / input_ids.shape[0])
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += float(loss.item()) * (args.train_batch_size / input_ids.shape[0])
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
mean_loss = tr_loss / nb_tr_steps
n_token_total += input_ids.shape[0] * input_ids.shape[1]
n_token_real += (input_ids != 0).sum().item()
# gradient update
step += 1
if step % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
# Print log info to file
if args.local_rank != -1:
n_token_real_all_proc = sum(all_gather_list(n_token_real))
n_token_total_all_proc = sum(all_gather_list(n_token_total))
else:
n_token_real_all_proc = n_token_real
n_token_total_all_proc = n_token_total
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
epoch_time = time.time() - train_start_time_epoch
if pbar is not None:
pbar.set_postfix_str(
f"tok/s: {n_token_real_all_proc//epoch_time//1000}k epoch: {epoch}")
pbar.update(1)
print(f'{epoch+1},{global_step+1},{step+1},{mean_loss},\
{n_token_real_all_proc},{n_token_total_all_proc},{epoch_time}',
file=train_logger)
if global_step % args.valid_step == 0:
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
# only rank 0 process evaluate
torch.save(
{k: (v.cpu() if v is not None else None) # save to cpu tensors
for k, v in model.state_dict().items()},
join(output_dir, f'GP2-pretrain-step-{global_step}.pkl'))
eval_loss = evaluate(model, tokenizer, epoch, args)
# enable generation step evaluation for now
# gen_response = generation(model, tokenizer, epoch, args)
'''
# probably use beam search only for test set
if False:
gen_response_beam = generation(model, tokenizer, epoch, args, use_beam_search=True, beam_width=3)
'''
                    print('{},{},{},{}'.format(epoch+1, global_step+1, step+1, eval_loss), file=eval_logger)
logger.info('current learning rate: '+ str(optimizer.param_groups[0]['lr']))
model.train()
if global_step >= args.num_optim_steps: break
if global_step >= args.num_optim_steps: break
epoch += 1
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
if pbar is not None: pbar.close()
train_logger.close()
eval_logger.close()
def evaluate(model, tokenizer, epoch_id, args):
# use the same signature with eval_model_generation
logger.info('compute eval model loss, using eval mode, please change it back to train after calling this function')
model.eval()
eval_dataloader = DynamicBatchingLoader(args.eval_input_file, tokenizer, args.normalize_data, args.eval_batch_size, args.max_seq_length)
tot_loss = []
tot_sample = []
with torch.no_grad():
for step, batch in enumerate(eval_dataloader):
batch = tuple(t.to(args.device) for t in batch)
input_ids, position_ids, token_ids, label_ids, src_len, _ = batch
if args.no_token_id: token_ids = None
n_sample = input_ids.shape[0]
loss = model(input_ids, position_ids, token_ids, label_ids)
tot_loss.append(loss.mean().item() * n_sample)
tot_sample.append(n_sample)
print(f"\n Epoch {epoch_id}: Val loss {np.sum(tot_loss) / np.sum(tot_sample)} ")
return np.sum(tot_loss) / np.sum(tot_sample)
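# Note: the value returned by evaluate() is a mean cross-entropy loss; if the
# model reports mean token-level loss, exp() of it approximates perplexity.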
def generation(model, tokenizer, epoch, args):
gen_dataloader = get_eval_list_same_length(args.eval_input_file, tokenizer, args.eval_batch_size, True)
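    # NOTE: generation-time evaluation is stubbed out in this version; the
    # loader above is built but unused and an empty string is returned.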
return ''
##############################################################################################################
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model_name_or_path', type=str, default='gpt2', help='pretrained model name or path to local checkpoint')
parser.add_argument("--train_input_file", type=str, default='data/train.128len.db')
parser.add_argument("--eval_input_file", type=str, default='./data/dummy_data.tsv')
parser.add_argument("--output_dir", type=str, default='output')
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--max_seq_length", type=int, default=128)
parser.add_argument("--skip_eval", action='store_true', help='If true, skip evaluation.')
parser.add_argument("--continue_from", type=int, default=0)
parser.add_argument("--train_batch_size", type=int, default=4, help="batch size now means per GPU per step")
parser.add_argument("--gradient_accumulation_steps", type=int, default=2, help="to increase effective batch size and reduce synchronization")
parser.add_argument("--eval_batch_size", type=int, default=4)
parser.add_argument("--learning_rate", type=float, default=1e-5)
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--num_optim_steps", type=int, default=1000000, help="new API specifies num update steps")
parser.add_argument("--valid_step", type=int, default=10000, help="how many optim steps between validations")
parser.add_argument("--warmup_proportion", type=float, default=0.1)
parser.add_argument("--warmup_steps", type=int, default=16000)
parser.add_argument("--normalize_data", type=boolean_string, default=True)
parser.add_argument("--fp16", type=boolean_string, default=True)
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--lr_schedule", type=str, choices=['noam', 'noamwd', 'BERT', 'None'], default='noam')
parser.add_argument("--loss_scale", type=float, default=0)
parser.add_argument("--no_token_id", type=boolean_string, default=True)
parser.add_argument("--log_dir", type=str)
parser.add_argument('--pbar', type=boolean_string, default=True, help='turn on progress bar')
# distributed
parser.add_argument('--local_rank', type=int, default=-1, help='for torch.distributed')
args = parser.parse_args()
assert args.train_batch_size % args.gradient_accumulation_steps == 0, 'batch size % gradient accumulation steps != 0!'
args.train_batch_size = (args.train_batch_size// args.gradient_accumulation_steps)
    logger.info(f'train batch size = {args.train_batch_size*args.gradient_accumulation_steps}, '
                f'new train batch size (after gradient accumulation) = {args.train_batch_size}')
if args.local_rank == -1:
logger.info(f'CUDA available? {str(torch.cuda.is_available())}')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
args.device, args.n_gpu = device, n_gpu
else:
# distributed training
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
n_gpu = torch.distributed.get_world_size()
args.device, args.n_gpu = device, 1
logger.info(f"device: {device} n_gpu: {n_gpu}, distributed training: {bool(args.local_rank != -1)},16-bits training: {args.fp16}")
np.random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
if n_gpu > 0: torch.cuda.manual_seed_all(args.seed)
timestamp = datetime.datetime.now().strftime('%Y-%m-%d%H%M%S')
output_dir = join(args.output_dir, 'GPT2.{}.{}.{}gpu.{}'.format(args.learning_rate, args.train_batch_size, n_gpu, timestamp))
log_dir = args.log_dir if args.log_dir is not None and len(args.log_dir) > 0 else output_dir
if args.local_rank == -1 or torch.distributed.get_rank() == 0:
os.makedirs(output_dir, exist_ok=True)
train_logger = open(join(log_dir, 'train_log.txt'), 'a+', buffering=1)
eval_logger = open(join(log_dir, 'eval_log.txt'), 'a+', buffering=1)
print('epoch,global_step,step,mean_loss,n_token_real,n_token_total,epoch_time', file=train_logger)
print('epoch,global_step,step,eval_loss', file=eval_logger)
tokenizer = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
config = GPT2Config.from_pretrained(args.model_name_or_path)
if args.local_rank == -1:
train_dataloader = BucketingDataLoader(args.train_input_file, args.train_batch_size, args.max_seq_length)
else:
train_dataloader = DistributedBucketingDataLoader(
torch.distributed.get_rank(), torch.distributed.get_world_size(),
args.train_input_file, args.train_batch_size, args.max_seq_length)
model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
model = model.to(args.device)
    train(args, train_dataloader, model, tokenizer, train_logger, eval_logger)
if __name__ == "__main__":
main() | 47.518519 | 145 | 0.628939 |
c808c39f8c2de035886e720e5c0740d467a9c928 | 55 | py | Python | Module6/hw/01_hw_sort.py | xm4dn355x/specialist_python3_2nd_lvl | 4ea8c82eb0f32aa92c82914f6599c2c47a2f7032 | [
"MIT"
] | null | null | null | Module6/hw/01_hw_sort.py | xm4dn355x/specialist_python3_2nd_lvl | 4ea8c82eb0f32aa92c82914f6599c2c47a2f7032 | [
"MIT"
] | null | null | null | Module6/hw/01_hw_sort.py | xm4dn355x/specialist_python3_2nd_lvl | 4ea8c82eb0f32aa92c82914f6599c2c47a2f7032 | [
"MIT"
] | null | null | null | # Доделайте все задачи Module-5/practice/02_tasks_sort/ | 55 | 55 | 0.836364 |
8ba91338da044a060e50a457949a76c85e4502e9 | 8,930 | py | Python | tests/test_c_source.py | natgavrilenko/asn1tools | 6d5b5abeacad22b03ff91fcc8301c29ae6c7f5f0 | [
"MIT"
] | null | null | null | tests/test_c_source.py | natgavrilenko/asn1tools | 6d5b5abeacad22b03ff91fcc8301c29ae6c7f5f0 | [
"MIT"
] | null | null | null | tests/test_c_source.py | natgavrilenko/asn1tools | 6d5b5abeacad22b03ff91fcc8301c29ae6c7f5f0 | [
"MIT"
] | null | null | null | import unittest
import asn1tools
CODECS_AND_MODULES = [
('oer', asn1tools.source.c.oer),
('uper', asn1tools.source.c.uper)
]
class Asn1ToolsCSourceTest(unittest.TestCase):
def test_compile_error_unsupported_type(self):
for codec, module in CODECS_AND_MODULES:
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= OBJECT IDENTIFIER '
'END',
codec)
with self.assertRaises(asn1tools.errors.Error) as cm:
module.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: Unsupported type 'OBJECT IDENTIFIER'.")
def test_compile_error_unsupported_type_in_sequence(self):
for codec, module in CODECS_AND_MODULES:
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= SEQUENCE { '
' a NumericString '
' } '
'END',
codec)
with self.assertRaises(asn1tools.errors.Error) as cm:
module.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A.a: Unsupported type 'NumericString'.")
def test_compile_error_integer_no_minimum_nor_maximum(self):
for codec, module in CODECS_AND_MODULES:
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= INTEGER '
'END',
codec)
with self.assertRaises(asn1tools.errors.Error) as cm:
module.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: INTEGER has no minimum value.")
def test_compile_error_integer_no_minimum(self):
for codec, module in CODECS_AND_MODULES:
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= INTEGER (MIN..10) '
'END',
codec)
with self.assertRaises(asn1tools.errors.Error) as cm:
module.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: INTEGER has no minimum value.")
def test_compile_error_integer_no_maximum(self):
for codec, module in CODECS_AND_MODULES:
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= INTEGER (1..MAX) '
'END',
codec)
with self.assertRaises(asn1tools.errors.Error) as cm:
module.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: INTEGER has no maximum value.")
def test_compile_error_unsigned_integer_over_64_bits(self):
for codec, module in CODECS_AND_MODULES:
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= INTEGER (0..18446744073709551616) '
'END',
codec)
with self.assertRaises(asn1tools.errors.Error) as cm:
module.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: 18446744073709551616 does not fit in uint64_t.")
def test_compile_error_unsigned_integer_over_64_signed_bits(self):
for codec, module in CODECS_AND_MODULES:
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= INTEGER (-1..9223372036854775808) '
'END',
codec)
with self.assertRaises(asn1tools.errors.Error) as cm:
module.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: 9223372036854775808 does not fit in int64_t.")
def test_compile_error_signed_integer_over_64_bits(self):
for codec, module in CODECS_AND_MODULES:
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= INTEGER (-9223372036854775809..0) '
'END',
codec)
with self.assertRaises(asn1tools.errors.Error) as cm:
module.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: -9223372036854775809 does not fit in int64_t.")
def test_compile_error_octet_string_no_size(self):
for codec, module in CODECS_AND_MODULES:
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= OCTET STRING '
'END',
codec)
with self.assertRaises(asn1tools.errors.Error) as cm:
module.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: OCTET STRING has no maximum length.")
def test_compile_error_octet_string_no_maximum(self):
for codec, module in CODECS_AND_MODULES:
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= OCTET STRING (SIZE(1..MAX)) '
'END',
codec)
with self.assertRaises(asn1tools.errors.Error) as cm:
module.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: OCTET STRING has no maximum length.")
def test_compile_error_sequence_of_no_size(self):
for codec, module in CODECS_AND_MODULES:
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= SEQUENCE OF BOOLEAN '
'END',
codec)
with self.assertRaises(asn1tools.errors.Error) as cm:
module.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: SEQUENCE OF has no maximum length.")
def test_compile_error_sequence_of_no_maximum(self):
for codec, module in CODECS_AND_MODULES:
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= SEQUENCE (SIZE(1..MAX)) OF BOOLEAN '
'END',
codec)
with self.assertRaises(asn1tools.errors.Error) as cm:
module.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: SEQUENCE OF has no maximum length.")
def test_compile_error_oer_real_not_ieee754(self):
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= REAL '
'END',
'oer')
with self.assertRaises(asn1tools.errors.Error) as cm:
asn1tools.source.c.oer.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: REAL not IEEE 754 binary32 or binary64.")
def test_compile_error_members_backtrace(self):
for codec, module in CODECS_AND_MODULES:
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= SEQUENCE { '
' a CHOICE { '
' b INTEGER '
' } '
' } '
'END',
codec)
with self.assertRaises(asn1tools.errors.Error) as cm:
module.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A.a.b: INTEGER has no minimum value.")
def test_compile_error_oer_enumerated_min(self):
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= ENUMERATED { a(-2147483649) } '
'END',
'oer')
with self.assertRaises(asn1tools.errors.Error) as cm:
asn1tools.source.c.oer.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: -2147483649 does not fit in int32_t.")
def test_compile_error_oer_enumerated_max(self):
foo = asn1tools.compile_string(
'Foo DEFINITIONS AUTOMATIC TAGS ::= BEGIN '
' A ::= ENUMERATED { a(2147483649) } '
'END',
'oer')
with self.assertRaises(asn1tools.errors.Error) as cm:
asn1tools.source.c.oer.generate(foo, 'foo')
self.assertEqual(str(cm.exception),
"Foo.A: 2147483649 does not fit in int32_t.")
if __name__ == '__main__':
unittest.main()
| 36.748971 | 85 | 0.538298 |
3b350d74a5ebe04308b93fdaf8ffe824b9c7b3e5 | 35,260 | py | Python | rasa/nlu/components.py | praneethgb/rasa | 5bf227f165d0b041a367d2c0bbf712ebb6a54792 | [
"Apache-2.0"
] | 8 | 2020-09-16T17:22:13.000Z | 2022-02-01T00:11:30.000Z | rasa/nlu/components.py | praneethgb/rasa | 5bf227f165d0b041a367d2c0bbf712ebb6a54792 | [
"Apache-2.0"
] | 216 | 2020-09-20T13:05:58.000Z | 2022-03-28T12:10:24.000Z | rasa/nlu/components.py | praneethgb/rasa | 5bf227f165d0b041a367d2c0bbf712ebb6a54792 | [
"Apache-2.0"
] | 1 | 2022-02-01T18:23:23.000Z | 2022-02-01T18:23:23.000Z | from collections import defaultdict
import itertools
import logging
import typing
from typing import Any, Dict, Hashable, List, Optional, Set, Text, Tuple, Type, Iterable
import rasa.utils.train_utils
from rasa.exceptions import MissingDependencyException
from rasa.nlu.constants import COMPONENT_INDEX
from rasa.shared.exceptions import RasaException
from rasa.shared.nlu.constants import TRAINABLE_EXTRACTORS
from rasa.shared.constants import DOCS_URL_COMPONENTS
from rasa.nlu.config import RasaNLUModelConfig
from rasa.shared.exceptions import InvalidConfigException
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
import rasa.shared.utils.io
import rasa.utils.common
if typing.TYPE_CHECKING:
from rasa.nlu.model import Metadata
logger = logging.getLogger(__name__)
def validate_requirements(component_names: List[Optional[Text]]) -> None:
"""Validates that all required importable python packages are installed.
Raises:
InvalidConfigException: If one of the component names is `None`, likely
indicates that a custom implementation is missing this property
or that there is an invalid configuration file that we did not
catch earlier.
Args:
component_names: The list of component names.
"""
from rasa.nlu import registry
# Validate that all required packages are installed
failed_imports = {}
for component_name in component_names:
if component_name is None:
raise InvalidConfigException(
"Your pipeline configuration contains a component that is missing "
"a name. Please double check your configuration or if this is a "
"custom component make sure to implement the name property for "
"the component."
)
component_class = registry.get_component_class(component_name)
unavailable_packages = rasa.utils.common.find_unavailable_packages(
component_class.required_packages()
)
if unavailable_packages:
failed_imports[component_name] = unavailable_packages
if failed_imports: # pragma: no cover
dependency_component_map = defaultdict(list)
for component, missing_dependencies in failed_imports.items():
for dependency in missing_dependencies:
dependency_component_map[dependency].append(component)
missing_lines = [
f"{d} (needed for {', '.join(cs)})"
for d, cs in dependency_component_map.items()
]
missing = "\n - ".join(missing_lines)
raise MissingDependencyException(
f"Not all required importable packages are installed to use "
f"the configured NLU pipeline. "
f"To use this pipeline, you need to install the "
f"missing modules: \n"
f" - {missing}\n"
f"Please install the packages that contain the missing modules."
)
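# Illustrative only: validate_requirements takes the component names from a
# pipeline config, e.g.
#
#     validate_requirements(["WhitespaceTokenizer", "DIETClassifier"])
#
# and raises MissingDependencyException if a required package (such as
# tensorflow for DIET) is not importable.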
def validate_component_keys(
component: "Component", component_config: Dict[Text, Any]
) -> None:
"""Validates that all keys for a component are valid.
Args:
component: The component class
component_config: The user-provided config for the component in the pipeline
"""
component_name = component_config.get("name")
allowed_keys = set(component.defaults.keys())
provided_keys = set(component_config.keys())
provided_keys.discard("name")
list_separator = "\n- "
for key in provided_keys:
if key not in allowed_keys:
rasa.shared.utils.io.raise_warning(
f"You have provided an invalid key `{key}` "
f"for component `{component_name}` in your pipeline. "
f"Valid options for `{component_name}` are:\n- "
f"{list_separator.join(allowed_keys)}"
)
def validate_empty_pipeline(pipeline: List["Component"]) -> None:
"""Ensures the pipeline is not empty.
Args:
pipeline: the list of the :class:`rasa.nlu.components.Component`.
"""
if len(pipeline) == 0:
raise InvalidConfigException(
"Can not train an empty pipeline. "
"Make sure to specify a proper pipeline in "
"the configuration using the 'pipeline' key."
)
def validate_only_one_tokenizer_is_used(pipeline: List["Component"]) -> None:
"""Validates that only one tokenizer is present in the pipeline.
Args:
pipeline: the list of the :class:`rasa.nlu.components.Component`.
"""
from rasa.nlu.tokenizers.tokenizer import Tokenizer
tokenizer_names = []
for component in pipeline:
if isinstance(component, Tokenizer):
tokenizer_names.append(component.name)
if len(tokenizer_names) > 1:
raise InvalidConfigException(
f"The pipeline configuration contains more than one tokenizer, "
f"which is not possible at this time. You can only use one tokenizer. "
f"The pipeline contains the following tokenizers: {tokenizer_names}. "
)
def _required_component_in_pipeline(
required_component: Type["Component"], pipeline: List["Component"]
) -> bool:
"""Checks that required component present in the pipeline.
Args:
required_component: A class name of the required component.
pipeline: The list of the :class:`rasa.nlu.components.Component`.
Returns:
`True` if required_component is in the pipeline, `False` otherwise.
"""
for previous_component in pipeline:
if isinstance(previous_component, required_component):
return True
return False
def validate_required_components(pipeline: List["Component"]) -> None:
"""Validates that all required components are present in the pipeline.
Args:
pipeline: The list of the :class:`rasa.nlu.components.Component`.
"""
for i, component in enumerate(pipeline):
missing_components = []
for required_component in component.required_components():
if not _required_component_in_pipeline(required_component, pipeline[:i]):
missing_components.append(required_component.name)
missing_components_str = ", ".join(f"'{c}'" for c in missing_components)
if missing_components:
raise InvalidConfigException(
f"The pipeline configuration contains errors. The component "
f"'{component.name}' requires {missing_components_str} to be "
f"placed before it in the pipeline. Please "
f"add the required components to the pipeline."
)
def validate_pipeline(pipeline: List["Component"]) -> None:
"""Validates the pipeline.
Args:
pipeline: The list of the :class:`rasa.nlu.components.Component`.
"""
validate_empty_pipeline(pipeline)
validate_only_one_tokenizer_is_used(pipeline)
validate_required_components(pipeline)
def any_components_in_pipeline(
components: Iterable[Text], pipeline: List["Component"]
) -> bool:
"""Check if any of the provided components are listed in the pipeline.
Args:
components: Component class names to check.
pipeline: A list of :class:`rasa.nlu.components.Component`s.
Returns:
`True` if any of the `components` are in the `pipeline`, else `False`.
"""
return len(find_components_in_pipeline(components, pipeline)) > 0
def find_components_in_pipeline(
components: Iterable[Text], pipeline: List["Component"]
) -> Set[Text]:
"""Finds those of the given components that are present in the pipeline.
Args:
components: A list of str of component class names to check.
pipeline: A list of :class:`rasa.nlu.components.Component`s.
Returns:
A list of str of component class names that are present in the pipeline.
"""
pipeline_component_names = {c.name for c in pipeline}
return pipeline_component_names.intersection(components)
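# A minimal sketch (hypothetical component instances) of the two helpers above:
#
#     pipeline = [WhitespaceTokenizer(), DIETClassifier()]
#     any_components_in_pipeline(["DIETClassifier"], pipeline)        # -> True
#     find_components_in_pipeline({"CRFEntityExtractor"}, pipeline)   # -> set()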
def validate_required_components_from_data(
pipeline: List["Component"], data: TrainingData
) -> None:
"""Validates that all components are present in the pipeline based on data.
Args:
pipeline: The list of the :class:`rasa.nlu.components.Component`s.
data: The :class:`rasa.shared.nlu.training_data.training_data.TrainingData`.
"""
if data.response_examples and not any_components_in_pipeline(
["ResponseSelector"], pipeline
):
rasa.shared.utils.io.raise_warning(
"You have defined training data with examples for training a response "
"selector, but your NLU pipeline does not include a response selector "
"component. To train a model on your response selector data, add a "
"'ResponseSelector' to your pipeline."
)
if data.entity_examples and not any_components_in_pipeline(
TRAINABLE_EXTRACTORS, pipeline
):
rasa.shared.utils.io.raise_warning(
"You have defined training data consisting of entity examples, but "
"your NLU pipeline does not include an entity extractor trained on "
"your training data. To extract non-pretrained entities, add one of "
f"{TRAINABLE_EXTRACTORS} to your pipeline."
)
if data.entity_examples and not any_components_in_pipeline(
{"DIETClassifier", "CRFEntityExtractor"}, pipeline
):
if data.entity_roles_groups_used():
rasa.shared.utils.io.raise_warning(
"You have defined training data with entities that have roles/groups, "
"but your NLU pipeline does not include a 'DIETClassifier' or a "
"'CRFEntityExtractor'. To train entities that have roles/groups, "
"add either 'DIETClassifier' or 'CRFEntityExtractor' to your "
"pipeline."
)
if data.regex_features and not any_components_in_pipeline(
["RegexFeaturizer", "RegexEntityExtractor"], pipeline
):
rasa.shared.utils.io.raise_warning(
"You have defined training data with regexes, but "
"your NLU pipeline does not include a 'RegexFeaturizer' or a "
"'RegexEntityExtractor'. To use regexes, include either a "
"'RegexFeaturizer' or a 'RegexEntityExtractor' in your pipeline."
)
if data.lookup_tables and not any_components_in_pipeline(
["RegexFeaturizer", "RegexEntityExtractor"], pipeline
):
rasa.shared.utils.io.raise_warning(
"You have defined training data consisting of lookup tables, but "
"your NLU pipeline does not include a 'RegexFeaturizer' or a "
"'RegexEntityExtractor'. To use lookup tables, include either a "
"'RegexFeaturizer' or a 'RegexEntityExtractor' in your pipeline."
)
if data.lookup_tables:
if not any_components_in_pipeline(
["CRFEntityExtractor", "DIETClassifier"], pipeline
):
rasa.shared.utils.io.raise_warning(
"You have defined training data consisting of lookup tables, but "
"your NLU pipeline does not include any components that use these "
"features. To make use of lookup tables, add a 'DIETClassifier' or a "
"'CRFEntityExtractor' with the 'pattern' feature to your pipeline."
)
elif any_components_in_pipeline(["CRFEntityExtractor"], pipeline):
crf_components = [c for c in pipeline if c.name == "CRFEntityExtractor"]
# check to see if any of the possible CRFEntityExtractors will
# featurize `pattern`
has_pattern_feature = False
for crf in crf_components:
crf_features = crf.component_config.get("features")
# iterate through [[before],[word],[after]] features
has_pattern_feature = "pattern" in itertools.chain(*crf_features)
if not has_pattern_feature:
rasa.shared.utils.io.raise_warning(
"You have defined training data consisting of lookup tables, but "
"your NLU pipeline's 'CRFEntityExtractor' does not include the "
"'pattern' feature. To featurize lookup tables, add the 'pattern' "
"feature to the 'CRFEntityExtractor' in your pipeline."
)
if data.entity_synonyms and not any_components_in_pipeline(
["EntitySynonymMapper"], pipeline
):
rasa.shared.utils.io.raise_warning(
"You have defined synonyms in your training data, but "
"your NLU pipeline does not include an 'EntitySynonymMapper'. "
"To map synonyms, add an 'EntitySynonymMapper' to your pipeline."
)
def warn_of_competing_extractors(pipeline: List["Component"]) -> None:
"""Warns the user when using competing extractors.
Competing extractors are e.g. `CRFEntityExtractor` and `DIETClassifier`.
Both of these look for the same entities based on the same training data
leading to ambiguity in the results.
Args:
pipeline: The list of the :class:`rasa.nlu.components.Component`s.
"""
extractors_in_pipeline = find_components_in_pipeline(TRAINABLE_EXTRACTORS, pipeline)
if len(extractors_in_pipeline) > 1:
rasa.shared.utils.io.raise_warning(
f"You have defined multiple entity extractors that do the same job "
f"in your pipeline: "
f"{', '.join(extractors_in_pipeline)}. "
f"This can lead to the same entity getting "
f"extracted multiple times. Please read the documentation section "
f"on entity extractors to make sure you understand the implications: "
f"{DOCS_URL_COMPONENTS}#entity-extractors"
)
def warn_of_competition_with_regex_extractor(
pipeline: List["Component"], data: TrainingData
) -> None:
"""Warns when regex entity extractor is competing with a general one.
This might be the case when the following conditions are all met:
* You are using a general entity extractor and the `RegexEntityExtractor`
* AND you have regex patterns for entity type A
* AND you have annotated text examples for entity type A
Args:
pipeline: The list of the :class:`rasa.nlu.components.Component`s.
data: The :class:`rasa.shared.nlu.training_data.training_data.TrainingData`.
"""
present_general_extractors = find_components_in_pipeline(
TRAINABLE_EXTRACTORS, pipeline
)
has_general_extractors = len(present_general_extractors) > 0
has_regex_extractor = any_components_in_pipeline(["RegexEntityExtractor"], pipeline)
regex_entity_types = {rf["name"] for rf in data.regex_features}
overlap_between_types = data.entities.intersection(regex_entity_types)
has_overlap = len(overlap_between_types) > 0
if has_general_extractors and has_regex_extractor and has_overlap:
rasa.shared.utils.io.raise_warning(
f"You have an overlap between the RegexEntityExtractor and the "
f"statistical entity extractors {', '.join(present_general_extractors)} "
f"in your pipeline. Specifically both types of extractors will "
f"attempt to extract entities of the types "
f"{', '.join(overlap_between_types)}. This can lead to multiple "
f"extraction of entities. Please read RegexEntityExtractor's "
f"documentation section to make sure you understand the "
f"implications: {DOCS_URL_COMPONENTS}#regexentityextractor"
)
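# --- Added illustrative sketch (not part of the original module) ---
# The overlap test above reduces to a set intersection between annotated
# entity types and regex feature names; the sample values are hypothetical.
def _example_entity_overlap() -> set:
    annotated_entities = {"city", "zip_code", "name"}
    regex_features = [{"name": "zip_code", "pattern": r"\d{5}"}]
    regex_entity_types = {rf["name"] for rf in regex_features}
    return annotated_entities.intersection(regex_entity_types)  # {"zip_code"}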
class MissingArgumentError(ValueError):
"""Raised when not all parameters can be filled from the context / config.
Attributes:
message -- explanation of which parameter is missing
"""
def __init__(self, message: Text) -> None:
super().__init__(message)
self.message = message
def __str__(self) -> Text:
return self.message
class UnsupportedLanguageError(RasaException):
"""Raised when a component is created but the language is not supported.
Attributes:
component -- component name
language -- language that component doesn't support
"""
def __init__(self, component: Text, language: Text) -> None:
self.component = component
self.language = language
super().__init__(component, language)
def __str__(self) -> Text:
return (
f"component '{self.component}' does not support language '{self.language}'."
)
class ComponentMetaclass(type):
"""Metaclass with `name` class property."""
@property
def name(cls) -> Text:
"""The name property is a function of the class - its __name__."""
return cls.__name__
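# --- Added illustrative sketch (not part of the original module) ---
# The metaclass above makes `name` readable on the class object itself,
# which is what lets a pipeline be declared by class names; `MyFeaturizer`
# is a made-up class.
def _example_metaclass_name() -> str:
    class MyFeaturizer(metaclass=ComponentMetaclass):
        pass

    return MyFeaturizer.name  # "MyFeaturizer", no instance needed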
class Component(metaclass=ComponentMetaclass):
"""A component is a message processing unit in a pipeline.
Components are collected sequentially in a pipeline. Each component
is called one after another. This holds for
initialization, training, persisting and loading the components.
If a component comes first in a pipeline, its
methods will be called first.
E.g. to process an incoming message, the ``process`` method of
each component will be called. During the processing
(as well as the training, persisting and initialization)
components can pass information to other components.
The information is passed to other components by providing
    attributes to the so-called pipeline context. The
pipeline context contains all the information of the previous
components a component can use to do its own
processing. For example, a featurizer component can provide
features that are used by another component down
the pipeline to do intent classification.
"""
@property
def name(self) -> Text:
"""Returns the name of the component to be used in the model configuration.
Component class name is used when integrating it in a
pipeline. E.g. `[ComponentA, ComponentB]`
will be a proper pipeline definition where `ComponentA`
is the name of the first component of the pipeline.
"""
# cast due to https://github.com/python/mypy/issues/7945
return typing.cast(str, type(self).name)
@property
def unique_name(self) -> Text:
"""Gets a unique name for the component in the pipeline.
The unique name can be used to distinguish components in
a pipeline, e.g. when the pipeline contains multiple
featurizers of the same type.
"""
index = self.component_config.get(COMPONENT_INDEX)
return self.name if index is None else f"component_{index}_{self.name}"
@classmethod
def required_components(cls) -> List[Type["Component"]]:
"""Specifies which components need to be present in the pipeline.
Which components are required by this component.
Listed components should appear before the component itself in the pipeline.
Returns:
The class names of the required components.
"""
return []
# Defines the default configuration parameters of a component
# these values can be overwritten in the pipeline configuration
# of the model. The component should choose sensible defaults
# and should be able to create reasonable results with the defaults.
defaults = {}
    # Defines what language(s) this component can handle.
    # This attribute is used by the class method `can_handle_language`.
    # Default value is None. If both `supported_language_list` and
    # `not_supported_language_list` are None, the component can handle
    # all languages. Only one of `supported_language_list` and
    # `not_supported_language_list` may be set to a non-None value.
    # This is an important feature for backwards compatibility of components.
    supported_language_list = None
    # Defines what language(s) this component can NOT handle.
    # This attribute is used by the class method `can_handle_language`.
    # Default value is None. If both `supported_language_list` and
    # `not_supported_language_list` are None, the component can handle
    # all languages. Only one of `supported_language_list` and
    # `not_supported_language_list` may be set to a non-None value.
    # This is an important feature for backwards compatibility of components.
    not_supported_language_list = None
def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None:
if not component_config:
component_config = {}
# makes sure the name of the configuration is part of the config
# this is important for e.g. persistence
component_config["name"] = self.name
self.component_config: Dict[
Text, Any
] = rasa.utils.train_utils.override_defaults(self.defaults, component_config)
self.partial_processing_pipeline = None
self.partial_processing_context = None
@classmethod
def required_packages(cls) -> List[Text]:
"""Specifies which python packages need to be installed.
E.g. ``["spacy"]``. More specifically, these should be
importable python package names e.g. `sklearn` and not package
names in the dependencies sense e.g. `scikit-learn`
This list of requirements allows us to fail early during training
if a required package is not installed.
Returns:
The list of required package names.
"""
return []
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Text,
model_metadata: Optional["Metadata"] = None,
cached_component: Optional["Component"] = None,
**kwargs: Any,
) -> "Component":
"""Loads this component from file.
After a component has been trained, it will be persisted by
calling `persist`. When the pipeline gets loaded again,
this component needs to be able to restore itself.
Components can rely on any context attributes that are
created by :meth:`components.Component.create`
calls to components previous to this one.
Args:
meta: Any configuration parameter related to the model.
model_dir: The directory to load the component from.
model_metadata: The model's :class:`rasa.nlu.model.Metadata`.
cached_component: The cached component.
Returns:
the loaded component
"""
if cached_component:
return cached_component
return cls(meta)
@classmethod
def create(
cls, component_config: Dict[Text, Any], config: RasaNLUModelConfig
) -> "Component":
"""Creates this component (e.g. before a training is started).
Method can access all configuration parameters.
Args:
            component_config: The component's configuration parameters.
config: The model configuration parameters.
Returns:
The created component.
"""
# Check language supporting
language = config.language
if not cls.can_handle_language(language):
# check failed
raise UnsupportedLanguageError(cls.name, language)
return cls(component_config)
def provide_context(self) -> Optional[Dict[Text, Any]]:
"""Initializes this component for a new pipeline.
This function will be called before the training
is started and before the first message is processed using
the interpreter. The component gets the opportunity to
add information to the context that is passed through
the pipeline during training and message parsing. Most
components do not need to implement this method.
It's mostly used to initialize framework environments
like MITIE and spacy
(e.g. loading word vectors for the pipeline).
Returns:
The updated component configuration.
"""
pass
def train(
self,
training_data: TrainingData,
config: Optional[RasaNLUModelConfig] = None,
**kwargs: Any,
) -> None:
"""Trains this component.
        This is the component's chance to train itself provided
with the training data. The component can rely on
any context attribute to be present, that gets created
by a call to :meth:`rasa.nlu.components.Component.create`
of ANY component and
on any context attributes created by a call to
:meth:`rasa.nlu.components.Component.train`
of components previous to this one.
Args:
training_data: The
:class:`rasa.shared.nlu.training_data.training_data.TrainingData`.
config: The model configuration parameters.
"""
pass
def process(self, message: Message, **kwargs: Any) -> None:
"""Processes an incoming message.
        This is the component's chance to process an incoming
message. The component can rely on
any context attribute to be present, that gets created
by a call to :meth:`rasa.nlu.components.Component.create`
of ANY component and
on any context attributes created by a call to
:meth:`rasa.nlu.components.Component.process`
of components previous to this one.
Args:
message: The :class:`rasa.shared.nlu.training_data.message.Message` to
process.
"""
pass
def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
"""Persists this component to disk for future loading.
Args:
file_name: The file name of the model.
model_dir: The directory to store the model to.
Returns:
An optional dictionary with any information about the stored model.
"""
pass
@classmethod
def cache_key(
cls, component_meta: Dict[Text, Any], model_metadata: "Metadata"
) -> Optional[Text]:
"""This key is used to cache components.
If a component is unique to a model it should return None.
Otherwise, an instantiation of the
component will be reused for all models where the
metadata creates the same key.
Args:
component_meta: The component configuration.
model_metadata: The component's :class:`rasa.nlu.model.Metadata`.
Returns:
A unique caching key.
"""
return None
def __getstate__(self) -> Any:
"""Gets a copy of picklable parts of the component."""
d = self.__dict__.copy()
# these properties should not be pickled
if "partial_processing_context" in d:
del d["partial_processing_context"]
if "partial_processing_pipeline" in d:
del d["partial_processing_pipeline"]
return d
def __eq__(self, other: Any) -> bool:
return self.__dict__ == other.__dict__
def prepare_partial_processing(
self, pipeline: List["Component"], context: Dict[Text, Any]
) -> None:
"""Sets the pipeline and context used for partial processing.
The pipeline should be a list of components that are
previous to this one in the pipeline and
have already finished their training (and can therefore
be safely used to process messages).
Args:
pipeline: The list of components.
context: The context of processing.
"""
self.partial_processing_pipeline = pipeline
self.partial_processing_context = context
def partially_process(self, message: Message) -> Message:
"""Allows the component to process messages during
training (e.g. external training data).
The passed message will be processed by all components
previous to this one in the pipeline.
Args:
message: The :class:`rasa.shared.nlu.training_data.message.Message` to
process.
Returns:
The processed :class:`rasa.shared.nlu.training_data.message.Message`.
"""
if self.partial_processing_context is not None:
for component in self.partial_processing_pipeline:
component.process(message, **self.partial_processing_context)
else:
logger.info("Failed to run partial processing due to missing pipeline.")
return message
@classmethod
def can_handle_language(cls, language: Hashable) -> bool:
"""Check if component supports a specific language.
        This method can be overridden when needed (e.g. to dynamically
        determine which languages are supported).
Args:
language: The language to check.
Returns:
`True` if component can handle specific language, `False` otherwise.
"""
# If both `supported_language_list` and `not_supported_language_list` are set
# to `None`,
# it means: support all languages
if language is None or (
cls.supported_language_list is None
and cls.not_supported_language_list is None
):
return True
# check language supporting settings
if cls.supported_language_list and cls.not_supported_language_list:
            # Setting both language lists to non-None values would be ambiguous.
            raise RasaException(
                "Only one of `supported_language_list` and "
                "`not_supported_language_list` can be set to not None"
)
# convert to `list` for membership test
supported_language_list = (
cls.supported_language_list
if cls.supported_language_list is not None
else []
)
not_supported_language_list = (
cls.not_supported_language_list
if cls.not_supported_language_list is not None
else []
)
# check if user provided a valid setting
if not supported_language_list and not not_supported_language_list:
            # One of the language settings must be valid (not None and not an
            # empty list). Three combinations of settings are not valid:
            # (None, []), ([], None) and ([], [])
            raise RasaException(
                "Empty lists for both "
                "`supported_language_list` and `not_supported_language_list` "
"is not a valid setting. If you meant to allow all languages "
"for the component use `None` for both of them."
)
if supported_language_list:
return language in supported_language_list
else:
return language not in not_supported_language_list
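# --- Added illustrative sketch (not part of the original module) ---
# A minimal truth table for the language gate implemented in
# `Component.can_handle_language`; both subclasses are hypothetical.
def _example_language_gate() -> None:
    class OnlyEnglish(Component):
        supported_language_list = ["en"]

    class NotChinese(Component):
        not_supported_language_list = ["zh"]

    assert OnlyEnglish.can_handle_language("en")
    assert not OnlyEnglish.can_handle_language("de")
    assert NotChinese.can_handle_language("en")
    assert not NotChinese.can_handle_language("zh")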
class ComponentBuilder:
"""Creates trainers and interpreters based on configurations.
Caches components for reuse.
"""
def __init__(self, use_cache: bool = True) -> None:
self.use_cache = use_cache
# Reuse nlp and featurizers where possible to save memory,
# every component that implements a cache-key will be cached
self.component_cache = {}
def __get_cached_component(
self, component_meta: Dict[Text, Any], model_metadata: "Metadata"
) -> Tuple[Optional[Component], Optional[Text]]:
"""Load a component from the cache, if it exists.
Returns the component, if found, and the cache key.
"""
from rasa.nlu import registry
# try to get class name first, else create by name
component_name = component_meta.get("class", component_meta["name"])
component_class = registry.get_component_class(component_name)
cache_key = component_class.cache_key(component_meta, model_metadata)
if (
cache_key is not None
and self.use_cache
and cache_key in self.component_cache
):
return self.component_cache[cache_key], cache_key
return None, cache_key
def __add_to_cache(self, component: Component, cache_key: Optional[Text]) -> None:
"""Add a component to the cache."""
if cache_key is not None and self.use_cache:
self.component_cache[cache_key] = component
logger.info(
f"Added '{component.name}' to component cache. Key '{cache_key}'."
)
def load_component(
self,
component_meta: Dict[Text, Any],
model_dir: Text,
model_metadata: "Metadata",
**context: Any,
) -> Optional[Component]:
"""Loads a component.
Tries to retrieve a component from the cache, else calls
``load`` to create a new component.
Args:
component_meta:
The metadata of the component to load in the pipeline.
model_dir:
The directory to read the model from.
model_metadata (Metadata):
The model's :class:`rasa.nlu.model.Metadata`.
Returns:
The loaded component.
"""
from rasa.nlu import registry
try:
cached_component, cache_key = self.__get_cached_component(
component_meta, model_metadata
)
component = registry.load_component_by_meta(
component_meta, model_dir, model_metadata, cached_component, **context
)
if not cached_component:
# If the component wasn't in the cache,
# let us add it if possible
self.__add_to_cache(component, cache_key)
return component
except MissingArgumentError as e: # pragma: no cover
raise RasaException(
f"Failed to load component from file '{component_meta.get('file')}'. "
f"Error: {e}"
)
def create_component(
self, component_config: Dict[Text, Any], cfg: RasaNLUModelConfig
) -> Component:
"""Creates a component.
Tries to retrieve a component from the cache,
calls `create` to create a new component.
Args:
component_config: The component configuration.
cfg: The model configuration.
Returns:
The created component.
"""
from rasa.nlu import registry
from rasa.nlu.model import Metadata
try:
component, cache_key = self.__get_cached_component(
component_config, Metadata(cfg.as_dict())
)
if component is None:
component = registry.create_component_by_config(component_config, cfg)
self.__add_to_cache(component, cache_key)
return component
except MissingArgumentError as e: # pragma: no cover
raise RasaException(
f"Failed to create component '{component_config['name']}'. "
f"Error: {e}"
)
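# --- Added illustrative sketch (not part of the original module) ---
# The builder's cache boils down to "same cache key => same instance";
# this mimics that contract with a plain dict, independent of the registry.
def _example_cache_contract() -> bool:
    cache = {}

    def get_or_create(key, factory):
        if key is not None and key in cache:
            return cache[key]
        component = factory()
        if key is not None:
            cache[key] = component
        return component

    first = get_or_create("spacy_nlp_en", object)
    second = get_or_create("spacy_nlp_en", object)
    return first is second  # True: the cached instance is reused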
| 38.160173 | 88 | 0.653885 |
6b7165eed86970a5217e2147cf9f92d7977a5320 | 9,644 | py | Python | MCTS.py | HenningBuhl/alpha-zero-general | 6cf547ec2e84404254ec7f130e03ba31e18c0655 | [
"MIT"
] | null | null | null | MCTS.py | HenningBuhl/alpha-zero-general | 6cf547ec2e84404254ec7f130e03ba31e18c0655 | [
"MIT"
] | null | null | null | MCTS.py | HenningBuhl/alpha-zero-general | 6cf547ec2e84404254ec7f130e03ba31e18c0655 | [
"MIT"
] | null | null | null | import math
import numpy as np
import time
import itertools
class MCTS():
"""
This class handles the MCTS tree.
"""
def __init__(self, game, nnet, args):
self.game = game
self.nnet = nnet
self.args = args
self.Qsa = {} # stores Q values for s,a (as defined in the paper)
self.Nsa = {} # stores #times edge s,a was visited
self.Ns = {} # stores #times board s was visited
self.Ps = {} # stores initial policy (returned by neural net)
self.Es = {} # stores game.getGameEnded ended for board s
self.Vs = {} # stores game.getValidMoves for board s
def getActionProb(self, canonicalBoard, temp=1, customInputData=None):
"""
This function performs numMCTSSims simulations of MCTS starting from
canonicalBoard.
Returns:
probs: a policy vector where the probability of the ith action is
proportional to Nsa[(s,a)]**(1./temp)
"""
start = time.time()
sum_v = 0
actual_sims = 0
for i in range(self.args.numMCTSSims) if self.args.numMCTSSims is not None else itertools.count():
actual_sims += 1
sum_v += self.search(canonicalBoard, depth=0, rootNode=True, customInputData=customInputData)
elapsed = time.time() - start
if self.args.maxTime is not None and elapsed > self.args.maxTime:
break
s = self.game.stringRepresentation(canonicalBoard)
counts = [self.Nsa[(s,a)] if (s,a) in self.Nsa else 0 for a in range(self.game.getActionSize())]
if temp==0:
bestA = np.argmax(counts)
probs = [0]*len(counts)
probs[bestA] = 1
else:
counts = [x**(1./temp) for x in counts]
counts_sum = float(sum(counts))
probs = [x/counts_sum for x in counts]
return probs, sum_v / actual_sims
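    # Added note: temp=0 collapses the policy onto the single most-visited
    # action (argmax of N(s,a)); temp=1 samples proportionally to visit
    # counts, e.g. counts [1, 3] -> probs [0.25, 0.75].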
def search(self, canonicalBoard, depth=0, rootNode=False, customInputData=None):
"""
This function performs one iteration of MCTS. It is recursively called
till a leaf node is found. The action chosen at each node is one that
has the maximum upper confidence bound as in the paper.
Once a leaf node is found, the neural network is called to return an
initial policy P and a value v for the state. This value is propagated
up the search path. In case the leaf node is a terminal state, the
outcome is propagated up the search path. The values of Ns, Nsa, Qsa are
updated.
NOTE: the return values are the negative of the value of the current
state. This is done since v is in [-1,1] and if v is the value of a
state for the current player, then its value is -v for the other player.
Returns:
v: the negative of the value of the current canonicalBoard
"""
s = self.game.stringRepresentation(canonicalBoard)
if s not in self.Es:
self.Es[s] = self.game.getGameEnded(canonicalBoard, 1)
if self.Es[s] != 0:
# Terminal node.
return -self.Es[s]
if self.args.maxDepth is not None and depth == self.args.maxDepth: # Max depth reached.
#print(f'ABORT MCTS: MAX DEPTH REACHED')
return 0 # Game ongoing.
if s not in self.Ps:
# Leaf node.
valids = self.game.getValidMoves(canonicalBoard, 1)
if self.args.rollout == 'single':
if self.game.args.useCustomInput:
boardHistory, customInput = customInputData
self.Ps[s], v = self.nnet.predict(customInput)
else:
self.Ps[s], v = self.nnet.predict(canonicalBoard)
v = v[0]
self.Ps[s] = self.Ps[s] * valids # masking invalid moves
sum_Ps_s = np.sum(self.Ps[s])
if sum_Ps_s > 0:
self.Ps[s] /= sum_Ps_s # renormalize
else:
# if all valid moves were masked make all valid moves equally probable
                    # NB! All valid moves may be masked if either your NNet architecture is insufficient or you've got overfitting or something else.
# If you have got dozens or hundreds of these messages you should pay attention to your NNet and/or training process.
print("All valid moves were masked, do workaround.")
self.Ps[s] = self.Ps[s] + valids
self.Ps[s] /= np.sum(self.Ps[s])
elif self.args.rollout == 'random':
self.Ps[s] = valids / np.sum(valids)
board = canonicalBoard
cur_player = 1
while True: # Random rollout.
vs = self.game.getValidMoves(board, cur_player)
a = np.random.choice(vs.shape[0], p=vs/np.sum(vs))
board, cur_player = self.game.getNextState(board, cur_player, a)
r = cur_player*self.game.getGameEnded(board, cur_player)
if r != 0:
break
v = r
elif self.args.rollout == 'fast':
self.Ps[s] = valids / np.sum(valids)
board = canonicalBoard
cur_player = 1
                if self.game.args.useCustomInput:  # match the attribute path used by the other rollout branches
_, v = self.nnet.predict(customInputData[1])
else:
_, v = self.nnet.predict(board)
v = v[0]
while True: # Fast rollout.
vs = self.game.getValidMoves(board, cur_player)
pi_fast = self.nnet.predict_fast(board)
pi_fast = pi_fast * vs
a = np.random.choice(vs.shape[0], p=pi_fast/np.sum(pi_fast))
board, cur_player = self.game.getNextState(board, cur_player, a)
r = cur_player*self.game.getGameEnded(board, cur_player)
if r != 0:
break
lmbda = self.args.lambdaWeight
v = (1 - lmbda) * v + lmbda * r
elif self.args.rollout == 'slow':
self.Ps[s] = valids / np.sum(valids)
board = canonicalBoard
cur_player = 1
if self.game.args.useCustomInput:
boardHistory, customInput = customInputData
_, v = self.nnet.predict(customInput)
else:
_, v = self.nnet.predict(board)
v = v[0]
while True: # Slow rollout.
vs = self.game.getValidMoves(board, cur_player)
if self.game.args.useCustomInput:
pi, _ = self.nnet.predict(customInput)
else:
pi, _ = self.nnet.predict(board)
pi = pi * vs
a = np.random.choice(vs.shape[0], p=pi/np.sum(pi))
board, cur_player = self.game.getNextState(board, cur_player, a)
if self.game.args.useCustomInput:
boardHistory, customInput = self.game.getCustomInput(board, cur_player, boardHistory, customInput)
r = cur_player*self.game.getGameEnded(board, cur_player)
if r != 0:
break
lmbda = self.args.lambdaWeight
v = (1 - lmbda) * v + lmbda * r
else:
raise ValueError(f'rollout {self.args.rollout} is not supported.')
self.Vs[s] = valids
self.Ns[s] = 0
            return -v  # keep the float value; int() would truncate network outputs in (-1, 1) to 0
valids = self.Vs[s]
cur_best = -float('inf')
best_act = -1
# Dirichlet Noise.
useDirNoise = False
dirEpsilon = self.args.dirEpsilon
if rootNode and dirEpsilon > 0:
useDirNoise = True
dirAlpha = self.args.dirAlpha
dirEta = np.random.dirichlet([dirAlpha] * len(valids))
# pick the action with the highest upper confidence bound
for i, a in enumerate(range(self.game.getActionSize())):
if valids[a]:
if useDirNoise:
p = (1 - dirEpsilon) * self.Ps[s][a] + dirEpsilon * dirEta[i]
else:
p = self.Ps[s][a]
if (s,a) in self.Qsa:
u = self.Qsa[(s,a)] + self.args.cpuct*p*math.sqrt(self.Ns[s])/(1+self.Nsa[(s,a)])
else:
u = self.args.cpuct*p*math.sqrt(self.Ns[s])
if u > cur_best:
cur_best = u
best_act = a
a = best_act
next_s, next_player = self.game.getNextState(canonicalBoard, 1, a)
next_s = self.game.getCanonicalForm(next_s, next_player)
if self.game.args.useCustomInput:
boardHistory, customInput = customInputData
boardHistory, customInput = self.game.getCustomInput(next_s, next_player, boardHistory, customInput)
customInputData = (boardHistory, customInput)
v = self.search(next_s, depth=depth+1, customInputData=customInputData)
if (s,a) in self.Qsa:
self.Qsa[(s,a)] = (self.Nsa[(s,a)]*self.Qsa[(s,a)] + v)/(self.Nsa[(s,a)]+1)
self.Nsa[(s,a)] += 1
else:
self.Qsa[(s,a)] = v
self.Nsa[(s,a)] = 1
self.Ns[s] += 1
return -v
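# --- Added illustrative sketch (not part of the original module) ---
# The selection rule above is the PUCT formula
#   u(s, a) = Q(s, a) + cpuct * P(s, a) * sqrt(N(s)) / (1 + N(s, a));
# the numbers below are made up to show an unvisited-but-likely action
# outranking an already-visited one.
def _example_puct():
    import math

    cpuct, n_s = 1.0, 16
    visited = 0.3 + cpuct * 0.2 * math.sqrt(n_s) / (1 + 8)    # Q=0.3, P=0.2, Nsa=8
    unvisited = 0.0 + cpuct * 0.5 * math.sqrt(n_s) / (1 + 0)  # Q=0.0, P=0.5, Nsa=0
    return visited, unvisited  # (~0.389, 2.0): explore the unvisited action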
| 43.053571 | 149 | 0.530382 |
e5df415d47ae5054244e301d153a857b64e2d5ad | 759 | py | Python | gevent/gevent-demo-select.py | all3g/pieces | bc378fd22ddc700891fe7f34ab0d5b341141e434 | [
"CNRI-Python"
] | 34 | 2016-10-31T02:05:24.000Z | 2018-11-08T14:33:13.000Z | gevent/gevent-demo-select.py | join-us/python-programming | bc378fd22ddc700891fe7f34ab0d5b341141e434 | [
"CNRI-Python"
] | 2 | 2017-05-11T03:00:31.000Z | 2017-11-01T23:37:37.000Z | gevent/gevent-demo-select.py | join-us/python-programming | bc378fd22ddc700891fe7f34ab0d5b341141e434 | [
"CNRI-Python"
] | 21 | 2016-08-19T09:05:45.000Z | 2018-11-08T14:33:16.000Z | #!/usr/bin/env python
# -*- coding: utf8 -*-
import time
import gevent
from gevent import select
start = time.time()
tic = lambda: 'at %1.1f seconds' % (time.time() - start)
def gr1():
    # Waits in select() for two seconds, but we don't want to stick around...
print('Started Polling: %s' % tic())
select.select([], [], [], 2)
print('Ended Polling: %s' % tic())
def gr2():
    # Waits in select() for two seconds, but we don't want to stick around...
print('Started Polling: %s' % tic())
select.select([], [], [], 2)
print('Ended Polling: %s' % tic())
def gr3():
    print("Hey let's do some stuff while the greenlets poll, %s" % tic())
gevent.sleep()
gevent.joinall([
gevent.spawn(gr1),
gevent.spawn(gr2),
gevent.spawn(gr3)
])
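# Added note: gr1 and gr2 block inside select() concurrently, so gr3's print
# interleaves almost immediately and the whole run takes roughly 2 seconds
# rather than 4, which is the point of cooperative greenlets.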
| 21.083333 | 72 | 0.59552 |
ba362a8733676f21712d3da607e890bb31858409 | 7,154 | py | Python | bionumpy/file_buffers.py | knutdrand/bionumpy | 2a520ebfce19f346284bd5cf21d6197f6ba801ba | [
"MIT"
] | null | null | null | bionumpy/file_buffers.py | knutdrand/bionumpy | 2a520ebfce19f346284bd5cf21d6197f6ba801ba | [
"MIT"
] | null | null | null | bionumpy/file_buffers.py | knutdrand/bionumpy | 2a520ebfce19f346284bd5cf21d6197f6ba801ba | [
"MIT"
] | 1 | 2022-03-07T21:58:03.000Z | 2022-03-07T21:58:03.000Z | import numpy as np
from npstructures import RaggedArray, RaggedView, RaggedShape, npdataclass
from .encodings import BaseEncoding, QualityEncoding
from .datatypes import SequenceEntry, SequenceEntryWithQuality
from .sequences import Sequences
NEWLINE = 10
class FileBuffer:
_buffer_divisor = 1
COMMENT = 0
def __init__(self, data, new_lines):
self._data = np.asanyarray(data)
self._new_lines = np.asanyarray(new_lines)
self._is_validated = False
self.size = self._data.size
@classmethod
def from_raw_buffer(cls, raw_buffer) -> "FileBuffer":
"""Create a buffer with full entries
A raw buffer can end with data that does not represent full entries.
This method extracts all the full entries, so that the next buffer can
start from the last incomplete entry.
Parameters
----------
chunk : np.ndarray
Raw buffer with data that might end with incomplete entry
Returns
-------
'FileBuffer'
Buffer with complete entries
Examples
--------
"""
return NotImplemented
@classmethod
def from_data(cls, data: npdataclass) -> "FileBuffer":
"""Create FileBuffer from a data set
Create a FileBuffer that can be written to file
Parameters
----------
data : npdataclass
Data set containing the data to be written
Returns
-------
'FileBuffer'
FileBuffer containing the data
"""
return NotImplemented
def validate_if_not(self):
if not self._is_validated:
self._validate()
def get_data(self) -> npdataclass:
"""Extract the data from the buffer
The default way to extract data from the the buffer
Returns
-------
npdataclass
Data set containing the data from the buffer
"""
return NotImplemented
def _move_intervals_to_2d_array(self, starts, ends, fill_value=0):
n_intervals = starts.size
n_chars = ends - starts
from_indices, _ = RaggedView(starts, n_chars).get_flat_indices()
max_chars = np.max(n_chars)
array = np.full(n_intervals * max_chars, fill_value, dtype=np.uint8)
to_indices, _ = RaggedView(
max_chars * np.arange(1, n_intervals + 1) - n_chars, n_chars
).get_flat_indices()
array[to_indices] = self._data[from_indices]
return array.reshape((n_intervals, max_chars))
def _move_intervals_to_ragged_array(self, starts, ends=None, lens=None):
if lens is None:
lens = ends - starts
indices, shape = RaggedView(starts, lens).get_flat_indices()
return Sequences(self._data[indices], shape)
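# --- Added illustrative sketch (not part of the original module) ---
# `_move_intervals_to_2d_array` right-aligns each interval into a padded
# row; the same idea with plain numpy on a tiny made-up byte buffer:
def _example_padded_rows():
    import numpy as np

    data = np.frombuffer(b"ACGTACG", dtype=np.uint8)
    starts, ends = np.array([0, 4]), np.array([4, 7])
    width = int((ends - starts).max())
    rows = np.zeros((len(starts), width), dtype=np.uint8)
    for i, (s, e) in enumerate(zip(starts, ends)):
        rows[i, width - (e - s):] = data[s:e]  # right-aligned, zero-padded
    return rows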
class OneLineBuffer(FileBuffer):
n_lines_per_entry = 2
_buffer_divisor = 32
@classmethod
def from_raw_buffer(cls, chunk) -> "OneLineBuffer":
"""Create a buffer with full entries
Extract complete entries, i. e. a number of lines that is divisible by lines per entry
Parameters
----------
chunk : np.ndarray
Raw buffer with data that might end with incomplete entry
Returns
-------
'OneLineBuffer'
Buffer with complete entries
Examples
--------
"""
new_lines = np.flatnonzero(chunk == NEWLINE)
n_lines = new_lines.size
assert n_lines >= cls.n_lines_per_entry, "No complete entry in buffer"
new_lines = new_lines[: n_lines - (n_lines % cls.n_lines_per_entry)]
return cls(chunk[: new_lines[-1] + 1], new_lines)
def get_sequences(self) -> Sequences:
self.validate_if_not()
sequence_starts = self._new_lines[:: self.n_lines_per_entry] + 1
sequence_lens = self._new_lines[1 :: self.n_lines_per_entry] - sequence_starts
indices, shape = RaggedView(sequence_starts, sequence_lens).get_flat_indices()
m = indices.size
d = m % self._buffer_divisor
seq = np.empty(m - d + self._buffer_divisor, dtype=self._data.dtype)
seq[:m] = self._data[indices]
return Sequences(seq, shape)
def get_data(self):
self.validate_if_not()
starts = np.insert(self._new_lines, 0, -1)
lengths = np.diff(starts)
self.lines = Sequences(self._data, RaggedShape(lengths))
sequences = self.lines[1 :: self.n_lines_per_entry, :-1]
headers = self.lines[:: self.n_lines_per_entry, 1:-1]
return SequenceEntry(headers, sequences)
@classmethod
def from_data(cls, entries):
name_lengths = entries.name.shape.lengths
sequence_lengths = entries.sequence.shape.lengths
line_lengths = np.hstack(
(name_lengths[:, None] + 2, sequence_lengths[:, None] + 1)
).ravel()
buf = np.empty(line_lengths.sum(), dtype=np.uint8)
lines = RaggedArray(buf, line_lengths)
step = cls.n_lines_per_entry
lines[0::step, 1:-1] = entries.name
lines[1::step, :-1] = entries.sequence
lines[0::step, 0] = ord(">")
lines[:, -1] = ord("\n")
return buf
def _validate(self):
n_lines = self._new_lines.size
assert n_lines % self.n_lines_per_entry == 0, "Wrong number of lines in buffer"
header_idxs = (
self._new_lines[self.n_lines_per_entry - 1 : -1 : self.n_lines_per_entry]
+ 1
)
assert np.all(self._data[header_idxs] == self.HEADER)
self._is_validated = True
class TwoLineFastaBuffer(OneLineBuffer):
HEADER = 62
n_lines_per_entry = 2
_encoding = BaseEncoding
class FastQBuffer(OneLineBuffer):
HEADER = 64
n_lines_per_entry = 4
_encoding = BaseEncoding
dataclass = SequenceEntryWithQuality
def get_data(self):
seq_entry = super().get_data()
quality = QualityEncoding.encode(
self.lines[3 :: self.n_lines_per_entry, :-1]
)
return SequenceEntryWithQuality(seq_entry.name, seq_entry.sequence, quality)
@classmethod
def _get_line_lens(cls, entries):
name_lengths = entries.name.shape.lengths[:, None]
sequence_lengths = entries.sequence.shape.lengths[:, None]
return (
np.hstack(
(
name_lengths + 1,
sequence_lengths,
np.ones_like(sequence_lengths),
sequence_lengths,
)
).ravel()
+ 1
)
@classmethod
def from_data(cls, entries):
line_lengths = cls._get_line_lens(entries)
buf = np.empty(line_lengths.sum(), dtype=np.uint8)
lines = RaggedArray(buf, line_lengths)
step = cls.n_lines_per_entry
lines[0::step, 1:-1] = entries.name
lines[1::step, :-1] = entries.sequence
lines[2::step, 0] = ord("+")
lines[3::step, :-1] = QualityEncoding.decode(entries.quality)
lines[0::step, 0] = cls.HEADER
lines[:, -1] = ord("\n")
return buf
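# Added note: for one FASTQ entry `_get_line_lens` yields four line lengths,
# (len(name) + 1, len(seq), 1, len(seq)), each plus one newline byte, so a
# 4-char name with a 10-base read serializes to 6 + 11 + 2 + 11 = 30 bytes
# ("@name\n", sequence line, "+\n", quality line).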
| 31.377193 | 94 | 0.609309 |
0d3520abe5ef46467b75149a6e8f2ee92360d2e2 | 573 | py | Python | Desafio054.py | Baeth/CeV-Python | 7d0952d096b2f945679f4f8fe938754f24c5775b | [
"Unlicense"
] | 2 | 2017-12-14T22:42:41.000Z | 2018-03-28T10:08:02.000Z | Desafio054.py | Baeth/CeV-Python | 7d0952d096b2f945679f4f8fe938754f24c5775b | [
"Unlicense"
] | null | null | null | Desafio054.py | Baeth/CeV-Python | 7d0952d096b2f945679f4f8fe938754f24c5775b | [
"Unlicense"
] | null | null | null | # Read the ages of 7 people and report how many are adults and how many are minors.
maior = []
menor = []
for f in range(1, 8):
    i = int(input(f'Enter the age of person #{f}: '))
if i >= 18:
maior.append(i)
    elif 18 > i >= 0:  # Comparison so that negative ages are not recorded.
menor.append(i)
else:
        print('With an age like that, the person either never existed or is long gone! Not counting this one.')
# Tests of the final lists.
# print('Maior', maior)
# print('Menor', menor)
print(f'We have {len(menor)} minors and {len(maior)} adults.')
| 31.833333 | 99 | 0.682373 |
f888771cbd637c0b2fdf66ee7f0c7ef301228e8d | 1,268 | py | Python | qhub/provider/cloud/digital_ocean.py | ericdatakelly/qhub | 3275843543e1388e0d4b45c9bc542f5de10a716f | [
"BSD-3-Clause"
] | null | null | null | qhub/provider/cloud/digital_ocean.py | ericdatakelly/qhub | 3275843543e1388e0d4b45c9bc542f5de10a716f | [
"BSD-3-Clause"
] | null | null | null | qhub/provider/cloud/digital_ocean.py | ericdatakelly/qhub | 3275843543e1388e0d4b45c9bc542f5de10a716f | [
"BSD-3-Clause"
] | null | null | null | import os
import functools
import requests
def digital_ocean_request(url, method="GET", json=None):
BASE_DIGITALOCEAN_URL = "https://api.digitalocean.com/v2/"
for name in {"DIGITALOCEAN_TOKEN"}:
if name not in os.environ:
raise ValueError(
f"Digital Ocean api requests require environment variable={name} defined"
)
headers = {"Authorization": f'Bearer {os.environ["DIGITALOCEAN_TOKEN"]}'}
method_map = {
"GET": requests.get,
}
response = method_map[method](
f"{BASE_DIGITALOCEAN_URL}{url}", headers=headers, json=json
)
response.raise_for_status()
return response
@functools.lru_cache()
def _kubernetes_options():
return digital_ocean_request("kubernetes/options").json()
def instances():
return _kubernetes_options()["options"]["sizes"]
def regions():
return _kubernetes_options()["options"]["regions"]
# the unused `region` parameter is kept for interface compatibility
def kubernetes_versions(region=None):
"""Return list of available kubernetes supported by cloud provider. Sorted from oldest to latest."""
supported_kubernetes_versions = sorted(
[_["slug"] for _ in _kubernetes_options()["options"]["versions"]]
)
return supported_kubernetes_versions
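# Added illustrative usage (hypothetical values; requires network access):
#
#   os.environ["DIGITALOCEAN_TOKEN"] = "<token>"  # required by the requests
#   kubernetes_versions()              # first call hits the options endpoint
#   regions()                          # served from the same lru_cache entry
#   _kubernetes_options.cache_clear()  # force a fresh fetch on the next call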
| 24.862745 | 104 | 0.684543 |
feb93a8a2091c8434832a18589d3f00bd8afb0e9 | 2,802 | py | Python | test/functional/rpc_invalidateblock.py | puzcoin/SyndicateQT | d49ebc0f0ba554bb41efb377b8c5bbc238677379 | [
"MIT"
] | null | null | null | test/functional/rpc_invalidateblock.py | puzcoin/SyndicateQT | d49ebc0f0ba554bb41efb377b8c5bbc238677379 | [
"MIT"
] | null | null | null | test/functional/rpc_invalidateblock.py | puzcoin/SyndicateQT | d49ebc0f0ba554bb41efb377b8c5bbc238677379 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Syndicate Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the invalidateblock RPC."""
from test_framework.test_framework import SyndicateTestFramework
from test_framework.util import *
class InvalidateTest(SyndicateTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
self.log.info("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
self.log.info("Mine 4 blocks on Node 0")
self.nodes[0].generate(4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
self.log.info("Mine competing 6 blocks on Node 1")
self.nodes[1].generate(6)
assert(self.nodes[1].getblockcount() == 6)
self.log.info("Connect nodes to force a reorg")
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
self.log.info("Make sure we won't reorg to a lower work chain:")
connect_nodes_bi(self.nodes,1,2)
self.log.info("Sync node 2 to node 1 so both have 6 blocks")
sync_blocks(self.nodes[1:3])
assert(self.nodes[2].getblockcount() == 6)
self.log.info("Invalidate block 5 on node 1 so its tip is now at 4")
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert(self.nodes[1].getblockcount() == 4)
self.log.info("Invalidate block 3 on node 2, so its tip is now 2")
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert(self.nodes[2].getblockcount() == 2)
self.log.info("..and then mine a block")
self.nodes[2].generate(1)
self.log.info("Verify all nodes are at the right height")
time.sleep(5)
assert_equal(self.nodes[2].getblockcount(), 3)
assert_equal(self.nodes[0].getblockcount(), 4)
node1height = self.nodes[1].getblockcount()
if node1height < 4:
raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
InvalidateTest().main()
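# Added note: the invariant exercised above is that invalidateblock marks a
# block and all of its descendants invalid, each node then re-selects its
# best remaining valid chain, and no node may reorg to a lower-work chain.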
| 43.107692 | 100 | 0.660243 |
55a460d1d7d9632946bf4e96c822e4a1da3ec90c | 1,587 | py | Python | my_socket.py | Jay54520/python_socket | ce2351a99153beebf9fab546ff00c08f517593f7 | [
"Apache-2.0"
] | 1 | 2019-10-31T09:18:14.000Z | 2019-10-31T09:18:14.000Z | my_socket.py | Jay54520/python_socket | ce2351a99153beebf9fab546ff00c08f517593f7 | [
"Apache-2.0"
] | 9 | 2018-02-14T03:57:59.000Z | 2018-02-20T12:26:16.000Z | my_socket.py | Jay54520/python_socket | ce2351a99153beebf9fab546ff00c08f517593f7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import socket
from settings import BUFFER_MAXSIZE, MSG_PREFIX_LENGTH
class MySocket:
def __init__(self, sock=None):
if sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock = sock
def my_send(self, msg: bytes):
msg = self.complete_msg(msg)
msg_length = len(msg)
total_sent = 0
while total_sent != msg_length:
sent = self.sock.send(msg[total_sent:])
if sent == 0:
                raise RuntimeError('Connection closed')
total_sent += sent
def my_recv(self) -> bytes:
        # Read the message-body length from the fixed-width prefix
body_length = int(self._recv(MSG_PREFIX_LENGTH).decode())
return self._recv(body_length)
def complete_msg(self, msg):
"""给消息加上 5 位的消息长度前缀"""
bytes_body_length = str(len(msg)).encode()
bytes_body_length = b'0' * (MSG_PREFIX_LENGTH - len(bytes_body_length)) + bytes_body_length
msg = bytes_body_length + msg
return msg
def _recv(self, msg_length) -> bytes:
"""
        Receive exactly msg_length bytes from the socket.
        :param msg_length: the number of bytes to receive
:return:
"""
total_recv = 0
chunks = []
while total_recv != msg_length:
chunk = self.sock.recv(min(msg_length - total_recv, BUFFER_MAXSIZE))
if chunk == b'':
                raise RuntimeError('Connection closed')
total_recv += len(chunk)
chunks.append(chunk)
return b''.join(chunks)
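    # Added note: the wire format is a fixed-width ASCII length prefix of
    # MSG_PREFIX_LENGTH zero-padded digits followed by the body; e.g. with a
    # 5-digit prefix, the 5-byte payload b"hello" travels as b"00005hello",
    # which is why my_recv first reads exactly MSG_PREFIX_LENGTH bytes.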
@property
def socket(self):
return self.sock | 28.854545 | 99 | 0.574039 |
600955770c84db523b2733941390c98004e2e0e7 | 572 | py | Python | parlaskupine/admin.py | VesterDe/parlalize | b725fe4b55b95f2ad3505aa70dac2474269ea3da | [
"Unlicense"
] | 1 | 2021-04-19T07:30:06.000Z | 2021-04-19T07:30:06.000Z | parlaskupine/admin.py | VesterDe/parlalize | b725fe4b55b95f2ad3505aa70dac2474269ea3da | [
"Unlicense"
] | null | null | null | parlaskupine/admin.py | VesterDe/parlalize | b725fe4b55b95f2ad3505aa70dac2474269ea3da | [
"Unlicense"
] | null | null | null | from django.contrib import admin
import sys
reload(sys)  # Python 2 only: reload() is a builtin there
sys.setdefaultencoding('utf-8')  # legacy encoding hack; removed in Python 3
from parlaskupine.models import *
# Register your models here.
admin.site.register(Organization)
admin.site.register(PGStatic)
admin.site.register(PercentOFAttendedSession)
admin.site.register(MPOfPg)
admin.site.register(MostMatchingThem)
admin.site.register(LessMatchingThem)
admin.site.register(DeviationInOrganization)
admin.site.register(CutVotes)
admin.site.register(WorkingBodies)
admin.site.register(VocabularySize)
admin.site.register(StyleScores)
admin.site.register(Tfidf)
| 28.6 | 45 | 0.840909 |
c8f76538b6414b7292b5ff39375761f8a0d7135a | 683 | py | Python | Python/MaximumDepthOfBinaryTreeTest.py | TonnyL/Windary | 39f85cdedaaf5b85f7ce842ecef975301fc974cf | [
"MIT"
] | 205 | 2017-11-16T08:38:46.000Z | 2022-03-06T05:50:03.000Z | Python/MaximumDepthOfBinaryTreeTest.py | santosh241/Windary | 39f85cdedaaf5b85f7ce842ecef975301fc974cf | [
"MIT"
] | 3 | 2018-04-10T10:17:52.000Z | 2020-12-11T08:00:09.000Z | Python/MaximumDepthOfBinaryTreeTest.py | santosh241/Windary | 39f85cdedaaf5b85f7ce842ecef975301fc974cf | [
"MIT"
] | 28 | 2018-04-10T06:42:42.000Z | 2021-09-14T14:15:39.000Z | from unittest import TestCase
from MaximumDepthOfBinaryTree import MaximumDepthOfBinaryTree, TreeNode
class TestMaximumDepthOfBinaryTree(TestCase):
def test_maxDepth(self):
m = MaximumDepthOfBinaryTree()
self.assertTrue(m.maxDepth(None) == 0)
node0 = TreeNode(3)
node0.left = TreeNode(9)
node0.right = TreeNode(20)
node0.right.left = TreeNode(15)
node0.right.right = TreeNode(7)
self.assertTrue(m.maxDepth(node0) == 3)
node1 = TreeNode(1)
node1.left = TreeNode(2)
node1.left.left = TreeNode(3)
node1.left.left.left = TreeNode(4)
self.assertTrue(m.maxDepth(node1) == 4)
| 28.458333 | 71 | 0.650073 |
3a4d758be4ae086eab4f7710d223f08b34618cbd | 1,033 | py | Python | db/__init__.py | leonardodalinky/warframe-market-recorder | c2bbeb2a8005b5678abc894f561faa42dd75df47 | [
"MIT"
] | null | null | null | db/__init__.py | leonardodalinky/warframe-market-recorder | c2bbeb2a8005b5678abc894f561faa42dd75df47 | [
"MIT"
] | null | null | null | db/__init__.py | leonardodalinky/warframe-market-recorder | c2bbeb2a8005b5678abc894f561faa42dd75df47 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, fields
import os
from dotenv import load_dotenv
from sqlalchemy.orm.decl_api import registry
from sqlalchemy.orm.decl_api import declared_attr
from sqlalchemy import create_engine, Column, Integer
load_dotenv()
@dataclass
class _Base:
__proto_enums__ = []
id: int = Column(Integer, primary_key=True)
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
def load_json_dict(self, d) -> None:
field_names = [field.name for field in fields(self.__class__)]
field_names = list(filter(lambda x: x not in self.__proto_enums__ and x != "id", field_names))
for field_name in field_names:
setattr(self, field_name, d[field_name])
self._load_proto_from_json_dict(d)
def _load_proto_from_json_dict(self, d) -> None:
# TO IMPLEMENT IN SUBCLASS
pass
Base = registry().generate_base(cls=_Base)
engine = create_engine(os.getenv("DB_URL"))
# constants
REPO_VERSION = 1
MIGRATE_REPO = "migrate_repo"
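# Added illustrative usage (hypothetical model; `String` would come from
# sqlalchemy, and the subclass must itself be a @dataclass for fields()):
#
#   @dataclass
#   class Item(Base):
#       __proto_enums__ = ["rarity"]
#       name: str = Column(String)
#
#   item = Item()
#   item.load_json_dict({"name": "Forma", "rarity": 2})
#   # item.name == "Forma"; "rarity" is left to _load_proto_from_json_dict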
| 26.487179 | 102 | 0.717328 |
52942f969d1986ca7178d3dcab607eca0514ff32 | 659 | py | Python | python/test_2020_04_1.py | wensby/advent-of-code | 50cd7fa2d35674d868a79ac8c75be24a43267e2b | [
"MIT"
] | null | null | null | python/test_2020_04_1.py | wensby/advent-of-code | 50cd7fa2d35674d868a79ac8c75be24a43267e2b | [
"MIT"
] | null | null | null | python/test_2020_04_1.py | wensby/advent-of-code | 50cd7fa2d35674d868a79ac8c75be24a43267e2b | [
"MIT"
] | null | null | null | import importlib
import unittest
solution = importlib.import_module('2020_04_1')
class Test2020Day4Part1(unittest.TestCase):
def test_example1(self):
input = (
'ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n'
'byr:1937 iyr:2017 cid:147 hgt:183cm\n'
'\n'
'iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n'
'hcl:#cfa07d byr:1929\n'
'\n'
'hcl:#ae17e1 iyr:2013\n'
'eyr:2024\n'
'ecl:brn pid:760753108 byr:1931\n'
'hgt:179cm\n'
'\n'
'hcl:#cfa07d eyr:2025 pid:166559648\n'
'iyr:2011 ecl:brn hgt:59in\n'
)
self.assertEqual(solution.run(input), 2)
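# Added note: the expected answer is 2 because part 1 only requires all
# mandatory fields to be present (cid is optional); the first and third
# passport blocks qualify, while the second (no hgt) and fourth (no byr) do not.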
| 26.36 | 59 | 0.60091 |
7cf40fbf855a043249e8cd53464f1809488df9de | 4,665 | py | Python | cnns/base_networks/resnet_truncated.py | johnwlambert/dlupi-heteroscedastic-dropou | 057dd079fce7ec8833b818b77fd694c01a1adcbc | [
"MIT"
] | 39 | 2018-04-04T13:29:03.000Z | 2022-03-12T23:57:33.000Z | cnns/base_networks/resnet_truncated.py | johnwlambert/dlupi-heteroscedastic-dropou | 057dd079fce7ec8833b818b77fd694c01a1adcbc | [
"MIT"
] | 5 | 2018-04-30T12:14:38.000Z | 2021-04-26T23:52:18.000Z | cnns/base_networks/resnet_truncated.py | johnwlambert/dlupi-heteroscedastic-dropou | 057dd079fce7ec8833b818b77fd694c01a1adcbc | [
"MIT"
] | 10 | 2018-05-14T09:14:55.000Z | 2021-11-10T00:23:21.000Z | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
# We don't use the model URLs because we are training from scratch.
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNetTruncated(nn.Module):
def __init__(self, block, layers):
self.inplanes = 64
super(ResNetTruncated, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0]) # 256
self.layer2 = self._make_layer(block, 128, layers[1], stride=2) # 512
self.layer3 = self._make_layer(block, 256, layers[2], stride=2) # 1024
self.layer4 = self._make_layer(block, 512, layers[3], stride=2) # 2048
# NO AvgPool or Final FC Layer...
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# no avg_pool, flatten, or fc here
        return x  # conv feature map (no avg-pool/flatten/fc); e.g. (N, 512, H, W) for BasicBlock nets
def resnet18_truncated():
"""Constructs a ResNet-18 model."""
model = ResNetTruncated(BasicBlock, [2, 2, 2, 2] )
return model
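# Added note: the truncated net stops after layer4, so for a 224x224 input
# resnet18_truncated() yields a (N, 512, 7, 7) feature map (BasicBlock,
# expansion 1); a quick shape check, assuming torch is available:
#
#   import torch
#   feats = resnet18_truncated()(torch.randn(1, 3, 224, 224))
#   assert feats.shape == (1, 512, 7, 7)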
def resnet152_truncated():
"""Constructs a ResNet-152 model. """
model = ResNetTruncated(Bottleneck, [3, 8, 36, 3] )
return model | 30.490196 | 78 | 0.58328 |
e6bcfc29279791b172c12464e5477f1ae94ba41f | 4,151 | py | Python | macaque/f_sql.py | pbujold/macaqueModules | 3f55ec45f691972e40cc8bd98071b7934ae24349 | [
"MIT"
] | 1 | 2021-08-25T08:45:52.000Z | 2021-08-25T08:45:52.000Z | macaque/f_sql.py | pbujold/macaqueModules | 3f55ec45f691972e40cc8bd98071b7934ae24349 | [
"MIT"
] | null | null | null | macaque/f_sql.py | pbujold/macaqueModules | 3f55ec45f691972e40cc8bd98071b7934ae24349 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
def correct_date(db):
    import sqlite3
    import numpy as np
    import pandas as pd
conn = sqlite3.connect(db)
dfTable = pd.read_sql("SELECT * FROM Trials_Digital", conn)
allDates = dfTable.date
uniques = np.unique(allDates)
index = {}
for unique in uniques:
index[tuple(unique)] = [
i for i, x in enumerate(allDates) if x == unique
]
    for dateString, ii in zip(uniques, index):  # dicts keep insertion order (Py3.7+), so pairs align
if len(dateString) != 10:
dateString = dateString.split(sep='-')
dateString[2] = '0' + dateString[2]
allDates[index[ii]] = '-'.join(dateString)
dfTable.date = allDates
dfTable.to_sql('Trials_Digital', conn, if_exists='replace', index=False)
conn.close()
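# Added illustrative sketch (exposition only): the padding above turns dates
# like "2019-03-4" into ISO "2019-03-04" so lexicographic order matches
# chronological order, e.g.:
#
#   parts = "2019-03-4".split(sep='-')  # ['2019', '03', '4']
#   parts[2] = '0' + parts[2]           # zero-pad the day
#   '-'.join(parts)                     # '2019-03-04'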
#%%
def sort_throughFolder(pre):
# 'C:\Users\phbuj\University Of Cambridge\OneDrive - University Of Cambridge\Lab Computer\DATA\data_Trident'
# pre = 'T68'
import sqlite3
import os
cwd = os.getcwd()
fileIDs = []
for file in os.listdir(cwd):
if file.endswith(".mat") and file.startswith(pre):
fileIDs.append(file)
# db = 'C:\Users\phbuj\Google Drive\Lab Data\database' + "\\" + pre
from pathlib import Path
home = str(Path.home())
db = home + r'\Google Drive\Lab Data\database' + "\\" + pre
conn = sqlite3.connect(db)
for fileID in fileIDs:
import_analog(fileID, conn)
conn.close()
#%%
def import_analog(fileID, conn):
import pandas as pd
import numpy as np
import datetime
from scipy.io import loadmat
try:
data = loadmat(fileID, squeeze_me=True, struct_as_record=False)
except:
return
cols = [
'date', 'time', 'trialNo', 'blockNo', 'analogTime', 'eye_X', 'eye_Y',
'Joystick_X', 'Joystick_Y'
]
analogDF = pd.DataFrame(columns=cols)
z = 0
dfs = []
for n in range(len(data['hh'].data.Trials)):
reps = len(data['hh'].data.Trials[n].analog_times)
date = data['hh'].data.Trials[n].clock[0:3]
if len(date) == 0:
continue
year = str(date[0])
month = str(date[1])
day = str(date[2])
if len(month) < 2:
month = '0' + month
if len(day) < 2:
day = '0' + day
time = str(data['hh'].data.Trials[n].clock[3:][0]) + ':' + str(
data['hh'].data.Trials[n].clock[3:][1]) + ':' + str(
data['hh'].data.Trials[n].clock[3:][2])
if len(data['hh'].data.Trials[n].events.shape) == 1:
if any(data['hh'].data.Trials[n].events) == 1002:
z += 1
elif any(data['hh'].data.Trials[n].events[:, 0] == 1002):
z += 1
if len(data['hh'].data.Trials[n].analog_data.shape) < 2:
continue
dfs.append(
pd.DataFrame({
'date': [year + '-' + month + '-' + day] * reps,
'time': [time] * reps,
'trialNo': [n] * reps,
'blockNo': [z] * reps,
'analogTime':
data['hh'].data.Trials[n].analog_times.tolist(),
'eye_X':
data['hh'].data.Trials[n].analog_data[:, 0].tolist(),
'eye_Y':
data['hh'].data.Trials[n].analog_data[:, 1].tolist(),
'Joystick_X':
data['hh'].data.Trials[n].analog_data[:, 2].tolist(),
'Joystick_Y':
data['hh'].data.Trials[n].analog_data[:, 3].tolist()
}))
print(fileID)
if dfs == []:
return
analogDF = pd.concat(dfs, ignore_index=True)
analogDF.to_sql('Trials_Analog', conn, if_exists='append', index=False)
#%%
def csv_to_database(mCode):
"""
create a database connection to a SQLite database
"""
    import sqlite3
    import pandas as pd
    from pathlib import Path
home = str(Path.home())
db = home + r'\Google Drive\Lab Data\database' + "\\" + mCode
conn = sqlite3.connect(db)
trials = pd.read_csv('trial_TableU74_.csv')
# dfTable = pd.read_sql("SELECT * FROM Trials_Digital", conn)
trials.to_sql('Trials_Digital', conn, if_exists='replace', index=False)
conn.close()
| 29.027972 | 115 | 0.540352 |
dc2eb7ce7a8eec9195a91eb6002212d80f2f1c36 | 7,584 | py | Python | python/uptune/opentuner/tuningrunmain.py | Hecmay/uptune | 20a1462c772041b8d1b99f326b372284896faaba | [
"BSD-3-Clause"
] | 29 | 2020-06-19T18:07:38.000Z | 2022-01-03T23:06:53.000Z | python/uptune/opentuner/tuningrunmain.py | Hecmay/uptune | 20a1462c772041b8d1b99f326b372284896faaba | [
"BSD-3-Clause"
] | 4 | 2020-07-14T16:20:23.000Z | 2021-05-15T13:56:24.000Z | python/uptune/opentuner/tuningrunmain.py | Hecmay/uptune | 20a1462c772041b8d1b99f326b372284896faaba | [
"BSD-3-Clause"
] | 2 | 2020-06-20T00:43:23.000Z | 2020-12-26T00:38:31.000Z | from __future__ import print_function
# vim: tabstop=2 shiftwidth=2 softtabstop=2 expandtab autoindent smarttab
from builtins import object
import argparse
import copy
import inspect
import logging
import math
import os
import socket
import sys
import time
import uuid
from datetime import datetime
from uptune.opentuner import resultsdb
from uptune.opentuner.search.driver import SearchDriver
from uptune.opentuner.measurement.driver import MeasurementDriver
log = logging.getLogger(__name__)
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('--label',
help="name for the TuningRun")
argparser.add_argument('--print-search-space-size', action='store_true',
help="Print out the estimated size of the search space and exit")
argparser.add_argument('--database',
help=("database to store tuning results in, see: "
"http://docs.sqlalchemy.org/en/rel_0_8/core/engines.html#database-urls"))
argparser.add_argument('--print-params','-pp',action='store_true',
help='show parameters of the configuration being tuned')
class CleanStop(Exception):
pass
class LogFormatter(logging.Formatter):
def format(self, record):
record.relativeCreated /= 1000.0
try:
# python 2.7
return super(LogFormatter, self).format(record)
except:
# python 2.6
return _OldFormatter.format(self, record)
_OldFormatter = logging.Formatter
logging.Formatter = LogFormatter
try:
# python 2.7
from logging.config import dictConfig
except:
# python 2.6
from .utils.dictconfig import dictConfig
the_logging_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {'console': {'format': '[%(relativeCreated)6.0fs] '
'%(levelname)7s %(name)s: '
'%(message)s'},
'file': {'format': '[%(asctime)-15s] '
'%(levelname)7s %(name)s: '
'%(message)s '
'@%(filename)s:%(lineno)d'}},
'handlers': {'console': {'class': 'logging.StreamHandler',
'formatter': 'console',
'level': 'INFO'},
'file': {'class': 'logging.FileHandler',
'filename': 'uptune.opentuner.log',
'formatter': 'file',
'level': 'WARNING'}},
'loggers': {'': {'handlers': ['console', 'file'],
'level': 'INFO',
'propagate': True}}}
def init_logging():
dictConfig(the_logging_config)
global init_logging
init_logging = lambda: None
class TuningRunMain(object):
def __init__(self,
measurement_interface,
args,
search_driver=SearchDriver,
measurement_driver=MeasurementDriver):
init_logging()
manipulator = measurement_interface.manipulator()
if args.print_search_space_size:
print("10^{%.2f}" % math.log(manipulator.search_space_size(), 10))
sys.exit(0)
# show internal parameter representation
if args.print_params:
cfg = manipulator.seed_config()
d = manipulator.parameters_dict(cfg)
params_dict ={}
for k in d:
cls = d[k].__class__.__name__
p = (k, d[k].search_space_size())
if cls in params_dict:
params_dict[cls].append(p)
else:
params_dict[cls] = [p]
for k in params_dict:
print(k, params_dict[k])
print()
sys.exit(0)
input_manager = measurement_interface.input_manager()
objective = measurement_interface.objective()
if not args.database:
#args.database = 'sqlite://' #in memory
if not os.path.isdir('uptune.opentuner.db'):
os.mkdir('uptune.opentuner.db')
args.database = 'sqlite:///' + os.path.join('uptune.opentuner.db',
socket.gethostname() + '.db')
if '://' not in args.database:
args.database = 'sqlite:///' + args.database
if not args.label:
args.label = 'unnamed'
#self.fake_commit = ('sqlite' in args.database)
self.fake_commit = True
self.args = args
self.engine, self.Session = resultsdb.connect(args.database)
self.session = self.Session()
self.tuning_run = None
self.search_driver_cls = search_driver
self.measurement_driver_cls = measurement_driver
self.measurement_interface = measurement_interface
self.input_manager = input_manager
self.manipulator = manipulator
self.objective = objective
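    # keep a pristine copy of the objective: the live one is handed to the
    # drivers, while the copy is what init() persists on the TuningRun row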
self.objective_copy = copy.copy(objective)
self.last_commit_time = time.time()
def init(self):
if self.tuning_run is None:
program_version = (self.measurement_interface
.db_program_version(self.session))
self.session.flush()
self.measurement_interface.prefix_hook(self.session)
self.tuning_run = (
resultsdb.models.TuningRun(
uuid=uuid.uuid4().hex,
name=self.args.label,
args=self.args,
start_date=datetime.now(),
program_version=program_version,
objective=self.objective_copy,
))
self.session.add(self.tuning_run)
driver_kwargs = {
'args': self.args,
'input_manager': self.input_manager,
'manipulator': self.manipulator,
'measurement_interface': self.measurement_interface,
'objective': self.objective,
'session': self.session,
'tuning_run_main': self,
'tuning_run': self.tuning_run,
'extra_seeds': self.measurement_interface.seed_configurations(),
'extra_criteria': self.measurement_interface.extra_convergence_criteria
}
self.search_driver = self.search_driver_cls(**driver_kwargs)
self.measurement_driver = self.measurement_driver_cls(**driver_kwargs)
self.measurement_interface.set_driver(self.measurement_driver)
self.input_manager.set_driver(self.measurement_driver)
self.tuning_run.machine_class = self.measurement_driver.get_machine_class()
self.tuning_run.input_class = self.input_manager.get_input_class()
def commit(self, force=False):
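    # throttled commit: with fake_commit set, a real commit happens at most
    # every ~30 seconds (or when forced); otherwise the session is only flushed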
if (force or not self.fake_commit or
time.time() - self.last_commit_time > 30):
self.session.commit()
self.last_commit_time = time.time()
else:
self.session.flush()
def main(self):
self.init()
try:
self.tuning_run.state = 'RUNNING'
self.commit(force=True)
self.search_driver.main()
if self.search_driver.best_result:
self.measurement_interface.save_final_config(
self.search_driver.best_result.configuration)
self.tuning_run.final_config = self.search_driver.best_result.configuration
self.tuning_run.state = 'COMPLETE'
except:
self.tuning_run.state = 'ABORTED'
raise
finally:
self.tuning_run.end_date = datetime.now()
self.commit(force=True)
self.session.close()
def results_wait(self, generation):
"""called by search_driver to wait for results"""
#single process version:
self.measurement_interface.pre_process()
self.measurement_driver.process_all()
self.measurement_interface.post_process()
def main(interface, args, *pargs, **kwargs):
if inspect.isclass(interface):
interface = interface(args=args, *pargs, **kwargs)
return TuningRunMain(interface, args).main()
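# Usage sketch (illustrative, not part of this module): main() is normally fed
# a MeasurementInterface subclass; the class, import path and method bodies
# below are assumptions for demonstration only.
#
#   from uptune.opentuner.measurement.interface import MeasurementInterface
#
#   class MyTuner(MeasurementInterface):
#     def manipulator(self):
#       ...  # return a ConfigurationManipulator describing the search space
#     def run(self, desired_result, input, limit):
#       ...  # evaluate desired_result.configuration.data, return a Result
#
#   main(MyTuner, args)  # args parsed with this module's argparser as a parent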
| 33.409692 | 102 | 0.637922 |
798f14f983ba4a3a2579b84968aea5d23db0ffaf | 47,378 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/operations/_virtual_machine_scale_set_vms_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/operations/_virtual_machine_scale_set_vms_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 15 | 2019-07-12T18:18:04.000Z | 2019-07-25T20:55:51.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/operations/_virtual_machine_scale_set_vms_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineScaleSetVMsOperations(object):
"""VirtualMachineScaleSetVMsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2016_03_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _reimage_initial(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.OperationStatusResponse"
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatusResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-30"
# Construct URL
url = self._reimage_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reimage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage'} # type: ignore
def begin_reimage(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Reimages (upgrade the operating system) a specific virtual machine in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either OperationStatusResponse or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._reimage_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reimage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage'} # type: ignore
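    # Caller-side sketch (assumed, not generated): the returned LROPoller can
    # be suspended and resumed across processes via its continuation token:
    #   token = ops.begin_reimage(rg, vmss, "0").continuation_token()
    #   ops.begin_reimage(rg, vmss, "0", continuation_token=token).result()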
def _deallocate_initial(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.OperationStatusResponse"
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatusResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-30"
# Construct URL
url = self._deallocate_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_deallocate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate'} # type: ignore
def begin_deallocate(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Deallocates a specific virtual machine in a VM scale set. Shuts down the virtual machine and
releases the compute resources it uses. You are not billed for the compute resources of this
virtual machine once it is deallocated.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either OperationStatusResponse or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._deallocate_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_deallocate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.OperationStatusResponse"
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatusResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-30"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Deletes a virtual machine from a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either OperationStatusResponse or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}'} # type: ignore
def get(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.VirtualMachineScaleSetVM"
"""Gets a virtual machine from a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineScaleSetVM, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSetVM
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineScaleSetVM"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-30"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineScaleSetVM', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}'} # type: ignore
def get_instance_view(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.VirtualMachineScaleSetVMInstanceView"
"""Gets the status of a virtual machine from a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineScaleSetVMInstanceView, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSetVMInstanceView
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineScaleSetVMInstanceView"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-30"
# Construct URL
url = self.get_instance_view.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineScaleSetVMInstanceView', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_instance_view.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/instanceView'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
filter=None, # type: Optional[str]
select=None, # type: Optional[str]
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.VirtualMachineScaleSetVMListResult"]
"""Gets a list of all virtual machines in a VM scale sets.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the VM scale set.
:type virtual_machine_scale_set_name: str
:param filter: The filter to apply to the operation.
:type filter: str
        :param select: The list parameters. Allowed values: 'instanceView', 'instanceView/statuses'.
:type select: str
:param expand: The expand expression to apply to the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineScaleSetVMListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineScaleSetVMListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineScaleSetVMListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-30"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualMachineScaleSetVMListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines'} # type: ignore
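    # Note: the ItemPaged returned by list() is lazy; no request is issued
    # until iteration begins, e.g. ``for vm in ops.list(rg, vmss): ...``.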
def _power_off_initial(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.OperationStatusResponse"
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatusResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-30"
# Construct URL
url = self._power_off_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_power_off_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff'} # type: ignore
def begin_power_off(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Power off (stop) a virtual machine in a VM scale set. Note that resources are still attached
and you are getting charged for the resources. Instead, use deallocate to release resources and
avoid charges.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either OperationStatusResponse or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._power_off_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_power_off.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff'} # type: ignore
def _restart_initial(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.OperationStatusResponse"
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatusResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-30"
# Construct URL
url = self._restart_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart'} # type: ignore
def begin_restart(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Restarts a virtual machine in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either OperationStatusResponse or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._restart_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart'} # type: ignore
def _start_initial(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.OperationStatusResponse"
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatusResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-03-30"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vmScaleSetName': self._serialize.url("vm_scale_set_name", vm_scale_set_name, 'str'),
'instanceId': self._serialize.url("instance_id", instance_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start'} # type: ignore
def begin_start(
self,
resource_group_name, # type: str
vm_scale_set_name, # type: str
instance_id, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Starts a virtual machine in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either OperationStatusResponse or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start'} # type: ignore
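# Usage sketch (an assumption about caller code, not generated content): these
# operations are reached through the service client rather than by constructing
# this class directly, roughly:
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.compute import ComputeManagementClient
#
#   client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   poller = client.virtual_machine_scale_set_vms.begin_restart("my-rg", "my-vmss", "0")
#   poller.result()  # block until the long-running operation completes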
| 49.455115 | 237 | 0.666048 |
c94dabbdff70fb5f5f8c6bd413793010ba08f0ba | 417 | py | Python | webappdjango/wsgi.py | atanbhardwaj/Session_Python_Django | 249bce0e15b45aa9f6b02a5f7722dbafeabd5053 | [
"MIT"
] | null | null | null | webappdjango/wsgi.py | atanbhardwaj/Session_Python_Django | 249bce0e15b45aa9f6b02a5f7722dbafeabd5053 | [
"MIT"
] | null | null | null | webappdjango/wsgi.py | atanbhardwaj/Session_Python_Django | 249bce0e15b45aa9f6b02a5f7722dbafeabd5053 | [
"MIT"
] | null | null | null | """
WSGI config for webappdjango project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webappdjango.settings')
application = get_wsgi_application()
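# A WSGI server imports this module's ``application`` callable to serve the
# project; for example (the server choice is an assumption, not part of this
# config):
#   gunicorn webappdjango.wsgi:application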
| 24.529412 | 79 | 0.760192 |
edf6f5be297b9907de813be43688125ab6edc968 | 3,250 | py | Python | mcrouter/test/McrouterTestCase.py | mbrickn/mcrouter | 9ac4d710723d82cec310f6eaa82eba005858513c | [
"MIT"
] | null | null | null | mcrouter/test/McrouterTestCase.py | mbrickn/mcrouter | 9ac4d710723d82cec310f6eaa82eba005858513c | [
"MIT"
] | null | null | null | mcrouter/test/McrouterTestCase.py | mbrickn/mcrouter | 9ac4d710723d82cec310f6eaa82eba005858513c | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import time
from mcrouter.test.MCProcess import Mcrouter, Memcached, MockMemcached
class McrouterTestCase(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(McrouterTestCase, self).__init__(*args, **kwargs)
self.use_mock_mc = False
def ensureClassVariables(self):
if 'open_servers' not in self.__dict__:
self.open_servers = []
if 'open_ports' not in self.__dict__:
self.open_ports = []
def add_server(self, server, logical_port=None):
self.ensureClassVariables()
server.ensure_connected()
self.open_servers.append(server)
self.open_ports.append(server.getport())
if logical_port:
if 'port_map' not in self.__dict__:
self.port_map = {}
if logical_port in self.port_map:
raise Exception("logical_port %d was already used"
% logical_port)
self.port_map[logical_port] = server.getport()
return server
def add_mcrouter(self, config, route=None, extra_args=None,
replace_map=None, bg_mcrouter=False, replace_ports=True):
self.ensureClassVariables()
substitute_ports = None
if replace_ports:
substitute_ports = (self.open_ports
if 'port_map' not in self.__dict__
else self.port_map)
mcrouter = Mcrouter(config,
substitute_config_ports=substitute_ports,
default_route=route,
extra_args=extra_args,
replace_map=replace_map)
mcrouter.ensure_connected()
if bg_mcrouter:
self.open_ports.append(mcrouter.getport())
if 'open_mcrouters' not in self.__dict__:
self.open_mcrouters = []
self.open_mcrouters.append(mcrouter)
return mcrouter
def make_memcached(self):
return MockMemcached() if self.use_mock_mc else Memcached()
def get_open_ports(self):
self.ensureClassVariables()
return self.open_ports
def tearDown(self):
# Stop mcrouters first to close connections to servers
# (some mock severs might be blocked on recv() calls)
if 'open_mcrouters' in self.__dict__:
for mcr in self.open_mcrouters:
mcr.terminate()
if 'open_servers' in self.__dict__:
for server in self.open_servers:
server.terminate()
def eventually_get(self, key, expVal, timeout=5):
start_time = time.time()
interval = 0.5
        while True:
            if self.mc.get(key) == expVal:
                return True
            time.sleep(interval)
            if time.time() - start_time > timeout:
                return False
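# Illustrative sketch (not from the original suite): a concrete test case built
# on this base class typically wires backing servers to a mcrouter instance;
# the config path below is a placeholder assumption.
#
#   class TestBasic(McrouterTestCase):
#       config = './mcrouter/test/test_basic.json'
#       def setUp(self):
#           self.mc = self.add_server(self.make_memcached())
#       def test_set_get(self):
#           mcrouter = self.add_mcrouter(self.config)
#           self.assertTrue(mcrouter.set('key', 'value'))
#           self.assertEqual(mcrouter.get('key'), 'value')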
| 34.210526 | 78 | 0.608923 |
e9afc55e1e79e5a78803d4efbe7fe02574d99b6c | 21,207 | py | Python | mbed_flasher/main.py | bridadan/mbed-flasher | 39d27a41926b7a3c8e8c29690cae9bf48583eb1d | [
"Apache-2.0"
] | null | null | null | mbed_flasher/main.py | bridadan/mbed-flasher | 39d27a41926b7a3c8e8c29690cae9bf48583eb1d | [
"Apache-2.0"
] | null | null | null | mbed_flasher/main.py | bridadan/mbed-flasher | 39d27a41926b7a3c8e8c29690cae9bf48583eb1d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Copyright 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
import argparse
import logging
import logging.handlers
import os
from os.path import isdir, join
import json
import time
from mbed_flasher.common import Common, FlashError, EraseError, ResetError, GeneralFatalError,\
check_file, check_file_extension, check_file_exists
from mbed_flasher.flash import Flash
from mbed_flasher.erase import Erase
from mbed_flasher.reset import Reset
from mbed_flasher.return_codes import EXIT_CODE_SUCCESS
from mbed_flasher.return_codes import EXIT_CODE_UNHANDLED_EXCEPTION
from mbed_flasher.return_codes import EXIT_CODE_NOT_SUPPORTED_PLATFORM
from mbed_flasher.return_codes import EXIT_CODE_TARGET_ID_MISSING
from mbed_flasher.return_codes import EXIT_CODE_DEVICES_MISSING
from mbed_flasher.return_codes import EXIT_CODE_COULD_NOT_MAP_DEVICE
from mbed_flasher.return_codes import EXIT_CODE_PLATFORM_REQUIRED
LOGS_TTL = 172800  # 2 days; log files older than this are removed at CLI start-up
def get_subparser(subparsers, name, func, **kwargs):
"""
Create a subcmd parser for command "name".
Arguments
subparsers The subparsers object from add_subparsers method
name Name of the command this subparser is for
kwargs Keyword arguments are passed to the subparser.add_parser call
Returns
subparser object
"""
tmp_parser = subparsers.add_parser(name, **kwargs)
tmp_parser.set_defaults(func=func)
return tmp_parser
def get_resource_subparser(subparsers, name, func, **kwargs):
"""
Create a resource specific subcmd parser for command "name".
    Currently a thin wrapper around get_subparser(); the resource-specific
    arguments (such as --tid) are added by each caller.
Arguments
subparsers The subparsers object from add_subparsers method
name Name of the command this subparser is for
kwargs Keyword arguments are passed to the subparser.add_parser call
Returns
subparser object
"""
tmp_parser = get_subparser(subparsers, name, func=func, **kwargs)
return tmp_parser
class FlasherCLI(object):
"""
FlasherCLI module
"""
def __init__(self, args=None):
self.logger = logging.getLogger('mbed-flasher')
self.logger.handlers = []
self.logs_folder = join(os.getcwd(), 'logs')
if not isdir(self.logs_folder):
os.mkdir(self.logs_folder)
log_file = 'logs/%s_mbed-flasher.txt' % time.strftime("%Y%m%d-%H%M%S")
self.log_file_handler = logging.handlers.RotatingFileHandler(log_file)
self.log_file_handler.setFormatter(
logging.Formatter(
'%(asctime)s [%(levelname)s]'
'(%(name)s:%(funcName)s:%(lineno)d):%(thread)d: %(message)s'))
self.log_file_handler.setLevel(logging.DEBUG)
self.logger.addHandler(self.log_file_handler)
# also log to the console at a level determined by the --verbose flag
self.console_handler = logging.StreamHandler() # sys.stderr
# set later by set_log_level_from_verbose() in interactive sessions
self.console_handler.setLevel(logging.CRITICAL)
self.console_handler.setFormatter(
logging.Formatter('[%(levelname)s](%(name)s): %(message)s'))
self.logger.addHandler(self.console_handler)
self.logger.info('Writing logs to file %s', log_file)
self.logger.setLevel(logging.DEBUG)
if args is None:
args = sys.argv[1:]
self.args = self.argparser_setup(args)
self.set_log_level_from_verbose()
# always write everything to the rotating log files
if not os.path.exists('logs'):
os.mkdir('logs')
files_to_be_removed = []
        old_logs = time.time() - LOGS_TTL  # filename-timestamp cutoff for deletion
for root, _, files in os.walk('logs/'):
for name in files:
if str(name).find('_mbed-flasher.txt') != -1:
if old_logs > time.mktime(
time.strptime(str(name).split('_')[0], "%Y%m%d-%H%M%S")):
files_to_be_removed.append(str(os.path.join(root, name)))
elif str(name).find('mbed-flasher.log') != -1:
files_to_be_removed.append(str(os.path.join(root, name)))
if files_to_be_removed:
for filename in files_to_be_removed:
try:
os.remove(filename)
except OSError:
self.logger.exception("Failed to remove log file: %s", filename)
def execute(self):
"""
:return: 0 or args.func()
"""
if self.args.func:
return self.args.func(self.args)
self.parser.print_usage()
return EXIT_CODE_SUCCESS
def argparser_setup(self, sysargs):
"""! Configure CLI (Command Line options) options
@return Returns ArgumentParser's tuple of (options, arguments)
@details Add new command line options
"""
parser = argparse.ArgumentParser('mbedflash',
description="For specific command help, "
"run: mbedflash <command> --help")
parser.add_argument('-v', '--verbose',
dest="verbose",
action="count",
help="Verbose level... repeat up to three times.")
parser.add_argument('-s', '--silent',
dest="silent",
default=False,
action="store_true",
help="Silent - only errors will be printed.")
subparsers = parser.add_subparsers(title='command',
dest='command',
help='command help',
metavar='<command>')
subparsers.required = True
get_subparser(subparsers,
'list',
func=self.subcmd_list_platforms,
help='Prints a list of supported platforms.')
get_subparser(subparsers,
'flashers',
func=self.subcmd_list_flashers,
help='Prints a list of supported flashers.')
get_subparser(subparsers,
'version',
func=self.subcmd_version_handler,
help='Display version information')
# Initialize flash command
parser_flash = get_resource_subparser(subparsers,
'flash',
func=self.subcmd_flash_handler,
help='Flash given resource')
parser_flash.add_argument('-i', '--input',
help='Binary input to be flashed.',
default=None, metavar='INPUT')
parser_flash.add_argument('--tid', '--target_id',
                                  help="Target to be flashed; "
                                       "'all' will flash all connected devices "
                                       "with the given platform-name, "
                                       "also multiple targets can be given. "
                                       "A short target_id matches boards by prefix",
default=None, metavar='TARGET_ID', action='append')
parser_flash.add_argument('--target_filename',
help='Custom target filename',
default=None, metavar='TARGET_FILENAME')
parser_flash.add_argument('-t', '--platform_name',
help='Platform of the target device(s)',
default=None)
parser_flash.add_argument('--no-reset',
help='Do not reset device before or after flashing',
default=None, dest='no_reset', action='store_true')
parser_flash.add_argument('method', help='<simple|pyocd|edbg>, used for flashing',
metavar='method',
choices=['simple', 'pyocd', 'edbg'],
nargs='?')
# Initialize reset command
parser_reset = get_resource_subparser(subparsers, 'reset',
func=self.subcmd_reset_handler,
help='Reset given resource')
parser_reset.add_argument('--tid', '--target_id',
                                  help="Target to be reset or 'all'; "
                                       'also multiple targets can be given. '
                                       'Does not continue to the next device in case of failures. '
                                       'A short target_id matches boards by prefix',
default=None, metavar='TARGET_ID', action='append')
parser_reset.add_argument('method',
help='<simple|pyocd|edbg>, used for reset',
metavar='method',
choices=['simple', 'pyocd', 'edbg'],
nargs='?')
# Initialize erase command
parser_erase = get_resource_subparser(subparsers, 'erase',
func=self.subcmd_erase_handler,
help='Erase given resource')
parser_erase.add_argument('--tid', '--target_id',
                                  help="Target to be erased or 'all', "
'also multiple targets can be given. '
'Short target_id matches boards by prefix',
default=None, metavar='TARGET_ID', action='append')
parser_erase.add_argument('--no-reset',
help='Do not reset device after erase',
default=None, dest='no_reset', action='store_true')
parser_erase.add_argument('method',
help='<simple|pyocd|edbg>, used for erase',
metavar='method',
choices=['simple', 'pyocd', 'edbg'],
nargs='?')
#parser.add_argument('-m', '--mapping',
# dest='device_mapping_table', help='Device mapping table.')
args = parser.parse_args(args=sysargs)
if 'method' in args:
if args.method is None:
args.method = 'simple'
self.parser = parser
return args
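    # Example invocations accepted by the parser above (illustrative):
    #   mbedflash list
    #   mbedflash -vv flash -i build.bin --tid 0240000028 simple
    #   mbedflash erase --tid all simple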
def set_log_level_from_verbose(self):
""" set logging level, silent, or some of verbose level
:param args: command line arguments
"""
if self.args.silent:
self.console_handler.setLevel('NOTSET')
elif not self.args.verbose:
self.console_handler.setLevel('ERROR')
elif self.args.verbose == 1:
self.console_handler.setLevel('WARNING')
elif self.args.verbose == 2:
self.console_handler.setLevel('INFO')
elif self.args.verbose >= 3:
self.console_handler.setLevel('DEBUG')
else:
self.logger.critical("UNEXPLAINED NEGATIVE COUNT!")
    # the cli decorator doesn't need self as an arg,
# operation wrapper is used
# pylint: disable=no-self-argument, not-callable
def cli_decorator(operation):
"""
cli decorator
"""
def operation_wrapper(self, args):
"""
wrapper
"""
retcode = operation(self, args)
return retcode
return operation_wrapper
# pylint: disable=too-many-return-statements
@cli_decorator
def subcmd_flash_handler(self, args):
"""
flash command handler
"""
if not args.tid:
msg = "Target_id is missing"
raise FlashError(message=msg,
return_code=EXIT_CODE_TARGET_ID_MISSING)
check_file(self.logger, args.target_filename or args.input)
check_file(self.logger, args.input)
check_file_exists(self.logger, args.input)
check_file_extension(self.logger, args.target_filename or args.input)
flasher = Flash()
available = Common(self.logger).get_available_device_mapping(
flasher.get_all_flashers(), args.tid)
available_target_ids = []
retcode = EXIT_CODE_SUCCESS
if args.platform_name:
if args.platform_name not in flasher.get_supported_targets():
self.logger.error("Not supported platform: %s", args.platform_name)
self.logger.error("Supported platforms: %s", flasher.get_supported_targets())
raise FlashError(message="Platform {} not supported".format(args.platform_name),
return_code=EXIT_CODE_NOT_SUPPORTED_PLATFORM)
if 'all' in args.tid:
retcode = flasher.flash(build=args.input, target_id='all',
platform_name=args.platform_name,
target_filename=args.target_filename,
method=args.method, no_reset=args.no_reset)
if len(available) <= 0:
msg = "Could not find any connected device"
raise FlashError(message=msg, return_code=EXIT_CODE_DEVICES_MISSING)
available_platforms, target_ids_to_flash = \
self.prepare_platforms_and_targets(available, args.tid, available_target_ids)
if not target_ids_to_flash:
self.logger.error("Could not find given target_id from attached devices")
self.logger.error("Available target_ids: %s", available_target_ids)
raise FlashError(message="Could not map device",
return_code=EXIT_CODE_COULD_NOT_MAP_DEVICE)
elif len(available_platforms) > 1:
if not args.platform_name:
self.logger.error("More than one platform detected for given target_id")
self.logger.error("Please specify the platform with -t <PLATFORM_NAME>")
self.logger.error("Found platforms: %s", available_platforms)
raise FlashError(message="More than one platform detected for given target id",
return_code=EXIT_CODE_PLATFORM_REQUIRED)
else:
retcode = flasher.flash(build=args.input,
target_id=target_ids_to_flash,
target_filename=args.target_filename,
platform_name=available_platforms[0],
method=args.method,
no_reset=args.no_reset)
return retcode
@staticmethod
def prepare_platforms_and_targets(available, tid, available_target_ids):
"""
prepare available platforms and target ids to flash
"""
available_platforms = []
target_ids_to_flash = []
for device in available:
available_target_ids.append(device['target_id'])
if isinstance(tid, list):
for item in tid:
if device['target_id'] == item \
or device['target_id'].startswith(item):
if device['target_id'] not in target_ids_to_flash:
target_ids_to_flash.append(device['target_id'])
if 'platform_name' in device \
and device['platform_name'] not in available_platforms:
available_platforms.append(device['platform_name'])
else:
if device['target_id'] == tid \
or device['target_id'].startswith(tid):
if device['target_id'] not in target_ids_to_flash:
target_ids_to_flash.append(device['target_id'])
if 'platform_name' in device and \
device['platform_name'] not in available_platforms:
available_platforms.append(device['platform_name'])
return available_platforms, target_ids_to_flash
def subcmd_reset_handler(self, args):
"""
reset command handler
"""
resetter = Reset()
if not args.tid:
msg = "Target_id is missing"
raise ResetError(message=msg, return_code=EXIT_CODE_TARGET_ID_MISSING)
ids = self.parse_id_to_devices(args.tid)
return resetter.reset(target_id=ids, method=args.method)
def subcmd_erase_handler(self, args):
"""
erase command handler
"""
eraser = Erase()
if not args.tid:
msg = "Target_id is missing"
raise EraseError(message=msg, return_code=EXIT_CODE_TARGET_ID_MISSING)
ids = self.parse_id_to_devices(args.tid)
return eraser.erase(target_id=ids, no_reset=args.no_reset, method=args.method)
# args not used, but the logic to call sub cmd handler is passing two args
# pylint: disable=unused-argument
def subcmd_version_handler(self, args):
"""
version command handler
"""
import pkg_resources # part of setuptools
versions = pkg_resources.require("mbed-flasher")
if self.args.verbose:
for version in versions:
print(version)
else:
print(versions[0].version)
return EXIT_CODE_SUCCESS
# pylint: disable=no-self-use
def subcmd_list_platforms(self, args):
"""
list platform command
"""
flasher = Flash()
print(json.dumps(flasher.get_supported_targets()))
return EXIT_CODE_SUCCESS
def subcmd_list_flashers(self, args):
"""
list flasher command handler
"""
flasher = Flash()
print(json.dumps(flasher.get_supported_flashers()))
return EXIT_CODE_SUCCESS
def parse_id_to_devices(self, tid):
"""
:param tid: target id
"""
flasher = Flash()
available = Common(self.logger).get_available_device_mapping(
flasher.get_all_flashers(), tid)
target_ids = []
available_target_ids = []
if not available:
msg = "Could not find any connected device"
raise GeneralFatalError(message=msg, return_code=EXIT_CODE_DEVICES_MISSING)
if 'all' in tid:
for device in available:
target_ids.append(device['target_id'])
else:
for item in tid:
for device in available:
available_target_ids.append(device['target_id'])
if device['target_id'] == item or \
device['target_id'].startswith(item):
if device['target_id'] not in target_ids:
target_ids.append(device['target_id'])
if not target_ids:
self.logger.error("Could not find given target_id from attached devices")
self.logger.error("Available target_ids: %s", available_target_ids)
raise GeneralFatalError(message="Could not map device",
return_code=EXIT_CODE_COULD_NOT_MAP_DEVICE)
if len(target_ids) == 1:
return target_ids[0]
return target_ids
def mbedflash_main():
"""
Function used to drive CLI (command line interface) application.
Function exits back to command line with ERRORLEVEL
Returns:
Function exits with success-code
"""
cli = FlasherCLI()
# Catch all exceptions to be able to set specific error format.
# pylint: disable=broad-except
try:
retcode = cli.execute()
if retcode:
cli.logger.error("Failed with return code: %s", str(retcode))
exit(retcode)
except (FlashError, EraseError, ResetError, GeneralFatalError) as error:
cli.logger.error("Failed: %s", error.message)
exit(error.return_code)
except Exception as error:
cli.logger.error("Failed with unknown reason: %s", str(error))
exit(EXIT_CODE_UNHANDLED_EXCEPTION)
if __name__ == '__main__':
mbedflash_main()
| 41.994059 | 100 | 0.570613 |
af23df9c6c86292150e4137280ce2c667618ecf8 | 7,633 | py | Python | Inference/InferConsensusGrooming.py | KumarLabJax/MouseGrooming | 811b0382592c5a4010f7bc90468105c4a1ba452f | ["MIT"] | 4 | 2021-04-07T11:15:28.000Z | 2021-11-15T16:45:59.000Z | Inference/InferConsensusGrooming.py | KumarLabJax/MouseGrooming | 811b0382592c5a4010f7bc90468105c4a1ba452f | ["MIT"] | null | null | null | Inference/InferConsensusGrooming.py | KumarLabJax/MouseGrooming | 811b0382592c5a4010f7bc90468105c4a1ba452f | ["MIT"] | null | null | null |
import keras
from keras.models import load_model, Model
from keras.layers import Input, concatenate
from keras.layers.core import Reshape
import imageio
import os
import numpy as np
from scipy.misc import imresize
import sys, getopt, re, argparse
import tensorflow as tf
import matplotlib.cm as cm
from time import time
from CompressNPY import read_data
import cv2
# Keras' definition converted to numpy...
def softmax(x, axis=-1):
ndim = np.ndim(x)
if ndim >= 2:
e = np.exp(x - np.max(x, axis=axis, keepdims=True))
s = np.sum(e, axis=axis, keepdims=True)
return e / s
else:
raise ValueError('Cannot apply softmax to a tensor that is 1D')
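# Example: softmax(np.array([[1., 2.]])) is approximately [[0.2689, 0.7311]];
# each row sums to 1.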
def load_multigpu_model(model_to_load):
mgpu_net = load_model(model_to_load, custom_objects={'tf':tf}, compile=False)
return mgpu_net.layers[-2]
# Loads the models as-is
def consensus_models(list_of_models, model_load_function=load_model):
all_models = [model_load_function(model_name) for model_name in list_of_models]
new_model_input = Input(shape=(16, 112, 112, 1))
all_outputs = [indv_model(new_model_input) for indv_model in all_models]
if len(all_outputs)==1:
new_model = Model(input=new_model_input, output=all_outputs[0])
else:
new_model = Model(input=new_model_input, output=Reshape((len(list_of_models),2))(concatenate(all_outputs, axis=-1)))
new_model.compile('adam','categorical_crossentropy')
return new_model
# Actually removes the last layer in the network (softmax)...
def consensus_models_softmax(list_of_models, model_load_function=load_model):
all_models = [model_load_function(model_name) for model_name in list_of_models]
for model in all_models:
model.pop()
new_model_input = Input(shape=(16, 112, 112, 1))
all_outputs = [indv_model(new_model_input) for indv_model in all_models]
if len(all_outputs)==1:
new_model = Model(input=new_model_input, output=all_outputs[0])
else:
new_model = Model(input=new_model_input, output=Reshape((len(list_of_models),2))(concatenate(all_outputs, axis=-1)))
new_model.compile('adam','categorical_crossentropy')
return new_model
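# Assemble the eight orientation variants of a clip (combinations of flips along
# the leading axes and a spatial transpose); their predictions are averaged
# downstream into a single consensus score.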
def flip_input_batch(batch_input_single):
assert len(np.shape(batch_input_single))==4 or (len(np.shape(batch_input_single))==5 and np.shape(batch_input_single)[0]==1)
if len(np.shape(batch_input_single))==5:
transpose_shape = (0,1,3,2,4)
else:
transpose_shape = (0,2,1,3)
batch_input = np.reshape([batch_input_single,
np.flipud(batch_input_single),
np.fliplr(batch_input_single),
np.fliplr(np.flipud(batch_input_single)),
np.transpose(batch_input_single,transpose_shape),
np.transpose(np.flipud(batch_input_single),transpose_shape),
np.transpose(np.fliplr(batch_input_single),transpose_shape),
np.transpose(np.fliplr(np.flipud(batch_input_single)),transpose_shape)], [8, np.shape(batch_input_single)[-4], np.shape(batch_input_single)[-3], np.shape(batch_input_single)[-2], np.shape(batch_input_single)[-1]])
return batch_input
# Function to process all the data based on an image iterator
def process_video_frames(net, im_iter, video_pattern):
file_raw = open(video_pattern + '_raw.npy', 'ab')
file_consensus = open(video_pattern + '_meancons.npy', 'ab')
input_size = 112
time_depth = 16
frames = [np.zeros([input_size, input_size, 1]) for x in range(time_depth)]
framenum = 0
while True:
try:
start_time = time()
frames[0:time_depth-1] = np.copy(frames[1:time_depth])
frame = np.uint8(next(im_iter))
frame = imresize(frame, (input_size, input_size, 3))
frame = frame[:,:,0]
frame = np.reshape(frame, [input_size, input_size, 1])
frames[time_depth-1] = frame
batch_input_single = np.reshape(frames,[time_depth, input_size, input_size, 1])
batch_input = flip_input_batch(batch_input_single)
# Time logging...
if framenum % 1000 == 0:
print('Batch ' + str(framenum))
print('Batch Assembled in: ' + str(time()-start_time))
start_time = time()
# Run the prediction
results_nosoftmax = net.predict(batch_input, batch_size=8)
# Time logging...
if framenum % 1000 == 0:
print('Batch Computed in: ' + str(time()-start_time))
start_time = time()
# Compute the other items from the prediction (softmax, argmax)
results_nosoftmax = np.reshape(results_nosoftmax, [-1, 2])
results = softmax(results_nosoftmax)
predictions = np.argmax(results, 1)
# Consensus predictions
mean_pred = np.mean(results[:,1])
# mean_pred = np.mean(results[:,1]) > 0.5
# vote_pred = np.sum(predictions) > int(np.shape(results)[0]/2) # only majority, not half
# maxpool_pred = np.argmax(np.diag(results_nosoftmax[np.argmax(results_nosoftmax,0)])) == 1
            # Just in case we get a better post-processing than mean_pred...
raw_out = np.reshape(results_nosoftmax, -1)
np.save(file_raw, raw_out, allow_pickle=False)
np.save(file_consensus, mean_pred, allow_pickle=False)
# Time logging...
if framenum % 1000 == 0:
print('Batch Saved in: ' + str(time()-start_time))
framenum = framenum + 1
except StopIteration:
break
file_raw.close()
file_consensus.close()
# Wrapper for cropped video processing
def process_cropped_movie(net, video_pattern):
reader = imageio.get_reader(video_pattern+'.avi')
im_iter = reader.iter_data()
process_video_frames(net, im_iter, video_pattern)
reader.close()
# Wrapper for not-cropped video processing
def process_full_movie(net, video_pattern, ellfit_extension):
reader = imageio.get_reader(video_pattern+'.avi')
im_iter = reader.iter_data()
track_data = read_data(video_pattern + ellfit_extension)
frame_iter = crop_frame(im_iter, track_data)
process_video_frames(net, frame_iter, video_pattern)
reader.close()
# Applies a crop based on center location in tracking data
def crop_frame(im_iter, track_data):
track_iter = np.nditer(track_data)
while True:
frame = next(im_iter)
ell_data = np.array([next(track_iter) for x in range(6)])
# Apply the crop
affine_mat = np.float32([[1,0,-ell_data[0]+112/2],[0,1,-ell_data[1]+112/2]])
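        # affine_mat translates the frame so the tracked centroid
        # (ell_data[0], ell_data[1]) lands at the center of the 112x112 crop.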
        cropped = cv2.warpAffine(frame, affine_mat, (112, 112))
        yield cropped
def main(argv):
parser = argparse.ArgumentParser(description='Inference 3DConv Models')
parser.add_argument('--mov_name', help='Name of movie to process')
parser.add_argument('--mov_list', help='File containing a list of movies to process')
parser.add_argument('--fullframe_video', help='Video is full-frame (not cropped)', dest='video_cropped', action='store_false', default=True)
parser.add_argument('--ellfit_extension', help='Ellipse-fit data extension', default='_ellfit.npy')
parser.add_argument('--network', '--networks', help='Networks to use during inference', default='3Dconv_Keras.h5', nargs='+')
args = parser.parse_args()
arg_dict = args.__dict__
# Actually load the models for full consensus
net = consensus_models_softmax(args.network, load_multigpu_model)
if 'mov_name' in arg_dict.keys() and arg_dict['mov_name'] is not None:
video_pattern = os.path.splitext(args.mov_name)[0]
if args.video_cropped:
process_cropped_movie(net, video_pattern)
else:
process_full_movie(net, video_pattern, args.ellfit_extension)
elif 'mov_list' in arg_dict.keys() and arg_dict['mov_list'] is not None:
f = open(args.mov_list, 'r')
lines = f.read().split('\n')
lines = lines[0:-1] # Remove the last split '' string
f.close()
list_of_vids = [os.path.splitext(line)[0] for line in lines]
for video_pattern in list_of_vids:
if args.video_cropped:
process_cropped_movie(net, video_pattern)
else:
process_full_movie(net, video_pattern, args.ellfit_extension)
if __name__ == '__main__':
main(sys.argv[1:])
| 39.345361 | 220 | 0.741124 |
77673f06eb2ca856c42db364dbaa556ab5a6a49f | 8,487 | py | Python | Lib/hashlib.py | idobatter/cpython | c7b03e7b57cedccb77e37f65f9bbcb82050c2bb5 | ["PSF-2.0"] | 9 | 2015-11-06T02:38:00.000Z | 2021-11-14T05:34:23.000Z | Lib/hashlib.py | idobatter/cpython | c7b03e7b57cedccb77e37f65f9bbcb82050c2bb5 | ["PSF-2.0"] | null | null | null | Lib/hashlib.py | idobatter/cpython | c7b03e7b57cedccb77e37f65f9bbcb82050c2bb5 | ["PSF-2.0"] | 2 | 2019-08-03T20:16:15.000Z | 2020-03-20T21:51:40.000Z |
#. Copyright (C) 2005-2010 Gregory P. Smith (greg@krypto.org)
# Licensed to PSF under a Contributor Agreement.
#
__doc__ = """hashlib module - A common interface to many hash functions.
new(name, data=b'') - returns a new hash object implementing the
given hash function; initializing the hash
using the given binary data.
Named constructor functions are also available, these are faster
than using new(name):
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
More algorithms may be available on your platform but the above are guaranteed
to exist. See the algorithms_guaranteed and algorithms_available attributes
to find out what algorithm names can be passed to new().
NOTE: If you want the adler32 or crc32 hash functions they are available in
the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
Hash objects have these methods:
- update(arg): Update the hash object with the bytes in arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the bytes passed to the update() method
so far.
- hexdigest(): Like digest() except the digest is returned as a unicode
object of double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.md5()
>>> m.update(b"Nobody inspects")
>>> m.update(b" the spammish repetition")
>>> m.digest()
b'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9'
More condensed:
>>> hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
# This tuple and __get_builtin_constructor() must be modified if a new
# always available algorithm is added.
__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512',
'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512')
algorithms_guaranteed = set(__always_supported)
algorithms_available = set(__always_supported)
__all__ = __always_supported + ('new', 'algorithms_guaranteed',
'algorithms_available', 'pbkdf2_hmac')
__builtin_constructor_cache = {}
def __get_builtin_constructor(name):
cache = __builtin_constructor_cache
constructor = cache.get(name)
if constructor is not None:
return constructor
try:
if name in ('SHA1', 'sha1'):
import _sha1
cache['SHA1'] = cache['sha1'] = _sha1.sha1
elif name in ('MD5', 'md5'):
import _md5
cache['MD5'] = cache['md5'] = _md5.md5
elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
import _sha256
cache['SHA224'] = cache['sha224'] = _sha256.sha224
cache['SHA256'] = cache['sha256'] = _sha256.sha256
elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
import _sha512
cache['SHA384'] = cache['sha384'] = _sha512.sha384
cache['SHA512'] = cache['sha512'] = _sha512.sha512
elif name in {'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512',
'SHA3_224', 'SHA3_256', 'SHA3_384', 'SHA3_512'}:
import _sha3
cache['SHA3_224'] = cache['sha3_224'] = _sha3.sha3_224
cache['SHA3_256'] = cache['sha3_256'] = _sha3.sha3_256
cache['SHA3_384'] = cache['sha3_384'] = _sha3.sha3_384
cache['SHA3_512'] = cache['sha3_512'] = _sha3.sha3_512
except ImportError:
pass # no extension module, this hash is unsupported.
constructor = cache.get(name)
if constructor is not None:
return constructor
raise ValueError('unsupported hash type ' + name)
def __get_openssl_constructor(name):
try:
f = getattr(_hashlib, 'openssl_' + name)
# Allow the C module to raise ValueError. The function will be
# defined but the hash not actually available thanks to OpenSSL.
f()
# Use the C function directly (very fast)
return f
except (AttributeError, ValueError):
return __get_builtin_constructor(name)
def __py_new(name, data=b''):
"""new(name, data=b'') - Return a new hashing object using the named algorithm;
optionally initialized with data (which must be bytes).
"""
return __get_builtin_constructor(name)(data)
def __hash_new(name, data=b''):
"""new(name, data=b'') - Return a new hashing object using the named algorithm;
optionally initialized with data (which must be bytes).
"""
try:
return _hashlib.new(name, data)
except ValueError:
# If the _hashlib module (OpenSSL) doesn't support the named
# hash, try using our builtin implementations.
# This allows for SHA224/256 and SHA384/512 support even though
# the OpenSSL library prior to 0.9.8 doesn't provide them.
return __get_builtin_constructor(name)(data)
try:
import _hashlib
new = __hash_new
__get_hash = __get_openssl_constructor
algorithms_available = algorithms_available.union(
_hashlib.openssl_md_meth_names)
except ImportError:
new = __py_new
__get_hash = __get_builtin_constructor
try:
# OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA
from _hashlib import pbkdf2_hmac
except ImportError:
_trans_5C = bytes((x ^ 0x5C) for x in range(256))
_trans_36 = bytes((x ^ 0x36) for x in range(256))
def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None):
"""Password based key derivation function 2 (PKCS #5 v2.0)
        This Python implementation, based on the hmac module, is about as fast
        as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster
        for long passwords.
"""
if not isinstance(hash_name, str):
raise TypeError(hash_name)
if not isinstance(password, (bytes, bytearray)):
password = bytes(memoryview(password))
if not isinstance(salt, (bytes, bytearray)):
salt = bytes(memoryview(salt))
# Fast inline HMAC implementation
inner = new(hash_name)
outer = new(hash_name)
blocksize = getattr(inner, 'block_size', 64)
if len(password) > blocksize:
password = new(hash_name, password).digest()
password = password + b'\x00' * (blocksize - len(password))
inner.update(password.translate(_trans_36))
outer.update(password.translate(_trans_5C))
def prf(msg, inner=inner, outer=outer):
# PBKDF2_HMAC uses the password as key. We can re-use the same
            # digest objects and just update copies to skip initialization.
icpy = inner.copy()
ocpy = outer.copy()
icpy.update(msg)
ocpy.update(icpy.digest())
return ocpy.digest()
if iterations < 1:
raise ValueError(iterations)
if dklen is None:
dklen = outer.digest_size
if dklen < 1:
raise ValueError(dklen)
dkey = b''
loop = 1
from_bytes = int.from_bytes
while len(dkey) < dklen:
prev = prf(salt + loop.to_bytes(4, 'big'))
            # endianness doesn't matter here as long as to / from use the same
rkey = int.from_bytes(prev, 'big')
for i in range(iterations - 1):
prev = prf(prev)
# rkey = rkey ^ prev
rkey ^= from_bytes(prev, 'big')
loop += 1
dkey += rkey.to_bytes(inner.digest_size, 'big')
return dkey[:dklen]
for __func_name in __always_supported:
# try them all, some may not work due to the OpenSSL
# version not supporting that algorithm.
try:
globals()[__func_name] = __get_hash(__func_name)
except ValueError:
import logging
logging.exception('code for hash %s was not found.', __func_name)
# Cleanup locals()
del __always_supported, __func_name, __get_hash
del __py_new, __hash_new, __get_openssl_constructor
| 37.553097 | 83 | 0.641334 |
b55596c9db5f079bbb61ef67c28e4f4ef90a6974 | 1,432 | py | Python | paper/tests/paper_synthetic1_itr.py | ascillitoe/defragTrees | e2284ad79c2017e0d0813d6fc09aec28637774ca | ["MIT"] | null | null | null | paper/tests/paper_synthetic1_itr.py | ascillitoe/defragTrees | e2284ad79c2017e0d0813d6fc09aec28637774ca | ["MIT"] | null | null | null | paper/tests/paper_synthetic1_itr.py | ascillitoe/defragTrees | e2284ad79c2017e0d0813d6fc09aec28637774ca | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: Satoshi Hara
"""
import sys
import os
sys.path.append(os.path.abspath('./'))
sys.path.append(os.path.abspath('../'))
import numpy as np
import paper_sub_itr
# setting
prefix = 'synthetic1'
seed = 0
num = 1000
dim = 2
trial = 10
# data
b = 0.9
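# labels follow the XOR of the two half-planes; rand > b flips a label,
# i.e. 10% label noise with b = 0.9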
# output directories
if not os.path.exists('./result/'):
os.mkdir('./result/')
dirname = './result/result_%s_itr' % (prefix,)
if not os.path.exists(dirname):
os.mkdir(dirname)
for t in range(trial):
# data - train
np.random.seed(seed + t)
Xtr = np.random.rand(num, dim)
ytr = np.zeros(num)
ytr = np.logical_xor(Xtr[:, 0] > 0.5, Xtr[:, 1] > 0.5)
ytr = np.logical_xor(ytr, np.random.rand(num) > b)
# data - test
Xte = np.random.rand(num, dim)
yte = np.zeros(num)
yte = np.logical_xor(Xte[:, 0] > 0.5, Xte[:, 1] > 0.5)
yte = np.logical_xor(yte, np.random.rand(num) > b)
# save
dirname2 = '%s/result_%02d' % (dirname, t)
if not os.path.exists(dirname2):
os.mkdir(dirname2)
trfile = '%s/%s_train_%02d.csv' % (dirname2, prefix, t)
tefile = '%s/%s_test_%02d.csv' % (dirname2, prefix, t)
np.savetxt(trfile, np.c_[Xtr, ytr], delimiter=',')
np.savetxt(tefile, np.c_[Xte, yte], delimiter=',')
# demo_R
Kmax = 10
restart = 20
njobs = 4
treenum = 100
paper_sub_itr.run(prefix, Kmax, restart, trial, treenum=treenum, modeltype='classification', njobs=njobs, rftype='SL')
| 23.866667 | 118 | 0.611732 |
8aab8501b015758dc5312fb9ec7ba145778c8c42 | 7,958 | py | Python | apps/package_CalSim.py | pyRobShrk/calsim_toolkit | ca6d63f6a89757f06b53d646da9310ea77446f13 | ["MIT"] | 1 | 2020-01-09T22:18:13.000Z | 2020-01-09T22:18:13.000Z | apps/package_CalSim.py | pyRobShrk/calsim_toolkit | ca6d63f6a89757f06b53d646da9310ea77446f13 | ["MIT"] | 15 | 2020-01-07T01:05:47.000Z | 2021-06-16T16:12:21.000Z | apps/package_CalSim.py | pyRobShrk/calsim_toolkit | ca6d63f6a89757f06b53d646da9310ea77446f13 | ["MIT"] | 3 | 2020-03-06T18:10:09.000Z | 2021-06-16T16:20:16.000Z |
"""
Summary
-------
The purpose of this module is to create a zip package for a CalSim study.
Notes
-----
1. How to make a file hidden:
https://stackoverflow.com/questions/43441883/how-can-i-make-a-file-hidden-on-windows
"""
# %% Import libraries.
# Import standard libraries.
import os
import shutil
import glob
import re
import json
import stat
import subprocess as sb
import datetime as dt
import zipfile
import argparse
# Import custom modules.
try:
import custom_modules
from variable_dependencies import remove_comments
from tools.variables import external_apps_config
except(ModuleNotFoundError):
from .variable_dependencies import remove_comments
from ..tools.variables import external_apps_config
# %% Define functions.
def win_zip(dist_name):
wzzip = external_apps_config('wzzip')
if 'WZZIP.EXE' not in wzzip:
msg = 'Unable to find external application "WZZIP.exe".'
raise RuntimeError(msg)
wz_app = [wzzip]
wz_flg = '-a -P -r -Jhrs -whs'.split()
wz_arg = r'{}.zip {}\*.*'.format(dist_name, dist_name).split()
wz_zip = wz_app + wz_flg + wz_arg
stream = sb.run(wz_zip, cwd=os.getcwd(), encoding='utf-8', stdout=sb.PIPE)
shutil.rmtree(dist_name)
return 0
def python_zip(dist_name):
zip_fp = '{}.zip'.format(dist_name)
files = glob.glob(os.path.join(dist_name, '*'))
files = glob.glob(os.path.join(dist_name, '.*'))
files += glob.glob(os.path.join(dist_name, '**', '*'), recursive=True)
files += glob.glob(os.path.join(dist_name, '.git', '**', '*'), recursive=True)
files = list(set(files))
with zipfile.ZipFile(zip_fp, 'w') as f:
for file in files:
f.write(file)
shutil.rmtree(dist_name)
return 0
def obtain_DLLs(study):
# Search CalSim3 *.wresl files for all *.dll external references.
paths = os.path.join(study, '**', '*.wresl')
wresl_files = glob.glob(paths, recursive=True)
WRESL = ''
for wresl_file in wresl_files:
with open(wresl_file) as f:
content = f.read()
code = remove_comments(content)
WRESL += code + '\n'
DLLs = list(re.findall(r'\b\w+\.dll\b', WRESL))
DLLs = list(set(DLLs))
# Add supporting DLL not directly found in WRESL code.
if 'interfacetogw_x64.dll' in DLLs:
DLLs += ['CVGroundwater_x64.dll']
if 'interfacetocamdll_x64.dll' in DLLs:
DLLs += ['CAMDLL_x64.dll']
# Acquire relative paths for all *.dll binaries.
dll_paths = list()
for DLL in DLLs:
dll_paths += glob.glob(os.path.join(study, '**', DLL), recursive=True)
# Return list of DLLs.
return dll_paths
def obtain_IO(study):
"""
Notes
-----
1. Future development to also add groundwater output files to list.
"""
# Initialize regex code.
re_base = r'(?<=_{}\" value=\").+(?=\"/>)'
re_init = re_base.format('INIT')
re_svar = re_base.format('SVAR')
re_dvar = re_base.format('DVAR')
# Search CalSim3 *.launch files for all I/O binary file references.
paths = os.path.join(study, '*.launch')
launch_files = glob.glob(paths)
launch = ''
for launch_file in launch_files:
with open(launch_file) as f:
content = f.read()
launch += content + '\n'
binaries = list(re.findall(re_init, launch))
binaries += list(re.findall(re_svar, launch))
binaries += list(re.findall(re_dvar, launch))
binaries = list(set(binaries))
# Acquire relative paths for all I/O binary files.
binary_paths = list()
for binary in binaries:
b_file = os.path.basename(binary)
b_pth = os.path.join(study, '**', b_file)
binary_paths += glob.glob(b_pth, recursive=True)
    # Return the list of binary file paths.
return binary_paths
def main(study_dir, dist_name='', verbose=True, compress=True):
"""
Summary
-------
Function to package a CalSim study for distribution.
Parameters
----------
study_dir : path
Absolute or relative path to study directory.
dist_name : string, default '', optional
Name of the study *.zip file for distribution. If not provided, a name
is automatically generated.
verbose : boolean, default True, optional
Option to allow messages to print to console.
compress: boolean, default True, optional
Option to compress study package.
Returns
-------
_ : int
The value of 0 is returned to indicate success.
"""
# Switch working directory.
CWD = os.getcwd()
wd, study = os.path.split(os.path.abspath(study_dir))
os.chdir(wd)
if not dist_name:
today = dt.date.today().isoformat()
dist_name = 'USBR_{}_{}'.format(study, today)
    # Locate the git executable used for the packaging steps.
git = external_apps_config('git')
# Clone current branch.
git_clone = (git + f' clone {study} {dist_name}').split()
if os.path.exists(dist_name):
msg = f'{dist_name} already exists; overwrite denied.'
raise RuntimeError(msg)
stream = sb.run(git_clone, cwd=wd, encoding='utf-8', stdout=sb.PIPE)
# Hide .gitignore.
fp = os.path.join(dist_name, '.gitignore')
if os.path.exists(fp):
p = os.popen('attrib +h ' + fp)
p.close()
else:
print('No .gitignore file found.')
# Remove remote.
git_rm = (git + ' remote rm origin').split()
stream = sb.run(git_rm, cwd=dist_name, encoding='utf-8', stdout=sb.PIPE)
# Acquire list of binaries.
files = obtain_DLLs(study)
files += glob.glob(os.path.join(study, '**', '*.class'), recursive=True)
files += obtain_IO(study)
# Copy binaries to package.
for file in files:
d_path = os.path.join(dist_name, os.path.relpath(file, start=study))
if not os.path.exists(os.path.dirname(d_path)):
os.makedirs(os.path.dirname(d_path))
shutil.copyfile(file, d_path)
# Zip package.
if compress:
try:
_ = win_zip(dist_name)
msg = 'Successfully compressed {} to {}.zip with WinZip.'
print(msg.format(study, dist_name))
except RuntimeError:
_ = python_zip(dist_name)
msg = 'Successfully compressed {} to {}.zip with Python.'
print(msg.format(study, dist_name))
# Return to original working directory.
os.chdir(CWD)
# Return success indicator.
return 0
# %% Execute script.
if __name__ == '__main__':
# Initialize argument parser.
intro = 'Main function to package a CalSim study for distribution.'
parser = argparse.ArgumentParser(description=intro)
# Add positional arguments to parser.
parser.add_argument('study_dir', metavar='study directory', type=str,
nargs='?',
help='Absolute or relative path to study directory.')
# Add optional arguments.
parser.add_argument('-d', '--dist_name', metavar='distribution name',
type=str, nargs='?', default='',
help='''
Name of the study *.zip file for distribution. If
not provided, a name is automatically generated.
''')
parser.add_argument('-s', '--silent', dest='verbose', action='store_false',
default=True,
help='Option to suppress messages to console.')
parser.add_argument('-u', '--uncompressed', dest='compress',
action='store_false', default=True,
help='Option to suppress messages to console.')
# Parse arguments.
args = parser.parse_args()
study_dir = args.study_dir.strip('"')
dist_name = args.dist_name.strip('"')
verbose = args.verbose
compress = args.compress
# Pass arguments to function.
_ = main(study_dir, dist_name=dist_name, verbose=verbose,
compress=compress)
| 34.903509 | 87 | 0.622895 |
7d89b86a8a241e86498b7f86530af632396ac692 | 186 | py | Python | smlb/features/__init__.py | CitrineInformatics/smlb | 28a3689bd36aa8d51031b4faf7e2331bbd8148a9 | ["Apache-2.0"] | 6 | 2020-07-27T21:08:55.000Z | 2021-05-04T07:00:29.000Z | smlb/features/__init__.py | CitrineInformatics/smlb | 28a3689bd36aa8d51031b4faf7e2331bbd8148a9 | ["Apache-2.0"] | 18 | 2020-09-01T00:47:04.000Z | 2021-09-15T22:16:56.000Z | smlb/features/__init__.py | CitrineInformatics/smlb | 28a3689bd36aa8d51031b4faf7e2331bbd8148a9 | ["Apache-2.0"] | 2 | 2020-08-24T21:50:16.000Z | 2020-12-06T05:18:57.000Z |
from smlb.features.chemistry_development_kit_molecules import (
ChemistryDevelopmentKitMoleculeFeatures,
)
from smlb.features.matminer_composition import MatminerCompositionFeatures
| 37.2 | 74 | 0.887097 |
bdd5c50f3389f1039b4b3b2bb0245d42331175ec | 1,968 | py | Python | numpy/typing/mypy_plugin.py | alexhenrie/numpy | 662f973ba58563b268d009e67806aa1150ca1cb2 | ["BSD-3-Clause"] | 4 | 2021-02-19T19:10:50.000Z | 2021-02-23T13:27:44.000Z | numpy/typing/mypy_plugin.py | alexhenrie/numpy | 662f973ba58563b268d009e67806aa1150ca1cb2 | ["BSD-3-Clause"] | 169 | 2020-12-25T07:10:57.000Z | 2022-03-29T22:12:31.000Z | numpy/typing/mypy_plugin.py | alexhenrie/numpy | 662f973ba58563b268d009e67806aa1150ca1cb2 | ["BSD-3-Clause"] | null | null | null |
"""A module containing `numpy`-specific plugins for mypy."""
import typing as t
import numpy as np
try:
import mypy.types
from mypy.types import Type
from mypy.plugin import Plugin, AnalyzeTypeContext
_HookFunc = t.Callable[[AnalyzeTypeContext], Type]
MYPY_EX: t.Optional[ModuleNotFoundError] = None
except ModuleNotFoundError as ex:
MYPY_EX = ex
__all__: t.List[str] = []
def _get_precision_dict() -> t.Dict[str, str]:
names = [
("_NBitByte", np.byte),
("_NBitShort", np.short),
("_NBitIntC", np.intc),
("_NBitIntP", np.intp),
("_NBitInt", np.int_),
("_NBitLongLong", np.longlong),
("_NBitHalf", np.half),
("_NBitSingle", np.single),
("_NBitDouble", np.double),
("_NBitLongDouble", np.longdouble),
]
ret = {}
for name, typ in names:
n: int = 8 * typ().dtype.itemsize
ret[f'numpy.typing._nbit.{name}'] = f"numpy._{n}Bit"
return ret
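# On a typical 64-bit build this maps, for example,
# 'numpy.typing._nbit._NBitIntP' to 'numpy._64Bit' (np.intp has an 8-byte itemsize).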
#: A dictionary mapping type-aliases in `numpy.typing._nbit` to
#: concrete `numpy.typing.NBitBase` subclasses.
_PRECISION_DICT = _get_precision_dict()
def _hook(ctx: "AnalyzeTypeContext") -> "Type":
"""Replace a type-alias with a concrete ``NBitBase`` subclass."""
typ, _, api = ctx
name = typ.name.split(".")[-1]
name_new = _PRECISION_DICT[f"numpy.typing._nbit.{name}"]
return api.named_type(name_new)
if MYPY_EX is None:
class _NumpyPlugin(Plugin):
"""A plugin for assigning platform-specific `numpy.number` precisions."""
def get_type_analyze_hook(self, fullname: str) -> t.Optional[_HookFunc]:
if fullname in _PRECISION_DICT:
return _hook
return None
def plugin(version: str) -> t.Type[_NumpyPlugin]:
"""An entry-point for mypy."""
return _NumpyPlugin
else:
def plugin(version: str) -> t.Type["_NumpyPlugin"]:
"""An entry-point for mypy."""
raise MYPY_EX
| 28.114286 | 81 | 0.626524 |
970361b69af566e027101b9098d4da56f00db470 | 3,344 | py | Python | spydrnet/__init__.py | ganeshgore/spydrnet | 22672b8fc7d63461a71077bd20f29df6d38e96f4 | ["BSD-3-Clause"] | null | null | null | spydrnet/__init__.py | ganeshgore/spydrnet | 22672b8fc7d63461a71077bd20f29df6d38e96f4 | ["BSD-3-Clause"] | null | null | null | spydrnet/__init__.py | ganeshgore/spydrnet | 22672b8fc7d63461a71077bd20f29df6d38e96f4 | ["BSD-3-Clause"] | null | null | null |
"""
SpyDrNet
========
SpyDrNet is an EDA tool for analyzing and transforming netlists.
See https://byuccl.github.io/spydrnet for more details.
"""
import importlib
import pkgutil
import pathlib
import sys
import os
discovered_plugins = {
name: importlib.import_module(name)
for finder, name, ispkg
in pkgutil.iter_modules()
if name.startswith('spydrnet_')
}
print("Installed Plugins", discovered_plugins.keys())
def get_active_plugins():
active_plugins = {}
config_file = os.path.join(pathlib.Path.home(), ".spydrnet")
if os.path.isfile(config_file):
for plugin in open(config_file, "r").read().split():
if discovered_plugins.get(plugin, None):
active_plugins.update({plugin: discovered_plugins[plugin]})
else:
print("Plugin %s is not installed " % plugin)
else:
with open(config_file, "w") as fp:
fp.write("\n".join(discovered_plugins.keys()))
active_plugins.update(discovered_plugins)
return active_plugins
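# The ~/.spydrnet file is a plain whitespace-separated list of plugin module
# names, for example (illustrative):
#     spydrnet_tmr
#     spydrnet_physical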
print("Active Plugins", get_active_plugins().keys())
# Release data
from spydrnet import release
__author__ = '%s <%s>\n%s <%s>\n%s <%s>' % \
(release.authors['Keller'] + release.authors['Skouson'] +
release.authors['Wirthlin'])
__license__ = release.license
__date__ = release.date
__version__ = release.version
__release__ = release.release
from spydrnet.ir import *
from spydrnet.util.hierarchical_reference import HRef
OUT = Port.Direction.OUT
IN = Port.Direction.IN
INOUT = Port.Direction.INOUT
UNDEFINED = Port.Direction.UNDEFINED
from spydrnet.util.selection import INSIDE, OUTSIDE, BOTH, ALL
from spydrnet.testing.test import run as test
from spydrnet.parsers import parse
from spydrnet.composers import compose
from spydrnet.plugins import namespace_manager
from spydrnet.util import get_netlists, get_libraries, get_definitions, get_ports, get_cables, get_instances,\
get_wires, get_pins
from spydrnet.util import get_hinstances, get_hports, get_hpins, get_hcables, get_hwires
import os
base_dir = os.path.dirname(os.path.abspath(__file__))
import glob
example_netlist_names = list()
for filename in glob.glob(os.path.join(base_dir, 'support_files', 'EDIF_netlists', "*")):
basename = os.path.basename(filename)
example_netlist_names.append(basename[:basename.index('.')])
example_netlist_names.sort()
# logger for the module
import logging
import sys
LOG_FORMAT = "%(levelname)5s %(filename)s:%(lineno)s (%(threadName)10s) - %(message)s"
logger = logging.getLogger('spydrnet_logs')
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(logging.Formatter(LOG_FORMAT))
logger.addHandler(stream_handler)
def enable_file_logging(LOG_LEVEL=None, filename=""):
LOG_LEVEL = LOG_LEVEL or "INFO"
file_handler = logging.FileHandler("_" + filename + "_spydrnet.log", mode='w')
file_handler.setFormatter(logging.Formatter(LOG_FORMAT))
file_handler.setLevel(eval(f"logging.{LOG_LEVEL}"))
logger.addHandler(file_handler)
return file_handler
def load_example_netlist_by_name(name):
assert name in example_netlist_names, "Example netlist not found"
return parse(os.path.join(base_dir, 'support_files', 'EDIF_netlists', name + ".edf.zip"))
| 30.962963 | 110 | 0.739833 |
d92a5f3ee330cf3deddb4be4d0a7ad5c7e34c048 | 16,641 | py | Python | deep_learning4e.py | netwong/aima-python | 3511e766fe8a45aeefb019c8a5c19636a3b11579 | ["MIT"] | 1 | 2020-04-24T17:12:48.000Z | 2020-04-24T17:12:48.000Z | deep_learning4e.py | surajit-techie/aima-python | 04fa465401af1939e076b022a9e10a5437ebefe7 | ["MIT"] | null | null | null | deep_learning4e.py | surajit-techie/aima-python | 04fa465401af1939e076b022a9e10a5437ebefe7 | ["MIT"] | 1 | 2019-12-09T20:50:14.000Z | 2019-12-09T20:50:14.000Z |
"""Deep learning. (Chapter 20)"""
import math
import random
import statistics
from keras import optimizers
from keras.layers import Dense, SimpleRNN
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing import sequence
from utils4e import (sigmoid, dot_product, softmax1D, conv1D, GaussianKernel, element_wise_product, vector_add,
random_weights, scalar_vector_product, matrix_multiplication, map_vector, mse_loss)
class Node:
"""
A node in a computational graph contains the pointer to all its parents.
:param val: value of current node.
:param parents: a container of all parents of current node.
"""
def __init__(self, val=None, parents=None):
if parents is None:
parents = []
self.val = val
self.parents = parents
def __repr__(self):
return "<Node {}>".format(self.val)
class NNUnit(Node):
"""
A single unit of a layer in a neural network
:param weights: weights between parent nodes and current node
:param value: value of current node
"""
def __init__(self, weights=None, value=None):
super(NNUnit, self).__init__(value)
self.weights = weights or []
class Layer:
"""
A layer in a neural network based on a computational graph.
:param size: number of units in the current layer
"""
def __init__(self, size=3):
self.nodes = [NNUnit() for _ in range(size)]
def forward(self, inputs):
"""Define the operation to get the output of this layer"""
raise NotImplementedError
class OutputLayer(Layer):
"""1D softmax output layer in 19.3.2"""
def __init__(self, size=3):
super(OutputLayer, self).__init__(size)
def forward(self, inputs):
assert len(self.nodes) == len(inputs)
res = softmax1D(inputs)
for node, val in zip(self.nodes, res):
node.val = val
return res
class InputLayer(Layer):
"""1D input layer. Layer size is the same as input vector size."""
def __init__(self, size=3):
super(InputLayer, self).__init__(size)
def forward(self, inputs):
"""Take each value of the inputs to each unit in the layer."""
assert len(self.nodes) == len(inputs)
for node, inp in zip(self.nodes, inputs):
node.val = inp
return inputs
class DenseLayer(Layer):
"""
1D dense layer in a neural network.
:param in_size: input vector size, int.
:param out_size: output vector size, int.
:param activation: activation function, Activation object.
"""
def __init__(self, in_size=3, out_size=3, activation=None):
super(DenseLayer, self).__init__(out_size)
self.out_size = out_size
self.inputs = None
self.activation = sigmoid() if not activation else activation
# initialize weights
for node in self.nodes:
node.weights = random_weights(-0.5, 0.5, in_size)
def forward(self, inputs):
self.inputs = inputs
res = []
# get the output value of each unit
for unit in self.nodes:
val = self.activation.f(dot_product(unit.weights, inputs))
unit.val = val
res.append(val)
return res
class ConvLayer1D(Layer):
"""
1D convolution layer of in neural network.
:param kernel_size: convolution kernel size
"""
def __init__(self, size=3, kernel_size=3):
super(ConvLayer1D, self).__init__(size)
# init convolution kernel as gaussian kernel
for node in self.nodes:
node.weights = GaussianKernel(kernel_size)
def forward(self, features):
# each node in layer takes a channel in the features.
assert len(self.nodes) == len(features)
res = []
# compute the convolution output of each channel, store it in node.val
for node, feature in zip(self.nodes, features):
out = conv1D(feature, node.weights)
res.append(out)
node.val = out
return res
class MaxPoolingLayer1D(Layer):
"""
1D max pooling layer in a neural network.
:param kernel_size: max pooling area size
"""
def __init__(self, size=3, kernel_size=3):
super(MaxPoolingLayer1D, self).__init__(size)
self.kernel_size = kernel_size
self.inputs = None
def forward(self, features):
assert len(self.nodes) == len(features)
res = []
self.inputs = features
# do max pooling for each channel in features
for i in range(len(self.nodes)):
feature = features[i]
# get the max value in a kernel_size * kernel_size area
out = [max(feature[i:i + self.kernel_size]) for i in range(len(feature) - self.kernel_size + 1)]
res.append(out)
self.nodes[i].val = out
return res
def init_examples(examples, idx_i, idx_t, o_units):
"""Init examples from dataset.examples."""
inputs, targets = {}, {}
for i, e in enumerate(examples):
# input values of e
inputs[i] = [e[i] for i in idx_i]
if o_units > 1:
# one-hot representation of e's target
t = [0 for i in range(o_units)]
t[e[idx_t]] = 1
targets[i] = t
else:
# target value of e
targets[i] = [e[idx_t]]
return inputs, targets
def gradient_descent(dataset, net, loss, epochs=1000, l_rate=0.01, batch_size=1, verbose=None):
"""
Gradient descent algorithm to update the learnable parameters of a network.
:return: the updated network
"""
examples = dataset.examples # init data
for e in range(epochs):
total_loss = 0
random.shuffle(examples)
weights = [[node.weights for node in layer.nodes] for layer in net]
for batch in get_batch(examples, batch_size):
inputs, targets = init_examples(batch, dataset.inputs, dataset.target, len(net[-1].nodes))
# compute gradients of weights
gs, batch_loss = BackPropagation(inputs, targets, weights, net, loss)
# update weights with gradient descent
weights = vector_add(weights, scalar_vector_product(-l_rate, gs))
total_loss += batch_loss
# update the weights of network each batch
for i in range(len(net)):
if weights[i]:
for j in range(len(weights[i])):
net[i].nodes[j].weights = weights[i][j]
if verbose and (e + 1) % verbose == 0:
print("epoch:{}, total_loss:{}".format(e + 1, total_loss))
return net
def adam_optimizer(dataset, net, loss, epochs=1000, rho=(0.9, 0.999), delta=1 / 10 ** 8,
l_rate=0.001, batch_size=1, verbose=None):
"""
[Figure 19.6]
Adam optimizer to update the learnable parameters of a network.
Required parameters are similar to gradient descent.
:return the updated network
"""
examples = dataset.examples
# init s,r and t
s = [[[0] * len(node.weights) for node in layer.nodes] for layer in net]
r = [[[0] * len(node.weights) for node in layer.nodes] for layer in net]
t = 0
# repeat util converge
for e in range(epochs):
# total loss of each epoch
total_loss = 0
random.shuffle(examples)
weights = [[node.weights for node in layer.nodes] for layer in net]
for batch in get_batch(examples, batch_size):
t += 1
inputs, targets = init_examples(batch, dataset.inputs, dataset.target, len(net[-1].nodes))
# compute gradients of weights
gs, batch_loss = BackPropagation(inputs, targets, weights, net, loss)
# update s,r,s_hat and r_gat
s = vector_add(scalar_vector_product(rho[0], s),
scalar_vector_product((1 - rho[0]), gs))
r = vector_add(scalar_vector_product(rho[1], r),
scalar_vector_product((1 - rho[1]), element_wise_product(gs, gs)))
s_hat = scalar_vector_product(1 / (1 - rho[0] ** t), s)
r_hat = scalar_vector_product(1 / (1 - rho[1] ** t), r)
# rescale r_hat
r_hat = map_vector(lambda x: 1 / (math.sqrt(x) + delta), r_hat)
# delta weights
delta_theta = scalar_vector_product(-l_rate, element_wise_product(s_hat, r_hat))
weights = vector_add(weights, delta_theta)
total_loss += batch_loss
# update the weights of network each batch
for i in range(len(net)):
if weights[i]:
for j in range(len(weights[i])):
net[i].nodes[j].weights = weights[i][j]
if verbose and (e + 1) % verbose == 0:
print("epoch:{}, total_loss:{}".format(e + 1, total_loss))
return net
def BackPropagation(inputs, targets, theta, net, loss):
"""
The back-propagation algorithm for multilayer networks in only one epoch, to calculate gradients of theta
:param inputs: a batch of inputs in an array. Each input is an iterable object.
:param targets: a batch of targets in an array. Each target is an iterable object.
:param theta: parameters to be updated.
:param net: a list of predefined layer objects representing their linear sequence.
:param loss: a predefined loss function taking array of inputs and targets.
:return: gradients of theta, loss of the input batch.
"""
assert len(inputs) == len(targets)
o_units = len(net[-1].nodes)
n_layers = len(net)
batch_size = len(inputs)
gradients = [[[] for _ in layer.nodes] for layer in net]
total_gradients = [[[0] * len(node.weights) for node in layer.nodes] for layer in net]
batch_loss = 0
# iterate over each example in batch
for e in range(batch_size):
i_val = inputs[e]
t_val = targets[e]
# forward pass and compute batch loss
for i in range(1, n_layers):
layer_out = net[i].forward(i_val)
i_val = layer_out
batch_loss += loss(t_val, layer_out)
# initialize delta
delta = [[] for _ in range(n_layers)]
previous = [layer_out[i] - t_val[i] for i in range(o_units)]
h_layers = n_layers - 1
# backward pass
for i in range(h_layers, 0, -1):
layer = net[i]
derivative = [layer.activation.derivative(node.val) for node in layer.nodes]
delta[i] = element_wise_product(previous, derivative)
# pass to layer i-1 in the next iteration
previous = matrix_multiplication([delta[i]], theta[i])[0]
# compute gradient of layer i
gradients[i] = [scalar_vector_product(d, net[i].inputs) for d in delta[i]]
# add gradient of current example to batch gradient
total_gradients = vector_add(total_gradients, gradients)
return total_gradients, batch_loss
class BatchNormalizationLayer(Layer):
"""Batch normalization layer."""
def __init__(self, size, epsilon=0.001):
super(BatchNormalizationLayer, self).__init__(size)
self.epsilon = epsilon
        # self.weights = [gamma (scale), beta (shift)]
self.weights = [0, 0]
self.inputs = None
def forward(self, inputs):
# mean value of inputs
mu = sum(inputs) / len(inputs)
        # standard deviation of inputs
stderr = statistics.stdev(inputs)
self.inputs = inputs
res = []
# get normalized value of each input
for i in range(len(self.nodes)):
val = [(inputs[i] - mu) * self.weights[0] / math.sqrt(self.epsilon + stderr ** 2) + self.weights[1]]
res.append(val)
self.nodes[i].val = val
return res
def get_batch(examples, batch_size=1):
"""Split examples into multiple batches"""
for i in range(0, len(examples), batch_size):
yield examples[i: i + batch_size]
def NeuralNetLearner(dataset, hidden_layer_sizes=None, learning_rate=0.01, epochs=100,
optimizer=gradient_descent, batch_size=1, verbose=None):
"""
Simple dense multilayer neural network.
:param hidden_layer_sizes: size of hidden layers in the form of a list
"""
if hidden_layer_sizes is None:
hidden_layer_sizes = [4]
input_size = len(dataset.inputs)
output_size = len(dataset.values[dataset.target])
# initialize the network
raw_net = [InputLayer(input_size)]
# add hidden layers
hidden_input_size = input_size
for h_size in hidden_layer_sizes:
raw_net.append(DenseLayer(hidden_input_size, h_size))
hidden_input_size = h_size
raw_net.append(DenseLayer(hidden_input_size, output_size))
# update parameters of the network
learned_net = optimizer(dataset, raw_net, mse_loss, epochs, l_rate=learning_rate,
batch_size=batch_size, verbose=verbose)
def predict(example):
n_layers = len(learned_net)
layer_input = example
layer_out = example
# get the output of each layer by forward passing
for i in range(1, n_layers):
layer_out = learned_net[i].forward(layer_input)
layer_input = layer_out
return layer_out.index(max(layer_out))
return predict
def PerceptronLearner(dataset, learning_rate=0.01, epochs=100, verbose=None):
"""
Simple perceptron neural network.
"""
input_size = len(dataset.inputs)
output_size = len(dataset.values[dataset.target])
# initialize the network, add dense layer
raw_net = [InputLayer(input_size), DenseLayer(input_size, output_size)]
# update the network
learned_net = gradient_descent(dataset, raw_net, mse_loss, epochs, l_rate=learning_rate, verbose=verbose)
def predict(example):
layer_out = learned_net[1].forward(example)
return layer_out.index(max(layer_out))
return predict
def SimpleRNNLearner(train_data, val_data, epochs=2):
"""
RNN example for text sentimental analysis.
:param train_data: a tuple of (training data, targets)
Training data: ndarray taking training examples, while each example is coded by embedding
Targets: ndarray taking targets of each example. Each target is mapped to an integer.
:param val_data: a tuple of (validation data, targets)
:param epochs: number of epochs
:return: a keras model
"""
total_inputs = 5000
input_length = 500
# init data
X_train, y_train = train_data
X_val, y_val = val_data
# init a the sequential network (embedding layer, rnn layer, dense layer)
model = Sequential()
model.add(Embedding(total_inputs, 32, input_length=input_length))
model.add(SimpleRNN(units=128))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# train the model
model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=epochs, batch_size=128, verbose=2)
return model
def keras_dataset_loader(dataset, max_length=500):
"""
Helper function to load keras datasets.
:param dataset: keras data set type
:param max_length: max length of each input sequence
"""
# init dataset
(X_train, y_train), (X_val, y_val) = dataset
if max_length > 0:
X_train = sequence.pad_sequences(X_train, maxlen=max_length)
X_val = sequence.pad_sequences(X_val, maxlen=max_length)
return (X_train[10:], y_train[10:]), (X_val, y_val), (X_train[:10], y_train[:10])
def AutoencoderLearner(inputs, encoding_size, epochs=200):
"""
    Simple example of an autoencoder trained to reproduce its own input.
:param inputs: a batch of input data in np.ndarray type
:param encoding_size: int, the size of encoding layer
:param epochs: number of epochs
:return: a keras model
"""
# init data
input_size = len(inputs[0])
# init model
model = Sequential()
model.add(Dense(encoding_size, input_dim=input_size, activation='relu', kernel_initializer='random_uniform',
bias_initializer='ones'))
model.add(Dense(input_size, activation='relu', kernel_initializer='random_uniform', bias_initializer='ones'))
# update model with sgd
sgd = optimizers.SGD(lr=0.01)
model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])
# train the model
model.fit(inputs, inputs, epochs=epochs, batch_size=10, verbose=2)
return model
| 33.961224 | 113 | 0.634397 |
2a873b58cced04a93acdd1f59366a6e65721e1a1 | 3,261 | py | Python | Algorithm.Framework/Portfolio/ConfidenceWeightedPortfolioConstructionModel.py | echoplaza/Lean | 66f32cffe2ddb07532c8160299a7b1b6d67429ee | ["Apache-2.0"] | 1 | 2021-02-11T21:13:12.000Z | 2021-02-11T21:13:12.000Z | Algorithm.Framework/Portfolio/ConfidenceWeightedPortfolioConstructionModel.py | echoplaza/Lean | 66f32cffe2ddb07532c8160299a7b1b6d67429ee | ["Apache-2.0"] | 1 | 2020-08-25T03:02:47.000Z | 2020-08-25T03:02:47.000Z | Algorithm.Framework/Portfolio/ConfidenceWeightedPortfolioConstructionModel.py | echoplaza/Lean | 66f32cffe2ddb07532c8160299a7b1b6d67429ee | ["Apache-2.0"] | null | null | null |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm.Framework")
from QuantConnect import *
from QuantConnect.Algorithm.Framework.Portfolio import *
from InsightWeightingPortfolioConstructionModel import InsightWeightingPortfolioConstructionModel
class ConfidenceWeightedPortfolioConstructionModel(InsightWeightingPortfolioConstructionModel):
'''Provides an implementation of IPortfolioConstructionModel that generates percent targets based on the
Insight.Confidence. The target percent holdings of each Symbol is given by the Insight.Confidence from the last
active Insight for that symbol.
For insights of direction InsightDirection.Up, long targets are returned and for insights of direction
InsightDirection.Down, short targets are returned.
    If the sum of the weights of the last active Insights is greater than 1, each target percent
    holding is scaled down proportionally so that the weights sum to 1.
    It will ignore Insights that have no Insight.Confidence value.'''
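    # Illustrative weighting (hypothetical insights): Up(SPY, confidence=0.6) and
    # Down(QQQ, confidence=0.8) sum to 1.4 > 1, so the targets are scaled to
    # roughly +0.43 for SPY and -0.57 for QQQ.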
def __init__(self, rebalance = Resolution.Daily, portfolioBias = PortfolioBias.LongShort):
'''Initialize a new instance of ConfidenceWeightedPortfolioConstructionModel
Args:
rebalance: Rebalancing parameter. If it is a timedelta, date rules or Resolution, it will be converted into a function.
If None will be ignored.
The function returns the next expected rebalance time for a given algorithm UTC DateTime.
The function returns null if unknown, in which case the function will be called again in the
next loop. Returning current time will trigger rebalance.
portfolioBias: Specifies the bias of the portfolio (Short, Long/Short, Long)'''
super().__init__(rebalance, portfolioBias)
def ShouldCreateTargetForInsight(self, insight):
'''Method that will determine if the portfolio construction model should create a
target for this insight
Args:
insight: The insight to create a target for'''
# Ignore insights that don't have Confidence value
return insight.Confidence is not None
def GetValue(self, insight):
'''Method that will determine which member will be used to compute the weights and gets its value
Args:
insight: The insight to create a target for
Returns:
The value of the selected insight member'''
return insight.Confidence
| 56.224138 | 131 | 0.736584 |