#
# PySNMP MIB module Juniper-ACCOUNTING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-ACCOUNTING-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:50:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
acctngSelectionIndex, acctngSelectionEntry, acctngFileEntry = mibBuilder.importSymbols("ACCOUNTING-CONTROL-MIB", "acctngSelectionIndex", "acctngSelectionEntry", "acctngFileEntry")
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
juniMibs, = mibBuilder.importSymbols("Juniper-MIBs", "juniMibs")
JuniPolicyAttachmentType, = mibBuilder.importSymbols("Juniper-POLICY-MIB", "JuniPolicyAttachmentType")
JuniInterfaceLocation, JuniAcctngAdminType, JuniEnable, JuniInterfaceDescrFormat, JuniAcctngOperType = mibBuilder.importSymbols("Juniper-TC", "JuniInterfaceLocation", "JuniAcctngAdminType", "JuniEnable", "JuniInterfaceDescrFormat", "JuniAcctngOperType")
juniIfType, = mibBuilder.importSymbols("Juniper-UNI-IF-MIB", "juniIfType")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
iso, Gauge32, MibIdentifier, Counter64, Counter32, Bits, NotificationType, Unsigned32, Integer32, IpAddress, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Gauge32", "MibIdentifier", "Counter64", "Counter32", "Bits", "NotificationType", "Unsigned32", "Integer32", "IpAddress", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "TimeTicks")
RowStatus, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TextualConvention", "DisplayString")
juniAcctngMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24))
juniAcctngMIB.setRevisions(('2009-07-16 15:00', '2005-04-26 15:00', '2003-02-28 15:00', '2002-12-17 15:37', '2001-12-05 14:16', '2001-11-19 19:00', '2001-03-26 13:22', '2000-11-07 19:00', '2000-07-21 00:00', '2000-03-20 00:00', '2000-01-17 00:00', '1999-10-18 00:00',))
if mibBuilder.loadTexts: juniAcctngMIB.setLastUpdated('200907161500Z')
if mibBuilder.loadTexts: juniAcctngMIB.setOrganization('Juniper Networks, Inc.')
juniAcctngMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1))
juniAcctngSelectionControl = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1))
juniAcctngFileControl = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 2))
juniAcctngInterfaceControl = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3))
juniAcctngScalarGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 4))
juniAcctngVirtualRouterControl = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 5))
juniAcctngInterfaceDescrFormat = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 4, 1), JuniInterfaceDescrFormat()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniAcctngInterfaceDescrFormat.setStatus('current')
juniAcctngInterfaceNumberingMode = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 4, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("proprietaryNumbering", 0), ("rfc1213Number", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniAcctngInterfaceNumberingMode.setStatus('current')
juniAcctngFileFormat = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 4, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("includeCR", 0), ("includeCRLF", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniAcctngFileFormat.setStatus('current')
juniAcctngSelectionTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 1), )
if mibBuilder.loadTexts: juniAcctngSelectionTable.setStatus('current')
juniAcctngSelectionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 1, 1), )
acctngSelectionEntry.registerAugmentions(("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionEntry"))
juniAcctngSelectionEntry.setIndexNames(*acctngSelectionEntry.getIndexNames())
if mibBuilder.loadTexts: juniAcctngSelectionEntry.setStatus('current')
juniAcctngSelectionType = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 1, 1, 1), Bits().clone(namedValues=NamedValues(("ietfAccountControl", 0), ("connectionLessLayer2", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniAcctngSelectionType.setStatus('current')
juniAcctngSelectionMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("absoluteCounterValues", 1), ("deltaCounterValues", 2))).clone('deltaCounterValues')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniAcctngSelectionMode.setStatus('current')
juniAcctngSelectionSubtreeType = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("unknown", 0), ("lineCard", 1), ("systemController", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniAcctngSelectionSubtreeType.setStatus('deprecated')
juniAcctngSelectionMaxIfStackLevels = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 1, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniAcctngSelectionMaxIfStackLevels.setStatus('current')
juniAcctngSelectionPolicyName = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 1, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniAcctngSelectionPolicyName.setStatus('current')
juniAcctngSelectionPolicyType = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 1, 1, 6), JuniPolicyAttachmentType().clone('noPolicy')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniAcctngSelectionPolicyType.setStatus('current')
juniAcctngSelectionIfCreateDeleteStats = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 1, 1, 7), JuniEnable().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniAcctngSelectionIfCreateDeleteStats.setStatus('current')
juniAcctngSelectionIfCreateDeleteStatsIfTypes = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 1, 1, 8), Bits().clone(namedValues=NamedValues(("ip", 0), ("ppp", 1), ("atm1483", 2), ("vlan", 3), ("mplsMajor", 4), ("mplsL2Shim", 5), ("mplsMinor", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniAcctngSelectionIfCreateDeleteStatsIfTypes.setStatus('current')
juniAcctngSelectionIfStackStartTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 3), )
if mibBuilder.loadTexts: juniAcctngSelectionIfStackStartTable.setStatus('current')
juniAcctngSelectionIfStackStartEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 3, 1), ).setIndexNames((0, "ACCOUNTING-CONTROL-MIB", "acctngSelectionIndex"), (0, "Juniper-ACCOUNTING-MIB", "juniAcctngSelectionIfStackIfIndex"))
if mibBuilder.loadTexts: juniAcctngSelectionIfStackStartEntry.setStatus('current')
juniAcctngSelectionIfStackIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 3, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: juniAcctngSelectionIfStackIfIndex.setStatus('current')
juniAcctngSelectionIfStackRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 1, 3, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniAcctngSelectionIfStackRowStatus.setStatus('current')
juniAcctngFileTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 2, 1), )
if mibBuilder.loadTexts: juniAcctngFileTable.setStatus('current')
juniAcctngFileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 2, 1, 1), )
acctngFileEntry.registerAugmentions(("Juniper-ACCOUNTING-MIB", "juniAcctngFileEntry"))
juniAcctngFileEntry.setIndexNames(*acctngFileEntry.getIndexNames())
if mibBuilder.loadTexts: juniAcctngFileEntry.setStatus('current')
juniAcctngFileXferMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("juniAcctngManualTransfer", 1), ("juniAcctngAutomatedTransfer", 2), ("juniAcctngTransferOnFileFull", 3), ("juniAcctngRedundantTransfer", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniAcctngFileXferMode.setStatus('current')
juniAcctngFileXferIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniAcctngFileXferIndex.setStatus('current')
juniAcctngFileXferSecondaryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniAcctngFileXferSecondaryIndex.setStatus('current')
juniAcctngObsInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 1), )
if mibBuilder.loadTexts: juniAcctngObsInterfaceTable.setStatus('obsolete')
juniAcctngObsInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 1, 1), ).setIndexNames((0, "Juniper-UNI-IF-MIB", "juniIfType"))
if mibBuilder.loadTexts: juniAcctngObsInterfaceEntry.setStatus('obsolete')
juniAcctngObsInterfaceAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 1, 1, 1), JuniAcctngAdminType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniAcctngObsInterfaceAdminStatus.setStatus('obsolete')
juniAcctngObsInterfaceOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 1, 1, 2), JuniAcctngOperType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniAcctngObsInterfaceOperStatus.setStatus('obsolete')
juniAcctngObsInterfaceRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 1, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniAcctngObsInterfaceRowStatus.setStatus('obsolete')
juniAcctngObsInterfaceAccntgFileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniAcctngObsInterfaceAccntgFileIndex.setStatus('obsolete')
juniAcctngInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 2), )
if mibBuilder.loadTexts: juniAcctngInterfaceTable.setStatus('current')
juniAcctngInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 2, 1), ).setIndexNames((0, "Juniper-UNI-IF-MIB", "juniIfType"), (0, "Juniper-ACCOUNTING-MIB", "juniAcctngInterfaceFileIndex"), (0, "Juniper-ACCOUNTING-MIB", "juniAcctngInterfaceLocation"))
if mibBuilder.loadTexts: juniAcctngInterfaceEntry.setStatus('current')
juniAcctngInterfaceFileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)))
if mibBuilder.loadTexts: juniAcctngInterfaceFileIndex.setStatus('current')
juniAcctngInterfaceLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 2, 1, 2), JuniInterfaceLocation())
if mibBuilder.loadTexts: juniAcctngInterfaceLocation.setStatus('current')
juniAcctngInterfaceAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 2, 1, 3), JuniAcctngAdminType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniAcctngInterfaceAdminStatus.setStatus('current')
juniAcctngInterfaceOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 2, 1, 4), JuniAcctngOperType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniAcctngInterfaceOperStatus.setStatus('current')
juniAcctngInterfaceRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniAcctngInterfaceRowStatus.setStatus('current')
juniAcctngIfFinalStatsXferStatsTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 3), )
if mibBuilder.loadTexts: juniAcctngIfFinalStatsXferStatsTable.setStatus('current')
juniAcctngIfFinalStatsXferStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 3, 1), ).setIndexNames((0, "Juniper-ACCOUNTING-MIB", "juniAcctngIfFinalStatsXferStatsSlotNumber"))
if mibBuilder.loadTexts: juniAcctngIfFinalStatsXferStatsEntry.setStatus('current')
juniAcctngIfFinalStatsXferStatsSlotNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 14)))
if mibBuilder.loadTexts: juniAcctngIfFinalStatsXferStatsSlotNumber.setStatus('current')
juniAcctngIfFinalStatsXferStatsReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 3, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniAcctngIfFinalStatsXferStatsReceived.setStatus('current')
juniAcctngIfFinalStatsXferStatsTransferred = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 3, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniAcctngIfFinalStatsXferStatsTransferred.setStatus('current')
juniAcctngIfFinalStatsXferStatsDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 3, 3, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniAcctngIfFinalStatsXferStatsDropped.setStatus('current')
juniAcctngVirtualRouterTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 5, 1), )
if mibBuilder.loadTexts: juniAcctngVirtualRouterTable.setStatus('current')
juniAcctngVirtualRouterTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 5, 1, 1), ).setIndexNames((0, "Juniper-ACCOUNTING-MIB", "juniAcctngVirtualRouterTableIndex"))
if mibBuilder.loadTexts: juniAcctngVirtualRouterTableEntry.setStatus('current')
juniAcctngVirtualRouterTableIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 5, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniAcctngVirtualRouterTableIndex.setStatus('current')
juniAcctngVirtualRouterTableVRName = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 5, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniAcctngVirtualRouterTableVRName.setStatus('current')
juniAcctngVirtualRouterTableRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 1, 5, 1, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniAcctngVirtualRouterTableRowStatus.setStatus('current')
juniAcctngSelectionSchema = ObjectIdentity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2))
if mibBuilder.loadTexts: juniAcctngSelectionSchema.setStatus('current')
juniAcctngSelectionSchemaIf = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1))
juniAcctngIfInOctets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 1))
juniAcctngIfInUcastPkts = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 2))
juniAcctngIfInDiscards = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 3))
juniAcctngIfInErrors = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 4))
juniAcctngIfInUnknownProtos = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 5))
juniAcctngIfOutOctets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 6))
juniAcctngIfOutUcastPkts = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 7))
juniAcctngIfOutDiscards = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 8))
juniAcctngIfOutErrors = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 9))
juniAcctngIfCorrelator = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 10))
juniAcctngIfInPolicedOctets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 11))
juniAcctngIfInPolicedPkts = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 12))
juniAcctngIfInSpoofedPkts = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 13))
juniAcctngIfOutPolicedOctets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 14))
juniAcctngIfOutPolicedPkts = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 15))
juniAcctngIfOutSchedulerDropOctets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 16))
juniAcctngIfOutSchedulerDropPkts = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 17))
juniAcctngIfLowerInterface = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 18))
juniAcctngIfTimeOffset = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 19))
juniAcctngifInMulticastPkts = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 20))
juniAcctngifInBroadcastPkts = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 21))
juniAcctngifOutMulticastPkts = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 22))
juniAcctngifOutBroadcastPkts = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 1, 23))
juniAcctngSelectionSchemaIfStack = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 3))
juniAcctngSelectionSchemaSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 4))
juniAcctngSelectionSchemaPolicy = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 5))
juniAcctngSelectionSchemaIgmp = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 6))
juniAcctngSelectionSchemaQos = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7))
juniAcctngGreenPackets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 5, 1))
juniAcctngUpperGreenPackets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 5, 2))
juniAcctngYellowPackets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 5, 3))
juniAcctngUpperYellowPackets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 5, 4))
juniAcctngRedPackets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 5, 5))
juniAcctngUpperRedPackets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 5, 6))
juniAcctngGreenBytes = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 5, 7))
juniAcctngUpperGreenBytes = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 5, 8))
juniAcctngYellowBytes = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 5, 9))
juniAcctngUpperYellowBytes = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 5, 10))
juniAcctngRedBytes = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 5, 11))
juniAcctngUpperRedBytes = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 5, 12))
juniAcctngIgmpLowerIndex = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 6, 1))
juniAcctngIgmpRouterIndex = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 6, 2))
juniAcctngIgmpDestAddr = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 6, 3))
juniAcctngIgmpSourceIndex = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 6, 4))
juniAcctngIgmpMulticastGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 6, 5))
juniAcctngIgmpLowerIgmpCommand = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 6, 6))
juniAcctngIgmpLowerTimeStamp = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 6, 7))
juniAcctngParentShapingRate = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 1))
juniAcctngParentSharedShapRate = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 2))
juniAcctngParentChildWeight = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 3))
juniAcctngQueueLength = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 4))
juniAcctngForwardedRate = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 5))
juniAcctngAggDropRate = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 6))
juniAcctngForwardedPackets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 7))
juniAcctngForwardedBytes = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 8))
juniAcctngGreenDropPackets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 9))
juniAcctngGreenDropBytes = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 10))
juniAcctngYellowDropPackets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 11))
juniAcctngYellowDropBytes = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 12))
juniAcctngRedDropPackets = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 13))
juniAcctngRedDropBytes = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 14))
juniAcctngDropProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 15))
juniAcctngQueueProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 16))
juniAcctngSchedulerProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 17))
juniAcctngStatisticsProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 18))
juniAcctngShapingMode = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 19))
juniAcctngShapingRate = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 20))
juniAcctngBurst = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 21))
juniAcctngAssuredRate = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 22))
juniAcctngWeight = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 23))
juniAcctngRedEnabled = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 24))
juniAcctngSharedShapingMode = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 25))
juniAcctngSharedShapingRate = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 26))
juniAcctngByteAdjType = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 27))
juniAcctngByteAdjBytes = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 2, 7, 28))
juniAcctngConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3))
juniAcctngGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 1))
juniAcctngCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 2))
juniAcctngCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 2, 1)).setObjects(("Juniper-ACCOUNTING-MIB", "juniAcctngBasicGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniAcctngCompliance = juniAcctngCompliance.setStatus('obsolete')
juniAcctngCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 2, 2)).setObjects(("Juniper-ACCOUNTING-MIB", "juniAcctngBasicGroup2"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniAcctngCompliance2 = juniAcctngCompliance2.setStatus('obsolete')
juniAcctngCompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 2, 3)).setObjects(("Juniper-ACCOUNTING-MIB", "juniAcctngBasicGroup3"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniAcctngCompliance3 = juniAcctngCompliance3.setStatus('obsolete')
juniAcctngCompliance4 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 2, 4)).setObjects(("Juniper-ACCOUNTING-MIB", "juniAcctngBasicGroup4"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniAcctngCompliance4 = juniAcctngCompliance4.setStatus('current')
juniAcctngCompliance5 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 2, 5)).setObjects(("Juniper-ACCOUNTING-MIB", "juniAcctngBasicGroup5"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniAcctngCompliance5 = juniAcctngCompliance5.setStatus('current')
juniAcctngCompliance6 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 2, 6)).setObjects(("Juniper-ACCOUNTING-MIB", "juniAcctngBasicGroup6"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniAcctngCompliance6 = juniAcctngCompliance6.setStatus('current')
juniAcctngBasicGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 1, 1)).setObjects(("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionType"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionMode"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionSubtreeType"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionMaxIfStackLevels"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionIfStackRowStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferMode"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferIndex"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferSecondaryIndex"), ("Juniper-ACCOUNTING-MIB", "juniAcctngObsInterfaceAdminStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngObsInterfaceOperStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngObsInterfaceRowStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngObsInterfaceAccntgFileIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniAcctngBasicGroup = juniAcctngBasicGroup.setStatus('obsolete')
juniAcctngBasicGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 1, 2)).setObjects(("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionType"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionMode"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionMaxIfStackLevels"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionIfStackRowStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferMode"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferIndex"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferSecondaryIndex"), ("Juniper-ACCOUNTING-MIB", "juniAcctngObsInterfaceAdminStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngObsInterfaceOperStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngObsInterfaceRowStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngObsInterfaceAccntgFileIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniAcctngBasicGroup2 = juniAcctngBasicGroup2.setStatus('obsolete')
juniAcctngBasicGroup3 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 1, 3)).setObjects(("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionType"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionMode"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionMaxIfStackLevels"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionPolicyName"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionPolicyType"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionIfStackRowStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferMode"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferIndex"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferSecondaryIndex"), ("Juniper-ACCOUNTING-MIB", "juniAcctngObsInterfaceAdminStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngObsInterfaceOperStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngObsInterfaceRowStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngObsInterfaceAccntgFileIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniAcctngBasicGroup3 = juniAcctngBasicGroup3.setStatus('obsolete')
juniAcctngDeprecatedGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 1, 4)).setObjects(("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionSubtreeType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniAcctngDeprecatedGroup = juniAcctngDeprecatedGroup.setStatus('deprecated')
juniAcctngBasicGroup4 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 1, 5)).setObjects(("Juniper-ACCOUNTING-MIB", "juniAcctngInterfaceDescrFormat"), ("Juniper-ACCOUNTING-MIB", "juniAcctngInterfaceNumberingMode"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileFormat"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionType"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionMode"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionMaxIfStackLevels"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionPolicyName"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionPolicyType"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionIfStackRowStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferMode"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferIndex"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferSecondaryIndex"), ("Juniper-ACCOUNTING-MIB", "juniAcctngInterfaceAdminStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngInterfaceOperStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngInterfaceRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniAcctngBasicGroup4 = juniAcctngBasicGroup4.setStatus('current')
juniAcctngBasicGroup5 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 1, 6)).setObjects(("Juniper-ACCOUNTING-MIB", "juniAcctngInterfaceDescrFormat"), ("Juniper-ACCOUNTING-MIB", "juniAcctngInterfaceNumberingMode"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileFormat"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionType"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionMode"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionMaxIfStackLevels"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionPolicyName"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionPolicyType"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionIfStackRowStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionIfCreateDeleteStats"), ("Juniper-ACCOUNTING-MIB", "juniAcctngSelectionIfCreateDeleteStatsIfTypes"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferMode"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferIndex"), ("Juniper-ACCOUNTING-MIB", "juniAcctngFileXferSecondaryIndex"), ("Juniper-ACCOUNTING-MIB", "juniAcctngInterfaceAdminStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngInterfaceOperStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngInterfaceRowStatus"), ("Juniper-ACCOUNTING-MIB", "juniAcctngIfFinalStatsXferStatsReceived"), ("Juniper-ACCOUNTING-MIB", "juniAcctngIfFinalStatsXferStatsTransferred"), ("Juniper-ACCOUNTING-MIB", "juniAcctngIfFinalStatsXferStatsDropped"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniAcctngBasicGroup5 = juniAcctngBasicGroup5.setStatus('obsolete')
juniAcctngBasicGroup6 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 24, 3, 1, 7)).setObjects(("Juniper-ACCOUNTING-MIB", "juniAcctngVirtualRouterTableIndex"), ("Juniper-ACCOUNTING-MIB", "juniAcctngVirtualRouterTableVRName"), ("Juniper-ACCOUNTING-MIB", "juniAcctngVirtualRouterTableRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniAcctngBasicGroup6 = juniAcctngBasicGroup6.setStatus('current')
mibBuilder.exportSymbols("Juniper-ACCOUNTING-MIB", juniAcctngMIB=juniAcctngMIB, juniAcctngSelectionIfStackRowStatus=juniAcctngSelectionIfStackRowStatus, juniAcctngFileFormat=juniAcctngFileFormat, juniAcctngSelectionEntry=juniAcctngSelectionEntry, juniAcctngIfOutPolicedPkts=juniAcctngIfOutPolicedPkts, juniAcctngifInBroadcastPkts=juniAcctngifInBroadcastPkts, juniAcctngifOutBroadcastPkts=juniAcctngifOutBroadcastPkts, juniAcctngIfFinalStatsXferStatsTransferred=juniAcctngIfFinalStatsXferStatsTransferred, juniAcctngSelectionIfCreateDeleteStats=juniAcctngSelectionIfCreateDeleteStats, juniAcctngSelectionSchemaIgmp=juniAcctngSelectionSchemaIgmp, juniAcctngCompliance6=juniAcctngCompliance6, juniAcctngYellowPackets=juniAcctngYellowPackets, juniAcctngSelectionIfCreateDeleteStatsIfTypes=juniAcctngSelectionIfCreateDeleteStatsIfTypes, juniAcctngSelectionSubtreeType=juniAcctngSelectionSubtreeType, juniAcctngIfFinalStatsXferStatsSlotNumber=juniAcctngIfFinalStatsXferStatsSlotNumber, juniAcctngInterfaceAdminStatus=juniAcctngInterfaceAdminStatus, juniAcctngIfOutSchedulerDropOctets=juniAcctngIfOutSchedulerDropOctets, juniAcctngIfInUcastPkts=juniAcctngIfInUcastPkts, juniAcctngIfFinalStatsXferStatsEntry=juniAcctngIfFinalStatsXferStatsEntry, juniAcctngIfInOctets=juniAcctngIfInOctets, juniAcctngIgmpLowerIgmpCommand=juniAcctngIgmpLowerIgmpCommand, juniAcctngBasicGroup4=juniAcctngBasicGroup4, juniAcctngInterfaceFileIndex=juniAcctngInterfaceFileIndex, juniAcctngParentSharedShapRate=juniAcctngParentSharedShapRate, juniAcctngBasicGroup3=juniAcctngBasicGroup3, juniAcctngMIBObjects=juniAcctngMIBObjects, juniAcctngInterfaceLocation=juniAcctngInterfaceLocation, juniAcctngInterfaceControl=juniAcctngInterfaceControl, juniAcctngFileXferSecondaryIndex=juniAcctngFileXferSecondaryIndex, juniAcctngSchedulerProfile=juniAcctngSchedulerProfile, juniAcctngByteAdjBytes=juniAcctngByteAdjBytes, juniAcctngYellowDropPackets=juniAcctngYellowDropPackets, juniAcctngIfTimeOffset=juniAcctngIfTimeOffset, juniAcctngCompliance5=juniAcctngCompliance5, juniAcctngQueueLength=juniAcctngQueueLength, juniAcctngWeight=juniAcctngWeight, juniAcctngIfCorrelator=juniAcctngIfCorrelator, juniAcctngShapingMode=juniAcctngShapingMode, juniAcctngInterfaceRowStatus=juniAcctngInterfaceRowStatus, juniAcctngifOutMulticastPkts=juniAcctngifOutMulticastPkts, juniAcctngUpperYellowPackets=juniAcctngUpperYellowPackets, juniAcctngFileControl=juniAcctngFileControl, juniAcctngObsInterfaceRowStatus=juniAcctngObsInterfaceRowStatus, juniAcctngForwardedRate=juniAcctngForwardedRate, juniAcctngVirtualRouterTableRowStatus=juniAcctngVirtualRouterTableRowStatus, juniAcctngFileXferMode=juniAcctngFileXferMode, juniAcctngForwardedPackets=juniAcctngForwardedPackets, juniAcctngSharedShapingRate=juniAcctngSharedShapingRate, juniAcctngIfFinalStatsXferStatsReceived=juniAcctngIfFinalStatsXferStatsReceived, juniAcctngCompliance3=juniAcctngCompliance3, juniAcctngScalarGroup=juniAcctngScalarGroup, juniAcctngDropProfile=juniAcctngDropProfile, juniAcctngVirtualRouterControl=juniAcctngVirtualRouterControl, juniAcctngIfOutSchedulerDropPkts=juniAcctngIfOutSchedulerDropPkts, juniAcctngVirtualRouterTable=juniAcctngVirtualRouterTable, juniAcctngGroups=juniAcctngGroups, juniAcctngIfOutDiscards=juniAcctngIfOutDiscards, juniAcctngRedEnabled=juniAcctngRedEnabled, juniAcctngIfInUnknownProtos=juniAcctngIfInUnknownProtos, juniAcctngCompliance4=juniAcctngCompliance4, juniAcctngRedBytes=juniAcctngRedBytes, juniAcctngIgmpDestAddr=juniAcctngIgmpDestAddr, juniAcctngQueueProfile=juniAcctngQueueProfile, 
juniAcctngFileTable=juniAcctngFileTable, juniAcctngInterfaceTable=juniAcctngInterfaceTable, juniAcctngUpperGreenPackets=juniAcctngUpperGreenPackets, juniAcctngIgmpLowerTimeStamp=juniAcctngIgmpLowerTimeStamp, juniAcctngBasicGroup2=juniAcctngBasicGroup2, PYSNMP_MODULE_ID=juniAcctngMIB, juniAcctngIgmpLowerIndex=juniAcctngIgmpLowerIndex, juniAcctngUpperGreenBytes=juniAcctngUpperGreenBytes, juniAcctngIgmpMulticastGroup=juniAcctngIgmpMulticastGroup, juniAcctngSelectionPolicyName=juniAcctngSelectionPolicyName, juniAcctngFileEntry=juniAcctngFileEntry, juniAcctngFileXferIndex=juniAcctngFileXferIndex, juniAcctngConformance=juniAcctngConformance, juniAcctngIfInPolicedOctets=juniAcctngIfInPolicedOctets, juniAcctngObsInterfaceTable=juniAcctngObsInterfaceTable, juniAcctngSelectionControl=juniAcctngSelectionControl, juniAcctngParentShapingRate=juniAcctngParentShapingRate, juniAcctngByteAdjType=juniAcctngByteAdjType, juniAcctngBurst=juniAcctngBurst, juniAcctngSelectionMode=juniAcctngSelectionMode, juniAcctngCompliance2=juniAcctngCompliance2, juniAcctngGreenDropBytes=juniAcctngGreenDropBytes, juniAcctngInterfaceDescrFormat=juniAcctngInterfaceDescrFormat, juniAcctngObsInterfaceAdminStatus=juniAcctngObsInterfaceAdminStatus, juniAcctngObsInterfaceEntry=juniAcctngObsInterfaceEntry, juniAcctngAggDropRate=juniAcctngAggDropRate, juniAcctngSelectionIfStackStartTable=juniAcctngSelectionIfStackStartTable, juniAcctngSelectionSchemaIfStack=juniAcctngSelectionSchemaIfStack, juniAcctngGreenBytes=juniAcctngGreenBytes, juniAcctngYellowDropBytes=juniAcctngYellowDropBytes, juniAcctngIfInDiscards=juniAcctngIfInDiscards, juniAcctngVirtualRouterTableIndex=juniAcctngVirtualRouterTableIndex, juniAcctngVirtualRouterTableVRName=juniAcctngVirtualRouterTableVRName, juniAcctngCompliance=juniAcctngCompliance, juniAcctngSelectionTable=juniAcctngSelectionTable, juniAcctngVirtualRouterTableEntry=juniAcctngVirtualRouterTableEntry, juniAcctngSelectionSchemaPolicy=juniAcctngSelectionSchemaPolicy, juniAcctngForwardedBytes=juniAcctngForwardedBytes, juniAcctngSelectionSchemaIf=juniAcctngSelectionSchemaIf, juniAcctngifInMulticastPkts=juniAcctngifInMulticastPkts, juniAcctngIfOutPolicedOctets=juniAcctngIfOutPolicedOctets, juniAcctngInterfaceOperStatus=juniAcctngInterfaceOperStatus, juniAcctngIfInPolicedPkts=juniAcctngIfInPolicedPkts, juniAcctngSelectionIfStackStartEntry=juniAcctngSelectionIfStackStartEntry, juniAcctngIfInErrors=juniAcctngIfInErrors, juniAcctngBasicGroup=juniAcctngBasicGroup, juniAcctngCompliances=juniAcctngCompliances, juniAcctngSharedShapingMode=juniAcctngSharedShapingMode, juniAcctngSelectionSchema=juniAcctngSelectionSchema, juniAcctngIfOutUcastPkts=juniAcctngIfOutUcastPkts, juniAcctngSelectionIfStackIfIndex=juniAcctngSelectionIfStackIfIndex, juniAcctngRedDropBytes=juniAcctngRedDropBytes, juniAcctngAssuredRate=juniAcctngAssuredRate, juniAcctngRedDropPackets=juniAcctngRedDropPackets, juniAcctngSelectionSchemaQos=juniAcctngSelectionSchemaQos, juniAcctngObsInterfaceAccntgFileIndex=juniAcctngObsInterfaceAccntgFileIndex, juniAcctngIfFinalStatsXferStatsTable=juniAcctngIfFinalStatsXferStatsTable, juniAcctngIfOutErrors=juniAcctngIfOutErrors, juniAcctngIgmpSourceIndex=juniAcctngIgmpSourceIndex, juniAcctngIfInSpoofedPkts=juniAcctngIfInSpoofedPkts, juniAcctngBasicGroup6=juniAcctngBasicGroup6, juniAcctngGreenDropPackets=juniAcctngGreenDropPackets, juniAcctngStatisticsProfile=juniAcctngStatisticsProfile, juniAcctngSelectionType=juniAcctngSelectionType, juniAcctngInterfaceNumberingMode=juniAcctngInterfaceNumberingMode, 
juniAcctngUpperRedPackets=juniAcctngUpperRedPackets, juniAcctngYellowBytes=juniAcctngYellowBytes, juniAcctngSelectionPolicyType=juniAcctngSelectionPolicyType, juniAcctngGreenPackets=juniAcctngGreenPackets, juniAcctngInterfaceEntry=juniAcctngInterfaceEntry, juniAcctngDeprecatedGroup=juniAcctngDeprecatedGroup, juniAcctngShapingRate=juniAcctngShapingRate, juniAcctngUpperYellowBytes=juniAcctngUpperYellowBytes, juniAcctngParentChildWeight=juniAcctngParentChildWeight, juniAcctngIgmpRouterIndex=juniAcctngIgmpRouterIndex, juniAcctngIfOutOctets=juniAcctngIfOutOctets, juniAcctngRedPackets=juniAcctngRedPackets, juniAcctngBasicGroup5=juniAcctngBasicGroup5, juniAcctngSelectionSchemaSystem=juniAcctngSelectionSchemaSystem, juniAcctngIfFinalStatsXferStatsDropped=juniAcctngIfFinalStatsXferStatsDropped, juniAcctngIfLowerInterface=juniAcctngIfLowerInterface, juniAcctngObsInterfaceOperStatus=juniAcctngObsInterfaceOperStatus, juniAcctngSelectionMaxIfStackLevels=juniAcctngSelectionMaxIfStackLevels, juniAcctngUpperRedBytes=juniAcctngUpperRedBytes)
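# --- Usage sketch (not part of the generated module) -------------------------
# Compiled pysmi modules such as this one are executed by pysnmp's MibBuilder,
# which supplies the ``mibBuilder`` name used above; they are not imported or
# run directly. A minimal loading sketch (the directory path is illustrative):
#
#     from pysnmp.smi import builder
#
#     mib_builder = builder.MibBuilder()
#     mib_builder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))
#     mib_builder.loadModules('Juniper-ACCOUNTING-MIB')
#     juniAcctngMIB, = mib_builder.importSymbols(
#         'Juniper-ACCOUNTING-MIB', 'juniAcctngMIB')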
"""API Doc templates generator."""
from typing import Any, Dict, List, Optional, Union
from urllib.parse import quote, urljoin
class HydraDoc:
"""Class for an API Doc."""
def __init__(self, API: str, title: str, desc: str,
entrypoint: str, base_url: str, doc_name: str) -> None:
"""Initialize the APIDoc."""
self.API = API
self.entrypoint_endpoint = entrypoint
self.title = title
self.base_url = base_url
self.context = Context("{}".format(urljoin(base_url, API)))
self.parsed_classes = dict() # type: Dict[str, Any]
self.other_classes = list() # type: List[HydraClass]
self.collections = dict() # type: Dict[str, Any]
        self.possible_status = list()  # type: List[Union[HydraStatus, HydraError]]
self.entrypoint = HydraEntryPoint(base_url, entrypoint)
self.desc = desc
self.doc_name = doc_name
self.doc_url = DocUrl(self.base_url, self.API, self.doc_name)
def add_supported_class(
self, class_: 'HydraClass') -> None:
"""Add a new supportedClass.
Raises:
TypeError: If `class_` is not an instance of `HydraClass`
"""
if not isinstance(class_, HydraClass):
raise TypeError("Type is not <HydraClass>")
self.parsed_classes[class_.path] = {
"context": Context(address="{}{}".format(self.base_url, self.API), class_=class_),
"class": class_,
}
def add_supported_collection(self, collection_: 'HydraCollection') -> None:
"""Add a supported Collection
Raises:
TypeError: If `collection_` is not an instance of `HydraCollection`
"""
if not isinstance(collection_, HydraCollection):
raise TypeError("Type is not <HydraCollection>")
self.collections[collection_.path] = {
"context": Context(address="{}{}".format(self.base_url, self.API),
collection=collection_), "collection": collection_}
def add_possible_status(self, status: Union['HydraStatus', 'HydraError']) -> None:
"""Add a new possibleStatus.
Raises:
TypeError: If `status` is not an instance of `HydraStatus`.
"""
if not isinstance(status, HydraStatus):
raise TypeError("Type is not <HydraStatus>")
self.possible_status.append(status)
def add_baseCollection(self) -> None:
"""Add Collection class to the API Doc."""
collection = HydraClass(
_id="http://www.w3.org/ns/hydra/core#Collection", title="Collection", desc=None)
member = HydraClassProp(
"http://www.w3.org/ns/hydra/core#member", "members", False, False, None)
collection.add_supported_prop(member)
self.other_classes.append(collection)
def add_baseResource(self) -> None:
"""Add Resource class to the API Doc."""
resource = HydraClass(
_id="http://www.w3.org/ns/hydra/core#Resource", title="Resource", desc=None)
self.other_classes.append(resource)
def add_to_context(
self, key: str, value: Union[Dict[str, str], str]) -> None:
"""Add entries to the vocabs context."""
self.context.add(key, value)
def gen_EntryPoint(self) -> None:
"""Generate the EntryPoint for the Hydra Doc."""
for class_ in self.parsed_classes:
if self.parsed_classes[class_]["class"].endpoint:
self.entrypoint.add_Class(self.parsed_classes[class_]["class"])
for collection in self.collections:
self.entrypoint.add_Collection(
self.collections[collection]["collection"])
def generate(self) -> Dict[str, Any]:
"""Get the Hydra API Doc as a python dict."""
parsed_classes = [self.parsed_classes[key]["class"]
for key in self.parsed_classes]
collections = [self.collections[key]["collection"]
for key in self.collections]
doc = {
"@context": self.context.generate(),
"@id": "{}/{}".format(urljoin(self.base_url, self.API), self.doc_name),
"@type": "ApiDocumentation",
"title": self.title,
"description": self.desc,
"entrypoint": urljoin(self.base_url, self.entrypoint_endpoint),
"supportedClass": [
x.generate() for x in parsed_classes + self.other_classes + collections + [self.entrypoint]],
"possibleStatus": [status.generate() for status in self.possible_status]
}
return doc
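# A minimal end-to-end sketch of building an API Doc with the classes below
# (all literal values here are illustrative, not fixed by this module):
#
#     api_doc = HydraDoc(API="api", title="Demo API", desc="A demo API Doc",
#                        entrypoint="api", base_url="http://localhost:8080/",
#                        doc_name="vocab")
#     api_doc.add_baseCollection()
#     api_doc.add_baseResource()
#     api_doc.gen_EntryPoint()
#     doc = api_doc.generate()   # plain dict, ready for json.dumps()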
class HydraClass():
"""Template for a new class."""
def __init__(
            self, title: str, desc: str, path: Optional[str] = None,
            endpoint: bool = False, sub_classof: Optional[str] = None, _id: Optional[str] = None) -> None:
"""Initialize the Hydra_Class."""
self.id_ = _id if _id is not None else "{}{}".format(DocUrl.doc_url, title)
self.title = title
self.desc = desc
self.path = path if path else title
self.parents = None
self.endpoint = endpoint
self.supportedProperty = list() # type: List
self.supportedOperation = list() # type: List
if sub_classof is not None:
self.parents = sub_classof
def add_supported_prop(
self, prop: Union['HydraClassProp', 'EntryPointClass', 'EntryPointCollection']) -> None:
"""Add a new supportedProperty.
Raises:
TypeError: If `prop` is not an instance of `HydraClassProp` or `EntryPointClass`
or `EntryPointCollection`
"""
if not isinstance(
prop, (HydraClassProp, EntryPointClass, EntryPointCollection)):
raise TypeError("Type is not <HydraClassProp>")
self.supportedProperty.append(prop)
def add_supported_op(
self, op: Union['EntryPointOp', 'HydraClassOp']) -> None:
"""Add a new supportedOperation.
Raises:
TypeError: If `op` is not an instance of `HydraClassOp` or `EntryPointOp`
"""
if not isinstance(op, (HydraClassOp, EntryPointOp)):
raise TypeError("Type is not <HydraClassOp>")
self.supportedOperation.append(op)
def generate(self) -> Dict[str, Any]:
"""Get the Hydra class as a python dict."""
class_ = {
"@id": self.id_,
"@type": "hydra:Class",
"title": self.title,
"description": self.desc,
"supportedProperty": [x.generate() for x in self.supportedProperty],
"supportedOperation": [x.generate() for x in self.supportedOperation],
}
if self.parents is not None:
class_["subClassOf"] = self.parents
return class_
class HydraClassProp():
"""Template for a new property."""
def __init__(self,
prop: Union[str, 'HydraLink'],
title: str,
read: bool,
write: bool,
required: bool,
desc: str = "",
) -> None:
"""Initialize the Hydra_Prop."""
self.prop = prop
self.title = title
self.read = read
self.write = write
self.required = required
self.desc = desc
def generate(self) -> Dict[str, Any]:
"""Get the Hydra prop as a python dict."""
prop = {
"@type": "SupportedProperty",
"title": self.title,
"required": self.required,
"readable": self.read,
"writeable": self.write
}
if isinstance(self.prop, HydraLink):
prop["property"] = self.prop.generate()
else:
prop["property"] = self.prop
if len(self.desc) > 0:
prop["description"] = self.desc
return prop
class HydraClassOp():
"""Template for a new supportedOperation."""
def __init__(self,
title: str,
method: str,
expects: Optional[str],
returns: Optional[str],
expects_header: List[str] = [],
returns_header: List[str] = [],
possible_status: List[Union['HydraStatus', 'HydraError']] = [],
) -> None:
"""Initialize the Hydra_Prop."""
self.title = title
self.method = method
self.expects = expects
self.returns = returns
self.expects_header = expects_header
self.returns_header = returns_header
self.possible_status = possible_status
def get_type(self, method: str) -> str:
"""Return @type for op based on method type."""
if method == "POST":
return "http://schema.org/UpdateAction"
if method == "PUT":
return "http://schema.org/AddAction"
if method == "DELETE":
return "http://schema.org/DeleteAction"
if method == "GET":
return "http://schema.org/FindAction"
raise NameError("Please select methods from GET, PUT, POST and DELETE")
def generate(self) -> Dict[str, Any]:
"""Get the Hydra op as a python dict."""
op = {
"@type": self.get_type(self.method),
"title": self.title,
"method": self.method,
"expects": self.expects,
"returns": self.returns,
"expectsHeader": self.expects_header,
"returnsHeader": self.returns_header,
"possibleStatus": [status.generate() for status in self.possible_status]
}
return op
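# A sketch of declaring a class with one property and one operation using the
# templates above (names, IRIs and status text are illustrative):
#
#     movie = HydraClass("Movie", "A class for movies", endpoint=True)
#     movie.add_supported_prop(HydraClassProp(
#         "http://schema.org/name", "name", read=True, write=True, required=True))
#     movie.add_supported_op(HydraClassOp(
#         "UpdateMovie", "POST", expects=movie.id_, returns=movie.id_,
#         possible_status=[HydraStatus(code=200, desc="Movie updated.")]))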
class HydraCollection():
"""Class for Hydra Collection."""
def __init__(
self,
            collection_name: Optional[str] = None,
            collection_path: Optional[str] = None,
            collection_description: Optional[str] = None,
            manages: Optional[Union[Dict[str, Any], List]] = None,
get: bool = True, post: bool = True, put: bool = True, delete: bool = True) -> None:
"""Generate Collection for related resources."""
self.collection_id = "{}{}".format(DocUrl.doc_url, quote(collection_name, safe=''))
self.name = collection_name
self.collection_description = collection_description
self.path = collection_path if collection_path else self.name
self.supportedOperation = list() # type: List
self.supportedProperty = [HydraClassProp("http://www.w3.org/ns/hydra/core#member",
"members",
False, False, False,
"The members of {}".format(collection_name))]
self.manages = manages
if get:
get_op = HydraCollectionOp("_:{}_retrieve".format(self.name),
"http://schema.org/FindAction",
"GET", "Retrieves all the members of {}".format(self.name),
None, self.manages['object'], [], [], [])
self.supportedOperation.append(get_op)
if put:
put_op = HydraCollectionOp("_:{}_create".format(self.name), "http://schema.org/AddAction",
"PUT", "Create new member in {}".format(self.name),
self.manages['object'], self.manages['object'], [], [],
[HydraStatus(code=201, desc="A new member in {} created".format(self.name))]
)
self.supportedOperation.append(put_op)
if post:
post_op = HydraCollectionOp("_:{}_update".format(self.name),
"http://schema.org/UpdateAction",
"POST", "Update member of {} ".format(self.name),
self.manages['object'], self.manages['object'], [], [],
[HydraStatus(code=200, desc="If the entity was updated"
"from {}.".format(self.name))]
)
self.supportedOperation.append(post_op)
if delete:
delete_op = HydraCollectionOp("_:{}_delete".format(self.name),
"http://schema.org/DeleteAction",
"DELETE", "Delete member of {} ".format(self.name),
self.manages['object'], self.manages['object'], [], [],
[HydraStatus(code=200, desc="If entity was deleted"
"successfully from {}.".format(self.name))]
)
self.supportedOperation.append(delete_op)
def generate(self) -> Dict[str, Any]:
"""Get as a python dict."""
collection = {
"@id": self.collection_id,
"@type": "Collection",
"subClassOf": "http://www.w3.org/ns/hydra/core#Collection",
"title": self.name,
"description": self.collection_description,
"supportedOperation": [x.generate() for x in self.supportedOperation],
"supportedProperty": [x.generate() for x in self.supportedProperty],
"manages": self.manages
}
return collection
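# A sketch of a collection over the class sketched above; the ``manages`` block
# follows the hydra:manages pattern and must carry an 'object' key, since the
# CRUD operations built in __init__ read self.manages['object'] (values
# illustrative):
#
#     movie_collection = HydraCollection(
#         collection_name="MovieCollection",
#         collection_description="A collection of movies",
#         manages={"property": "rdf:type", "object": movie.id_},
#         get=True, put=True, post=True, delete=False)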
class HydraCollectionOp(HydraClassOp):
"""Operation class for Collection operations."""
def __init__(self,
id_: str,
type_: str,
method: str,
desc: str,
expects: Optional[str],
returns: Optional[str],
expects_header: List[str] = [],
returns_header: List[str] = [],
possible_status: List[Union['HydraStatus', 'HydraError']] = [],
) -> None:
"""Create method."""
self.id_ = id_
self.type_ = type_
self.method = method
self.desc = desc
self.expects = expects
self.returns = returns
self.expects_header = expects_header
self.returns_header = returns_header
self.possible_status = possible_status
def generate(self) -> Dict[str, Any]:
"""Get as a Python dict."""
object_ = {
"@id": self.id_,
"@type": self.type_,
"method": self.method,
"description": self.desc,
"expects": self.expects,
"returns": self.returns,
"expectsHeader": self.expects_header,
"returnsHeader": self.returns_header,
"possibleStatus": [status.generate() for status in self.possible_status]
}
return object_
class HydraEntryPoint():
"""Template for a new entrypoint."""
def __init__(self, base_url: str, entrypoint: str) -> None:
"""Initialize the Entrypoint."""
self.url = base_url
self.api = entrypoint
self.entrypoint = HydraClass("EntryPoint", "The main entry point or homepage of the API.",
_id="{}#EntryPoint".format(urljoin(self.url, self.api)))
self.entrypoint.add_supported_op(EntryPointOp(
"_:entry_point".format(base_url), "GET", "The APIs main entry point.", None, None,
type_="{}/{}#EntryPoint".format(base_url, entrypoint)))
self.context = Context(
"{}{}".format(
base_url,
entrypoint),
entrypoint=self)
        self.collections: List[Dict[str, Any]] = []
def add_Class(self, class_: HydraClass) -> None:
"""Add supportedProperty to the EntryPoint.
Raises:
TypeError: If `class_` is not an instance of `HydraClass`.
"""
if not isinstance(class_, HydraClass):
raise TypeError("Type is not <HydraClass>")
entrypoint_class = EntryPointClass(class_)
self.entrypoint.add_supported_prop(entrypoint_class)
self.context.add(entrypoint_class.name, {
"@id": entrypoint_class.id_, "@type": "@id"})
def add_Collection(self, collection: HydraCollection) -> None:
"""Add supportedProperty to the EntryPoint.
Raises:
TypeError: If `collection` is not an instance of `HydraCollection`.
"""
if not isinstance(collection, HydraCollection):
raise TypeError("Type is not <HydraCollection>")
entrypoint_collection = EntryPointCollection(collection)
self.collections.append(entrypoint_collection.generate())
self.entrypoint.add_supported_prop(entrypoint_collection)
self.context.add(entrypoint_collection.name, {
"@id": entrypoint_collection.id_, "@type": "@id"})
def generate(self) -> Dict[str, Any]:
"""Get as a Python dict."""
return self.entrypoint.generate()
def get(self) -> Dict[str, str]:
"""Create the EntryPoint object to be returnd for the get function."""
object_ = {
"@context": "{}{}/contexts/EntryPoint.jsonld".format(self.url,self.api),
"@id": "{}{}".format(self.url,self.api),
"@type": "EntryPoint",
}
for item in self.entrypoint.supportedProperty:
uri = item.id_
if item.generate() in self.collections:
collection_returned = item.generate()
collection_id = uri.replace(
"{}EntryPoint".format(DocUrl.doc_url), "{}{}".format(self.url,self.api))
collection_to_append = {
"@id": collection_id,
'title': collection_returned['hydra:title'],
'@type': "Collection",
"supportedOperation": collection_returned['property']['supportedOperation'],
"manages": collection_returned['property']['manages']
}
if "collections" in object_:
object_['collections'].append(collection_to_append)
else:
object_['collections'] = []
object_['collections'].append(collection_to_append)
else:
object_[item.name] = uri.replace(
"{}EntryPoint".format(DocUrl.doc_url), "{}{}".format(self.url,self.api))
return object_
class EntryPointCollection():
"""Class for a Collection Entry to the EntryPoint object."""
def __init__(self, collection: HydraCollection) -> None:
"""Create method."""
self.name = collection.name
self.supportedOperation = collection.supportedOperation
self.manages = collection.manages
if collection.path:
self.id_ = "{}EntryPoint/{}".format(DocUrl.doc_url, quote(collection.path, safe=''))
else:
self.id_ = "{}EntryPoint/{}".format(DocUrl.doc_url, quote(self.name, safe=''))
def generate(self) -> Dict[str, Any]:
"""Get as a python dict."""
object_ = {
"property": {
"@id": self.id_,
"@type": "hydra:Link",
"label": self.name,
"description": "The {} collection".format(self.name, ),
"domain": "{}EntryPoint".format(DocUrl.doc_url),
"range": "{}{}".format(DocUrl.doc_url, self.name),
"manages": self.manages,
"supportedOperation": [],
},
"hydra:title": self.name.lower(),
"hydra:description": "The {} collection".format(self.name, ),
"required": None,
"readable": True,
"writeable": False
} # type: Dict[str, Any]
for op in self.supportedOperation:
operation = EntryPointOp(op.id_.lower(), op.method,
op.desc, op.expects, op.returns,
op.expects_header, op.returns_header,
op.possible_status,
type_=op.type_)
object_["property"]["supportedOperation"].append(
operation.generate())
return object_
class EntryPointClass():
"""Class for a Operation Entry to the EntryPoint object."""
def __init__(self, class_: HydraClass) -> None:
"""Create method."""
self.name = class_.title
self.desc = class_.desc
self.supportedOperation = class_.supportedOperation
if class_.path:
self.id_ = "{}EntryPoint/{}".format(DocUrl.doc_url, class_.path)
else:
self.id_ = "{}EntryPoint/{}".format(DocUrl.doc_url, self.name)
def generate(self) -> Dict[str, Any]:
"""Get as Python Dict."""
object_ = {
"property": {
"@id": self.id_,
"@type": "hydra:Link",
"label": self.name,
"description": self.desc,
"domain": "{}EntryPoint".format(DocUrl.doc_url),
"range": "{}{}".format(DocUrl.doc_url, self.name),
"supportedOperation": []
},
"hydra:title": self.name.lower(),
"hydra:description": "The {} Class".format(self.name),
"required": None,
"readable": True,
"writeable": False
} # type: Dict[str, Any]
for op in self.supportedOperation:
operation = EntryPointOp(op.title.lower(), op.method,
None, op.expects, op.returns, op.expects_header,
op.returns_header, op.possible_status,
label=op.title)
object_["property"]["supportedOperation"].append(
operation.generate())
return object_
class EntryPointOp():
"""supportedOperation for EntryPoint."""
def __init__(self,
id_: str,
method: str,
desc: str,
expects: Optional[str],
returns: Optional[str],
expects_header: List[str] = [],
returns_header: List[str] = [],
possible_status: List[Union['HydraStatus', 'HydraError']] = [],
type_: Optional[str] = None,
label: str = "",
) -> None:
"""Create method."""
self.id_ = id_
self.method = method
self.desc = desc
self.expects = expects
self.returns = returns
self.expects_header = expects_header
self.returns_header = returns_header
self.possible_status = possible_status
self.label = label
self.type_ = type_
def get_type(self, method: str) -> str:
"""Return @type for op based on method type."""
if method == "POST":
return "http://schema.org/UpdateAction"
elif method == "PUT":
return "http://schema.org/AddAction"
elif method == "DELETE":
return "http://schema.org/DeleteAction"
else:
return "http://schema.org/FindAction"
def generate(self) -> Dict[str, Any]:
"""Get as Python Dict."""
prop = {
"@id": self.id_,
"@type": self.get_type(self.method),
"method": self.method,
"description": self.desc,
"expects": self.expects,
"returns": self.returns,
"expectsHeader": self.expects_header,
"returnsHeader": self.returns_header,
"possibleStatus": [status.generate() for status in self.possible_status]
}
if self.type_ is not None:
prop["@type"] = self.type_
if len(self.label) > 0:
prop["label"] = self.label
return prop
class IriTemplateMapping():
    """Class for hydra IriTemplateMapping."""
    def __init__(self,
                 variable: str,
                 prop: str,
                 required: bool = False) -> None:
        """Initialize the mapping with the attributes generate() emits."""
        self.variable = variable
        self.prop = prop
        self.required = required
    def generate(self) -> Dict[str, Any]:
        """Get IriTemplateMapping as a python dict."""
iri_template_mapping = {
"@type": "hydra:IriTemplateMapping",
"hydra:variable": self.variable,
"hydra:property": self.prop,
"hydra:required": self.required
}
return iri_template_mapping
class HydraIriTemplate():
    """Class for hydra IriTemplates."""
    def __init__(self, template: str,
                 iri_mapping: List['IriTemplateMapping'] = [],
                 basic_representation: bool = True) -> None:
        """Initialize the template with the attributes generate() emits."""
        self.template = template
        # hydra:variableRepresentation is either Basic or Explicit.
        self.variable_rep = ("hydra:BasicRepresentation" if basic_representation
                             else "hydra:ExplicitRepresentation")
        self.mapping = iri_mapping
    def generate(self) -> Dict[str, Any]:
        """Get IriTemplate as a python dict."""
        base_url = DocUrl.doc_url.rsplit('/', 2)[0]
iri_template = {
"@type": "hydra:IriTemplate",
"hydra:template": "{}{}".format(base_url,self.template),
"hydra:variableRepresentation": self.variable_rep,
"hydra:mapping": [x.generate() for x in self.mapping]
}
return iri_template
class HydraStatus():
"""Class for possibleStatus in Hydra Doc."""
    def __init__(self, code: int, id_: Optional[str] = None, title: str = "", desc: str = "") -> None:
"""Create method."""
self.code = code
self.id_ = id_
self.title = title
self.desc = desc
def generate(self, status_type: str = "Status") -> Dict[str, Any]:
"""Get as Python dict."""
status = {
"@context": "http://www.w3.org/ns/hydra/context.jsonld",
"@type": status_type,
"statusCode": self.code,
"title": self.title,
"description": self.desc,
}
if self.id_ is not None:
status["@id"] = self.id_
return status
class HydraError(HydraStatus):
"""Class for Hydra Error to represent error details."""
    def __init__(self, code: int, id_: Optional[str] = None, title: str = "", desc: str = "") -> None:
"""Create method"""
super().__init__(code, id_, title, desc)
def generate(self) -> Dict[str, Any]:
"""Get error response body"""
error = HydraStatus.generate(self, status_type="Error")
return error
class HydraLink():
"""Template for a link property."""
def __init__(
self, id_: str, title: str = "",
desc: str = "", domain: str = "", range_: str = "") -> None:
"""Initialize the Hydra_Link."""
self.id_ = id_ if "http" in id_ else "{}{}".format(DocUrl.doc_url, id_)
self.range = range_
self.title = title
self.desc = desc
self.domain = domain
self.supportedOperation = list() # type: List
def add_supported_op(
self, op: Union['EntryPointOp', 'HydraClassOp']) -> None:
"""Add a new supportedOperation.
Raises:
TypeError: If `op` is not an instance of `HydraClassOp` or `EntryPointOp`
"""
        if not isinstance(op, (HydraClassOp, EntryPointOp)):
            raise TypeError("Type is not <HydraClassOp> or <EntryPointOp>")
self.supportedOperation.append(op)
def generate(self) -> Dict[str, Any]:
"""Get the Hydra link as a python dict."""
link = {
"@id": self.id_,
"@type": "hydra:Link",
"title": self.title,
"description": self.desc,
"range": self.range,
"domain": self.domain,
"supportedOperation": [x.generate() for x in self.supportedOperation],
}
return link
class Context():
"""Class for JSON-LD context."""
def __init__(self,
address: str,
class_: Optional[HydraClass] = None,
collection: Optional[HydraCollection] = None,
entrypoint: Optional[HydraEntryPoint] = None,
) -> None:
"""Initialize context."""
        # NOTE: depending on which object is supplied, additional
        # context elements are added to the base Hydra context
if class_ is not None:
self.context = {"hydra": "http://www.w3.org/ns/hydra/core#",
"members": "http://www.w3.org/ns/hydra/core#member", "object": "http://schema.org/object",
class_.title: class_.id_} # type: Dict[str, Any]
for prop in class_.supportedProperty:
if isinstance(prop.prop, HydraLink):
self.context[prop.title] = prop.prop.id_
else:
self.context[prop.title] = prop.prop
elif collection is not None:
self.context = {"hydra": "http://www.w3.org/ns/hydra/core#",
"members": "http://www.w3.org/ns/hydra/core#member",
collection.name: collection.collection_id}
elif entrypoint is not None:
self.context = {
"EntryPoint": "{}EntryPoint".format(DocUrl.doc_url),
}
else:
self.context = {
"hydra": "http://www.w3.org/ns/hydra/core#",
"property": {
"@type": "@id",
"@id": "hydra:property"
},
"supportedClass": "hydra:supportedClass",
"supportedProperty": "hydra:supportedProperty",
"supportedOperation": "hydra:supportedOperation",
"label": "rdfs:label",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"domain": {
"@type": "@id",
"@id": "rdfs:domain"
},
"ApiDocumentation": "hydra:ApiDocumentation",
"range": {
"@type": "@id",
"@id": "rdfs:range"
},
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"title": "hydra:title",
"expects": {
"@type": "@id",
"@id": "hydra:expects"
},
"returns": {
"@id": "hydra:returns",
"@type": "@id"
},
"entrypoint": {
"@id": "hydra:entrypoint",
"@type": "@id"
},
"object": {
"@id": "hydra:object",
"@type": "@id"
},
"subject": {
"@id": "hydra:subject",
"@type": "@id"
},
"readable": "hydra:readable",
"writeable": "hydra:writeable",
"possibleStatus": "hydra:possibleStatus",
"required": "hydra:required",
"method": "hydra:method",
"statusCode": "hydra:statusCode",
"description": "hydra:description",
"expectsHeader": "hydra:expectsHeader",
"returnsHeader": "hydra:returnsHeader",
"manages": "hydra:manages",
"subClassOf": {
"@id": "rdfs:subClassOf",
"@type": "@id"
},
"search": "hydra:search"
}
    def createContext(self, object_: Union[HydraClass, HydraCollection]) -> None:
        """Create the context for the given object."""
        if isinstance(object_, HydraClass):
            self.add(object_.title, object_.id_)
            for prop in object_.supportedProperty:
                self.add(prop.title, prop.prop)
        if isinstance(object_, HydraCollection):
            self.add(object_.name, "{}:{}".format(DocUrl.doc_url, object_.name))
            self.add(object_.class_.title, object_.class_.id_)
def generate(self) -> Dict[str, Any]:
"""Get as a python dict."""
return self.context
def add(self, key: str, value: Union[Dict[str, str], str]) -> None:
"""Add entry to context."""
self.context[key] = value
| [input_ids truncated in this export] | 2.034615 | 15,687
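A quick editorial sketch, not part of the dataset row above, exercising the two status serializers it defines; both classes are self-contained, so this runs once appended to that module:

not_found = HydraStatus(code=404, title="Not Found", desc="Resource missing")
print(not_found.generate())        # Status dict: '@type' 'Status', 'statusCode' 404, ...
bad_request = HydraError(code=400, id_="errors/400", title="Bad Request")
print(bad_request.generate())      # same shape, but '@type' 'Error' plus the '@id'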
"""
Grafana-Script functions module
This is the functions module of Grafana-Script
:license: MIT, see LICENSE for more details
:copyright: (c) 2020 by NETHINKS GmbH, see AUTHORS for more details
"""
import xml.etree.ElementTree as ET
import requests
from grafana_script.config import ScriptConfig
CONF = ScriptConfig()
class OpennmsFunctions:
"""
Functions for OpenNMS to get all necessary information
"""
def __init__(self):
"""
Get OpenNMS username, password and url for requests
"""
self.o_user = CONF.get_value('OpenNMS', 'user')
self.o_password = CONF.get_value('OpenNMS', 'password')
self.o_url = CONF.get_value('OpenNMS', 'base_url')
self.o_protocol = CONF.get_value('OpenNMS', 'protocol')
def associate_to_id(self):
"""
Creates a dictionary in the format {foreignID:nodeID}
This is needed for the interface correction since we dont got
the node id but its needed in the rest call in OpenNMS
"""
opennms_address = self.o_url + '/opennms/rest/nodes?limit=0'
opennms_access = '%s://%s:%s@%s' % (self.o_protocol, self.o_user, self.o_password, opennms_address)
requestdata = requests.get(opennms_access, verify=False).text
nodes_xml = ET.fromstring(requestdata.encode('utf-8'))
nodes = nodes_xml.findall('node')
foreign_to_id = {}
for entry in nodes:
foreign_to_id.update({entry.get('foreignId'): entry.get('id')})
return foreign_to_id
def corrected_interface(self, foreign_to_id, object_id, interface):
"""
In the datasource you only write something like Fa0, Gi0, etc...
for your Interface. This functions takes it and checks for the right
interface needed in Grafana :
Gi0/0/0 => interfaceSnmp[Gi0_0_0-70f35a7135d1]
"""
opennms_node_id = foreign_to_id[str(object_id)]
resourceid = ''
opennms_address = self.o_url + '/opennms/rest/nodes/%s/snmpinterfaces?limit=0' % opennms_node_id
access = '%s://%s:%s@%s' % (self.o_protocol, self.o_user, self.o_password, opennms_address)
nodes = requests.get(access, verify=False).text
nodes_xml = ET.fromstring(nodes.encode('utf-8'))
all_interfaces = nodes_xml.findall('snmpInterface')
if nodes_xml.get('totalCount') != '0':
for data_entry in all_interfaces:
interfaces = data_entry.find('ifName').text
if interfaces == interface:
try:
phys_addr = data_entry.find('physAddr').text
interface = interface.replace('/', '_').replace('.', '_')
resourceid = 'interfaceSnmp' + '[' + '%s' % interface + '-' + phys_addr + ']'
except AttributeError:
resourceid = 'interfaceSnmp' + '[' + '%s' % interface + ']'
return resourceid
def corrected_interface_from_resources(self, foreign_to_id, object_id, interface):
"""
In the datasource you only write something like Fa0, Gi0, etc...
for your Interface. This functions takes it and checks for the right
interface needed in Grafana :
Gi0/0/0 => interfaceSnmp[Gi0_0_0-70f35a7135d1]
"""
opennms_node_id = foreign_to_id[str(object_id)]
resourceid = ''
opennms_address = self.o_url + '/opennms/rest/resources/fornode/%s' % opennms_node_id
access = '%s://%s:%s@%s' % (self.o_protocol, self.o_user, self.o_password, opennms_address)
resources = requests.get(access, verify=False).text
resources_xml = ET.fromstring(resources.encode('utf-8'))
for children in resources_xml:
if children.attrib:
for resource in children:
if 'SNMP Interface Data' in resource.attrib.values():
interface_corr = interface.replace('/', '_').replace('.', '_') + '-'
if resource.attrib['name'].startswith(interface_corr) or resource.attrib['name'] == interface:
resourceid = 'interfaceSnmp' + '[' + '%s' % resource.attrib['name'] + ']'
return resourceid
def hc_octets_check(self, foreign_to_id, object_id, interface):
"""
Check if the resouce has ifHcInOctets or only provides ifInOctets
"""
opennms_node_id = foreign_to_id[str(object_id)]
opennms_address = self.o_url + '/opennms/rest/measurements/node[%s].%s/ifHCInOctets?&aggregation=AVERAGE'%(opennms_node_id, interface)
access = '%s://%s:%s@%s' % (self.o_protocol, self.o_user, self.o_password, opennms_address)
response = requests.get(access)
        hc_octets_available = 'true' if response.status_code == 200 else 'false'
return hc_octets_available
| [input_ids truncated in this export] | 2.233123 | 2,222
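A hedged editorial sketch of the lookup flow above; the foreign ID 'router01' and the interface name are invented, and a reachable OpenNMS instance with valid credentials in the config is assumed:

onms = OpennmsFunctions()
fid_to_nid = onms.associate_to_id()      # e.g. {'router01': '42', ...}
rid = onms.corrected_interface(fid_to_nid, 'router01', 'Gi0/0/0')
print(rid)                               # e.g. 'interfaceSnmp[Gi0_0_0-70f35a7135d1]'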
"""maxibonkata.chat
This module contains the Chat class implementation.
:copyright: 2019, Karumi & The Cocktail
:license: Apache License. See LICENSE.txt file for further details.
"""
| [input_ids truncated in this export] | 3.528302 | 53
# Copyright 2021 Kartik Sharma. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bboxes encoder simple test."""
import pytest
import tensorflow as tf
from absl import logging
from detectia.engine import builder
from detectia.config import Config
# set tensorflow random seed.
tf.random.set_seed(111111)
@pytest.mark.parametrize(
["backbone"],
[
pytest.param("efficientnet-b0"),
pytest.param("efficientnet-b1"),
pytest.param("efficientnet-b2"),
],
)
if __name__ == "__main__":
logging.set_verbosity(logging.WARNING)
tf.test.main()
| [input_ids truncated in this export] | 3.348442 | 353
"""
Este código possui partes desenvolvidas ou baseadas em código desenvolvido por Thales Otávio
Link do GitHub: https://github.com/thalesorp/NSGA-II
"""
import sys
import random
from .individual import Individual
class Population:
"""Class of population of indiviuals, used by NSGA-II"""
    def initiate(self, n_individuals):
        """Initialize a new population"""
        for _ in range(n_individuals):
            # List of integers representing the individual's genome
            genome = list()
            # Generate a new individual, always checking that the amount of garbage does not exceed the limit
            while len(genome) <= self.genotype_quantity:
                genotype = random.randrange(self.genome_min_value, self.genome_max_value + 1)
                genome.append(genotype)
                # Check whether the new gene exceeded the individual's capacity
                # TODO Exclude unnecessary points when generating the garbage demands
                # TODO Use k-means to cluster the points
                # TODO Finish generating the individual and computing the amount of garbage on its route
                if self.calcula_quantidade_lixo(genome) > self.capacidade_caminhao:
                    # Remove the last gene, which overloaded the route
                    genome.pop()
                    # End the loop
                    break
            self.new_individual(genome)
    def calcula_quantidade_lixo(self, genoma):
        """Compute and return the amount of garbage for a given genome"""
        # If the genome is empty, the amount of garbage is 0
        if len(genoma) == 0:
            return 0
def new_individual(self, genome):
"""Create a new individual with "genome" and insert into population"""
self.insert(Individual(genome))
def insert(self, individual):
"""Insert a new individual into population"""
self.individuals.append(individual)
self.size += 1
def delete_individual(self, individual):
"""Delete "individual" from population"""
self.individuals.remove(individual)
self.size -= 1
def union(self, population):
'''Union operation over "population" and current population'''
for individual in population.individuals:
self.insert(individual)
# Front utils
def reset_fronts(self):
"""Delete all fronts and prepare the population to be sorted in fronts"""
for individual in self.individuals:
individual.domination_count = 0
individual.dominated_by = list()
self.fronts = list()
def new_front(self):
"""Start a new front"""
self.fronts.append([])
def get_random_individual(self):
"""Return a random individual of this population"""
index = random.randint(0, self.size-1)
return self.individuals[index]
def add_to_front(self, index, individual):
"""Add the individual into "index" front"""
self.fronts[index].append(individual)
def get_last_front_index(self):
"""Retun the index of last front"""
return len(self.fronts)-1
def add_to_last_front(self, individual):
"""Add individual to last front"""
self.fronts[self.get_last_front_index()].append(individual)
def get_last_front(self):
"""Return the last front"""
return self.fronts[len(self.fronts)-1]
def delete_individual_from_last_front(self, individual):
"""Deletes the individual from front AND from individuals list"""
        # Deleting the matching individual from the last front
last_front = self.get_last_front()
index = last_front.index(individual)
del last_front[index]
self.delete_individual(individual)
def delete_last_front(self):
"""Deleting the last front and the individuals inside"""
last_front = self.get_last_front()
for individual in last_front:
self.delete_individual(individual)
self.fronts.remove(last_front)
# Crowding Distance utils
def get_neighbour(self, individual_genome, front_index, genome_index):
"""Return the left and right neighbour values of "individual_genome\""""
genome_list = list()
for individual in self.fronts[front_index]:
genome_list.append(individual.genome[genome_index])
genome_list.sort()
individual_genome_index = genome_list.index(individual_genome)
        # Usually, the indices are as described below
        left_neighbour_index = individual_genome_index - 1
        right_neighbour_index = individual_genome_index + 1
        # When there is no neighbour on one side, the closest
        # neighbour is the individual itself
        if individual_genome_index == 0:
            left_neighbour_index = 0
        if individual_genome_index == (len(genome_list)-1):
            right_neighbour_index = (len(genome_list)-1)
return genome_list[left_neighbour_index], genome_list[right_neighbour_index]
def get_extreme_neighbours(self, genome_index):
"""Return the highest and lowest neighbour values of "individual_genome\""""
genome_list = list()
for individual in self.individuals:
genome_list.append(individual.genome[genome_index])
return min(genome_list), max(genome_list)
# Utils
def _show_individuals(self):
"""Show the values of each individual of population"""
result = "INDIVIDUALS:\n"
i = 1
for individual in self.individuals:
result += (" [" + str(i) + "] " + str(individual) + "\n")
i += 1
print(result)
def _show_front(self, front_index):
"""Show only front with "front_index\""""
result = "FRONT:\n"
j = 0
for individual in self.fronts[front_index]:
j += 1
result += (" [" + str(j) + "] " + str(individual) + "\n")
result += "\n"
print(result)
def _show_fronts_simple(self):
"""Show all fronts"""
result = "FRONTS:\n"
i = 0
for front in self.fronts:
i += 1
result += "FRONT NUMBER " + str(i) + ":\n"
j = 0
for individual in front:
j += 1
result += (" [" + str(j) + "] " + individual.__str_genome__() + "\n")
result += "\n"
print(result)
def _show_general_domination_info(self):
"""Show all data of population"""
for individual in self.individuals:
sys.stdout.write(" Individual: " + str(individual)
+ "\tdomination count: " + str(individual.domination_count)
+ "\tdominated by this: ")
for dominated_individual in individual.dominated_by:
sys.stdout.write(str(dominated_individual.name) + ", ")
print("")
print("")
def _show_fronts_with_crowding_distance(self):
"""Show all fronts"""
i = 1
for front in self.fronts:
sys.stdout.write("Front " + str(i) + ": ")
i += 1
for individual in front:
sys.stdout.write(str(individual)+ ".CD: "
+ str(individual.crowding_distance) + ", ")
print("")
| [input_ids truncated in this export] | 2.307621 | 3,254
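The boundary handling in get_neighbour above amounts to clamping the neighbour indices to the list ends; a standalone editorial illustration with invented values:

genome_list = sorted([3, 7, 9])
i = genome_list.index(3)                               # leftmost value
left = genome_list[max(i - 1, 0)]                      # no left neighbour -> itself (3)
right = genome_list[min(i + 1, len(genome_list) - 1)]  # 7
print(left, right)                                     # 3 7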
from django.contrib import admin
from django.forms.fields import ChoiceField
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.urls import path
from django.utils.html import format_html, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons.models import Addon
from olympia.amo.urlresolvers import reverse
from .models import Block
@admin.register(Block)
| [input_ids truncated in this export] | 3.5875 | 160
#!/usr/bin/python
from os.path import expanduser, dirname, exists
from os import makedirs
import sys
from datetime import datetime
from optparse import OptionParser
import logging
import logging.handlers
from config_resolver import Config
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
CONSOLE_HANDLER = logging.StreamHandler()
CONSOLE_HANDLER.setLevel(logging.INFO)
CONSOLE_HANDLER.setFormatter(FORMATTER)
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger().addHandler(CONSOLE_HANDLER)
CONF = Config('wicked', 'metafilter')
from metafilter.model import Session, CONFIG, set_dsn
from metafilter.model.nodes import update_nodes_from_path, remove_orphans, calc_md5
error_log = expanduser(CONFIG.get('cli_logging', 'error_log', None))
if error_log:
if not exists(dirname(error_log)):
LOG.info('Creating logging folder: %s' % dirname(error_log))
makedirs(dirname(error_log))
ERROR_HANDLER = logging.handlers.RotatingFileHandler(filename=error_log, maxBytes=102400, backupCount=5)
ERROR_HANDLER.setLevel(logging.WARNING)
ERROR_HANDLER.setFormatter(FORMATTER)
logging.getLogger().addHandler(ERROR_HANDLER)
if __name__ == '__main__':
main()
| [input_ids truncated in this export] | 2.714885 | 477
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Draft Version ... presented to illustrate how the problem shifts and how later solutions address this
# * In this version, several solutions from Stack Overflow are added into the code.
# * the result is that one encode error is solved but another one shifts to a decode error
# * one suggested solution for decode is used and though it worked for others, it fails here
import requests
import datetime
import re
# this function was created instead of modifying __str__ because in testing, this error cropped up
# both in the use of a print() statement all by itself, and in an event.__str__ call
text = requests.get('https://www.python.org/events/python-user-group/').text
timePattern = '<time datetime="[\w:+-]+">(.+)<span class="say-no-more">([\d ]+)</span>(.*)</time>'
locationPattern = '<span class="event-location">(.*)</span>'
titlePattern = '<h3 class="event-title"><a href=".+">(.*)</a></h3>'
time = re.findall(timePattern, text)
time = [''.join(i) for i in time]
location = re.findall(locationPattern, text)
title = re.findall(titlePattern, text)
events = [event(title[i], time[i], location[i]) for i in range(len(title))]
for i in events:
print (30*'-')
print(i) # bug fix: i is in events, so this calls __str__ in the object
print (' Time : %s' %i.time)
    print (str_Intl(' Location: %s' %i.location)) # when bug happened here, had to add str_Intl as bug fix
| [input_ids truncated in this export] | 2.895551 | 517
import gym
import tensorflow as tf
import tensorflow.contrib.layers as layers
from utils.general import get_logger
from q1_schedule import LinearExploration, LinearSchedule
from q2_linear import Linear
from configs.q7 import config
import pandas as pd
import trading_env
class MyDQNCNN(Linear):
"""
CNN
"""
def get_q_values_op(self, state, scope, reuse=False):
"""
Returns Q values for all actions
Args:
state: (tf tensor)
shape = (batch_size, img height, img width, nchannels)
scope: (string) scope name, that specifies if target network or not
reuse: (bool) reuse of variables in the scope
Returns:
out: (tf tensor) of shape = (batch_size, num_actions)
"""
# this information might be useful
num_actions = self.env.action_space.n
out = state
print state.shape
        with tf.variable_scope(scope, reuse=reuse) as ts:
#bn = tf.layers.batch_normalization(
# inputs=layers.flatten(state))
# valid padding
#conv1_1 = tf.layers.conv2d(inputs=state, filters=16, kernel_size=[1, 5])
#conv1_2 = tf.layers.conv2d(inputs=state, filters=16, kernel_size=[config.state_history, 1])
#concat = tf.concat(layers.flatten(conv1_1), layers.flatten(conv1_2), 1)
conv1 = tf.layers.conv2d(inputs=state, filters=16, kernel_size=[1, 1])
bn = tf.layers.batch_normalization(inputs=layers.flatten(conv1))
full = layers.fully_connected(bn, num_outputs=256)
out = layers.fully_connected(
inputs=full, num_outputs=num_actions, activation_fn=None)
return out
"""
Use a different architecture for the Atari game. Please report the final result.
Feel free to change the configuration. If so, please report your hyperparameters.
"""
if __name__ == '__main__':
# make env
df = pd.read_csv('dataset/btc_indexed2.csv')
env = trading_env.make(env_id='training_v1', obs_data_len=1, step_len=1,
df=df, fee=0.003, max_position=5, deal_col_name='close',
return_transaction=False, sample_days=30,
feature_names=['low', 'high', 'open', 'close', 'volume'])
env.reset()
# exploration strategy
# you may want to modify this schedule
exp_schedule = LinearExploration(env, config.eps_begin,
config.eps_end, config.eps_nsteps)
# you may want to modify this schedule
# learning rate schedule
lr_schedule = LinearSchedule(config.lr_begin, config.lr_end,
config.lr_nsteps)
# train model
model = MyDQNCNN(env, config)
model.run(exp_schedule, lr_schedule)
| [input_ids truncated in this export] | 2.153046 | 1,346
import time
import threading
import praw
import common
| [11748, 640, 198, 11748, 4704, 278, 198, 11748, 279, 1831, 198, 198, 11748, 2219, 628] | 3.8 | 15
import sys
def fibonacci(n):
    # Iterative Fibonacci; this definition was missing from the original snippet.
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print('Usage: %s [length]' % sys.argv[0])
        sys.exit(1)
    print( fibonacci( int(sys.argv[1]) ) )
| [input_ids truncated in this export] | 2 | 90
import unittest
from unittest.mock import patch
from pott.option import Option
from pott.paper import Paper
from pott.yaml import Yaml
from pott.assistants.assistant import Assistant
from pott.assistants.global_assistant import GlobalAssistant
from pott.assistants.local_assistant import LocalAssistant
if __name__ == "__main__":
unittest.main()
| [input_ids truncated in this export] | 3.238532 | 109
#!/usr/bin/env python
# run with --sweep (or by default):
# given a service, looks through current members for those missing an account on that service,
# and checks that member's official website's source code for mentions of that service.
# A CSV of "leads" is produced for manual review.
#
# run with --update:
# reads the CSV produced by --sweep back in and updates the YAML accordingly.
#
# run with --clean:
# removes legislators from the social media file who are no longer current
#
# run with --resolvefb:
# finds both Facebook usernames and graph IDs and updates the YAML accordingly.
#
# run with --resolveyt:
# finds both YouTube usernames and channel IDs and updates the YAML accordingly.
# other options:
# --service (required): "twitter", "youtube", or "facebook"
# --bioguide: limit to only one particular member
# --email:
# in conjunction with --sweep, send an email if there are any new leads, using
# settings in scripts/email/config.yml (if it was created and filled out).
# uses a CSV at data/social_media_blacklist.csv to exclude known non-individual account names
import csv, re
import utils
from utils import load_data, save_data
import requests
if __name__ == '__main__':
    main()
| [input_ids truncated in this export] | 3.410468 | 363
#!/usr/bin/env python3
# encoding:utf8
"""An extensible, command based bot interface for the Signal Messenger.
"""
import functools
import json
import logging
import os
import random
import time
import uuid
import wikipedia
from datetime import datetime, timedelta
from mysignald import MySignal
from pymongo import MongoClient
from utils import parse_weather_location
workdir = os.getcwd() + "/"
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
handlers=[
# logging.FileHandler(filename=workdir + "signalbot.log"),
logging.StreamHandler()
]
)
logging.info("### Starting... ###")
filetime = datetime.fromtimestamp(os.path.getmtime("cities500.sqlite"))
if datetime.now() - filetime > timedelta(days=30):
logging.warning("GeoNames database is outdated. You can update it using "
"`geonames-txt2sqlite.py`")
with open(workdir + "config.json", "r") as fh:
config = json.load(fh)
USERNAME = config["username"]
ROOT = config["root"]
db_user = os.environ.get("MONGO_INITDB_ROOT_USERNAME")
db_password = os.environ.get("MONGO_INITDB_ROOT_PASSWORD")
client = MongoClient(f"mongodb://{db_user}:{db_password}@db:27017/")
USERS = client.signald.users
logging.info("### mongoDB ###")
logging.info(f"server version: {client.server_info()['version']}")
logging.info(f"available databases: {client.list_database_names()}")
s = MySignal(USERNAME, socket_path="/signald/signald.sock")
logging.info("Notifying Root")
s.send(text="I am up!", recipient=ROOT)
logging.info("Ready!")
def authenticated(func):
    """Decorator to authenticate user against database and provide command,
    body and user to wrapped function."""
    @functools.wraps(func)
    def wrapper(message, *args, **kwargs):
        user = USERS.find_one({"number": message.source})  # minimal reconstruction; names assumed
        if user is None:
            return None
        command, _, body = message.text.partition(" ")
        return func(command, body, user, *args, **kwargs)
    return wrapper
@s.chat_handler("^(hi|hello).*$")
@authenticated
@s.chat_handler("^ping.*$")
@authenticated
@s.chat_handler("^coin.*$")
@authenticated
@s.chat_handler("^(wiki|wikipedia).*$")
@authenticated
@s.chat_handler("^help.*$")
@authenticated
@s.chat_handler("^add.*$")
@authenticated
@s.chat_handler("^notes.*$")
@authenticated
@s.chat_handler("^subscribe.*$")
@authenticated
@s.chat_handler("^unsubscribe.*$")
@authenticated
@s.chat_handler("^weather.*$")
@authenticated
@s.chat_handler("")
@authenticated
if __name__ == "__main__":
s.auto_message_generator = auto_message_generator
s.run_chat()
| [input_ids truncated in this export] | 2.696288 | 889
#!/usr/bin/env python
#encoding: utf-8;
from pwn import *
import sys,os
hosts = ("","localhost","localhost")
ports = (0,12300,23947)
rhp1 = {'host':hosts[0],'port':ports[0]} #for actual server
rhp2 = {'host':hosts[1],'port':ports[1]} #for localhost
rhp3 = {'host':hosts[2],'port':ports[2]} #for localhost running on docker
exploit_bin = "exploit.gz.b64"
## exploit ###########################################
## main ##############################################
if __name__ == "__main__":
global c
_overwrite_targets()
if len(sys.argv)>1:
if sys.argv[1][0]=="d":
cmd = """
set follow-fork-mode parent
"""
c = gdb.debug(FILENAME,cmd)
elif sys.argv[1][0]=="r":
print("[+] Using target as {}:{}".format(rhp1["host"], rhp1["port"]))
c = remote(rhp1["host"],rhp1["port"])
elif sys.argv[1][0]=="v":
c = remote(rhp3["host"],rhp3["port"])
else:
c = remote(rhp2['host'],rhp2['port'])
exploit()
c.interactive()
| [input_ids truncated in this export] | 2.186441 | 472
#!/usr/bin/env python
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from bcc import BPF
from builtins import input
from ctypes import c_uint,c_int, c_ulong
from pyroute2 import IPRoute, IPDB, NSPopen
from simulation import Simulation
from netaddr import IPAddress
import socket, struct
ipr = IPRoute()
ipdb = IPDB(nl=ipr)
num_hosts = 4
null = open("/dev/null", "w")
try:
sim = TunnelSimulation(ipdb)
sim.start()
input("Press enter to quit:")
except Exception,e:
print str(e)
if "sim" in locals():
for p in sim.processes: p.kill(); p.wait(); p.release()
finally:
if "bc" in ipdb.interfaces: ipdb.interfaces["bc"].remove().commit()
if "sim" in locals(): sim.release()
ipdb.release()
null.close()
| [input_ids truncated in this export] | 2.620462 | 303
from Automovel import Automovel
| [6738, 17406, 78, 626, 1330, 17406, 78, 626, 198] | 3.555556 | 9
import mock
| [11748, 15290, 628, 628] | 3.75 | 4
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import os.path
import gzip
import os
data = load_iris()
Xtrain, Xtest, Ytrain, Ytest = train_test_split(data.data, data.target, test_size=0.3, random_state=4)
# Save to file in the current working directory
# model = LogisticRegression(C=0.1,
# max_iter=20,
# fit_intercept=True,
# n_jobs=3,
# solver='liblinear')
# model.fit(Xtrain, Ytrain)
joblib_file = "joblib_model.pkl"
# joblib.dump(model, joblib_file)
# Load from file
joblib_model = joblib.load(joblib_file)
# Calculate the accuracy and predictions
score = joblib_model.score(Xtest, Ytest)
print("Test score: {0:.2f} %".format(100 * score))
Ypredict = joblib_model.predict(Xtest)
| [input_ids truncated in this export] | 2.170732 | 451
#!/usr/bin/env python
import rospy
import numpy as np
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import JointState
# build inverted matrix
# A is control allocation matrix
# 0.2075 and 0.2375 are moment arms measured from the centre of Mallard, i.e. half the distance between thrusters
A = np.mat("1 1 0 0; 0 0 -1 -1; 0.2075 -0.2075 0.2375 -0.2375")
# Calculate the Moore-Penrose pseudoinverse
pseudoinv = np.linalg.pinv(A)
if __name__ == '__main__':
rospy.init_node('Cmd2thr')
cmd2thr()
# rospy.spin()
| [input_ids truncated in this export] | 2.623853 | 218
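Given the allocation matrix A and its pseudoinverse above, mapping a desired body wrench to the four thruster commands is a single matrix product; an editorial sketch with invented force/moment values:

import numpy as np
A = np.mat("1 1 0 0; 0 0 -1 -1; 0.2075 -0.2075 0.2375 -0.2375")
pseudoinv = np.linalg.pinv(A)
wrench = np.array([[1.0], [0.5], [0.1]])  # surge force, sway force, yaw moment
thrusts = pseudoinv * wrench              # 4x1 vector of per-thruster forces
print(np.ravel(thrusts))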
from pypy.interpreter.error import oefmt
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.typedef import TypeDef, interp_attrproperty
from pypy.interpreter.typedef import GetSetProperty
from pypy.interpreter.gateway import interp2app
from rpython.rlib import jit
from pypy.module._cffi_backend.cdataobj import W_CData
from pypy.module._cffi_backend.cdataobj import W_CDataPtrToStructOrUnion
from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray
from pypy.module._cffi_backend.ctypeptr import W_CTypePointer
from pypy.module._cffi_backend.ctypefunc import W_CTypeFunc
from pypy.module._cffi_backend.ctypestruct import W_CTypeStructOrUnion
from pypy.module._cffi_backend import allocator
class W_FunctionWrapper(W_Root):
"""A wrapper around a real W_CData which points to a function
generated in the C code. The real W_CData has got no struct/union
argument (only pointers to it), and no struct/union return type
(it is replaced by a hidden pointer as first argument). This
wrapper is callable, and the arguments it expects and returns
are directly the struct/union. Calling ffi.typeof(wrapper)
also returns the original struct/union signature.
This class cannot be used for variadic functions.
"""
_immutable_ = True
@jit.unroll_safe
W_FunctionWrapper.typedef = TypeDef(
'_cffi_backend.__FFIFunctionWrapper',
__repr__ = interp2app(W_FunctionWrapper.descr_repr),
__call__ = interp2app(W_FunctionWrapper.descr_call),
__name__ = interp_attrproperty('fnname', cls=W_FunctionWrapper, wrapfn="newtext"),
__module__ = interp_attrproperty('modulename', cls=W_FunctionWrapper, wrapfn="newtext"),
__doc__ = GetSetProperty(W_FunctionWrapper.descr_get_doc),
__get__ = interp2app(W_FunctionWrapper.descr_get),
)
W_FunctionWrapper.typedef.acceptable_as_base_class = False
| [input_ids truncated in this export] | 2.702128 | 705
from django.contrib import admin
from .models import Label
admin.site.register(Label)
| [6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 764, 27530, 1330, 36052, 198, 198, 28482, 13, 15654, 13, 30238, 7, 33986, 8] | 3.583333 | 24
# ! are the imports needed if they are defined in main?
import arcpy
import os
import shutil
from helpers import *
# ! are we using tni at all? EHHHH NOT REALLY
# Compute Transit Need Index (TNI) based on the 2003 service standards for each census blockgroup.
# Use the minority, income, age and car ownership data computed in prior functions as inputs, and
# add a feature class indicating TNI to the final output gdb (final_gdb_loc)
| [input_ids truncated in this export] | 3.808696 | 115
#!/usr/bin/env python
N = 128
vals = [-1]*N
for i in range(N):
ch = chr(i)
if 'A' <= ch <= 'Z':
vals[i] = i - ord('A')
elif 'a' <= ch <= 'z':
vals[i] = i - ord('a')
def fmtgrp(grp):
    # Helper missing from the snippet: render one group as decimal strings.
    return [str(v) for v in grp]

groupsz = 16
grps = []
grp = []
for v in vals:
if len(grp) >= groupsz:
grps.append(fmtgrp(grp))
grp = []
grp.append(v)
if grp:
grps.append(fmtgrp(grp))
print("""#pragma once
#include <cassert>
// Generated by mkiconv.py. Do not edit by hand.
""")
print(f"constexpr int iconv_table[{len(vals)}] = {{")
for grp in grps:
print(f" {', '.join(grp)},")
print("};")
print("""
constexpr int iconv(char c) noexcept {
#ifndef NDEBUG
assert(('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z'));
return iconv_table[static_cast<unsigned char>(c & 0x7Fu)];
#else
return iconv_table[static_cast<unsigned char>(c)];
#endif
}
""")
| [input_ids truncated in this export] | 2.045024 | 422
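For reference, the generated table maps letters to their alphabet index and everything else to -1; a quick editorial self-check against the vals list built above:

assert vals[ord('A')] == 0 and vals[ord('Z')] == 25
assert vals[ord('a')] == 0 and vals[ord('z')] == 25
assert vals[ord('0')] == -1   # non-letters stay at -1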
import pytest
from ckanapi import LocalCKAN, NotFound
| [11748, 12972, 9288, 198, 198, 6738, 269, 27541, 15042, 1330, 10714, 34, 42, 1565, 11, 1892, 21077, 628] | 3.111111 | 18
#!/usr/bin/env python3
'''
Translates Unix TMG driving table from PDP-11 assembly to C arrays.
Assumptions:
- string literals are single line
- byte directive has two octal values
'''
from sys import argv
if __name__=='__main__':
t = Translator()
t.start()
for s in file_statements(argv[1]):
t.translate(s)
t.end()
| [input_ids truncated in this export] | 2.536765 | 136
url='https://us-cert.cisa.gov/ics/advisories/icsa-21-154-01'
advisory=url.replace('https://us-cert.cisa.gov/ics/advisories/','')
print(advisory)
| [input_ids truncated in this export] | 2.138889 | 72
from .docker_operator import *
from .subdag_operator import *
| [6738, 764, 45986, 62, 46616, 1330, 1635, 198, 6738, 764, 7266, 67, 363, 62, 46616, 1330, 1635, 198] | 3.444444 | 18
from .field import Field
from scipy import special
from .core import BeamMix, Phase, MultPhase, IntAttenuator, CircAperture, GaussHermite, GaussLaguerre, Interpol
from .misc import PI,Tilt, backward_compatible
import numpy as _np
def AiryBeam1D(Fin, x0 = 0.001, a = 100):
"""
*Creates a 1D nonspreading Airy beam.*
:math:`F(x,y,z=0) = Ai\\left(\\dfrac{x}{x_0}\\right)e^{ax}`
:param Fin: input field
:type Fin: Field
:param x0: scale x (default = 1*mm)
:type x0: int, float
:param a: degree of apodization x (default = 0.1/mm)
:type a: int, float
:return: output field (N x N square array of complex numbers).
:rtype: `LightPipes.field.Field`
:Example:
.. code-block::
from LightPipes import *
import matplotlib.pyplot as plt
import numpy as np
wavelength = 2.3*um
size = 30*mm
N = 500
N2=N//2
x0=0.3*mm
a=0.1/mm
dz=1.25*cm
NZ=200
F0=Begin(size,wavelength,N)
F0=AiryBeam1D(F0,x0=x0, a=a)
Ix=np.zeros(N)
for k in range(0,NZ):
F=Forvard(F0,dz*k)
I=Intensity(F)
Ix=np.vstack([Ix,I[N2]])
plt.figure(figsize = (12,5))
plt.imshow(Ix,
extent=[-size/2/mm, size/2/mm, 0, NZ*dz/cm],
aspect=0.08,
origin='lower',
cmap='jet',
)
plt.xlabel('x [mm]')
plt.ylabel('z [cm]')
s = r'LightPipes for Python' + '\\n'+ '1D Airy beam' + '\\n\\n'\\
r'$\\lambda = {:4.2f}$'.format(wavelength/um) + r' ${\\mu}m$' + '\\n'\\
r'$size = {:4.2f}$'.format(size/mm) + r' $mm$' + '\\n'\\
r'$N = {:4d}$'.format(N) + '\\n'\\
r'$x_0 = {:4.2f}$'.format(x0/mm) + r' $mm$' + '\\n'\\
r'$a = $' + '{:4.2f}'.format(a*mm) + r' $/mm$' + '\\n'\\
r'${\\copyright}$ Fred van Goor, May 2022'
plt.text(16, 50, s, bbox={'facecolor': 'white', 'pad': 5})
plt.show()
.. plot:: ./Examples/Commands/AiryBeam1D.py
ref: M. V. Berry and N. L. Balazs, “Nonspreading wave packets,” Am. J. Phys. 47, 264–267 (1979).
"""
Fout = Field.copy(Fin)
Y, X = Fout.mgrid_cartesian
Fout.field = special.airy(X/x0)[0]*_np.exp(X*a)
Fout._IsGauss=False
return Fout
def AiryBeam2D(Fin, x0 = 0.001, y0 = 0.001, a1 = 100, a2 = 100):
"""
*Creates a 2D nonspreading Airy beam.*
:math:`F(x,y,z=0) = Ai\\left(\\dfrac{x}{x_0}\\right)Ai\\left(\\dfrac{y}{y_0}\\right)e^{a_1x+a_2y}`
:param Fin: input field
:type Fin: Field
:param x0: scale x (default = 1*mm)
:type x0: int, float
:param y0: scale y (default = 1*mm)
:type y0: int, float
:param a1: degree of apodization x (default = 0.1/mm)
:type a1: int, float
:param a2: degree of apodization y (default = 0.1/mm)
:type a2: int, float
:return: output field (N x N square array of complex numbers).
:rtype: `LightPipes.field.Field`
:Example:
.. code-block::
from LightPipes import *
import matplotlib.pyplot as plt
import numpy as np
wavelength = 2.3*um
size = 30*mm
N = 500
x0=y0=1*mm
a1=a2=0.1/mm
z=900*cm
F0=Begin(size,wavelength,N)
F0=AiryBeam2D(F0,x0=x0, y0=y0, a1=a1, a2=a2)
F=Fresnel(F0,z)
I=Intensity(F)
plt.figure(figsize = (9,5))
plt.imshow(I,
extent=[-size/2/mm, size/2/mm, -size/2/mm, size/2/mm],
origin='lower',
cmap='jet',
)
plt.title('2D Airy beam')
plt.xlabel('x [mm]')
plt.ylabel('y [mm]')
s = r'LightPipes for Python' + '\\n'+ '2D Airy beam' + '\\n\\n'\\
r'$\\lambda = {:4.2f}$'.format(wavelength/um) + r' ${\\mu}m$' + '\\n'\\
r'$size = {:4.2f}$'.format(size/mm) + r' $mm$' + '\\n'\\
r'$N = {:4d}$'.format(N) + '\\n'\\
r'$x_0 = y_0 = {:4.2f}$'.format(x0/mm) + r' $mm$' + '\\n'\\
r'$a1 = a2 = $' + '{:4.2f}'.format(a1*mm) + r' $/mm$' + '\\n'\\
r'$z = $' + '{:4.2f}'.format(z/cm) + r' $cm$' + '\\n'\\
r'${\\copyright}$ Fred van Goor, May 2022'
plt.text(16, -10, s, bbox={'facecolor': 'white', 'pad': 5})
plt.show()
.. plot:: ./Examples/Commands/AiryBeam2D.py
ref: M. V. Berry and N. L. Balazs, “Nonspreading wave packets,” Am. J. Phys. 47, 264–267 (1979).
.. seealso::
* :ref:`Examples: Non-diffracting Airy beams<Generation of a 2-dimensional Airy beam from a Gaussian laser beam.>`
"""
Fout = Field.copy(Fin)
Y, X = Fout.mgrid_cartesian
Fout.field = special.airy(X/x0)[0]*special.airy(Y/y0)[0]*_np.exp(X*a1)*_np.exp(Y*a2)
Fout._IsGauss=False
return Fout
@backward_compatible
def PointSource(Fin, x=0.0, y=0.0):
"""
*Creates a point source.*
:param Fin: input field
:type Fin: Field
:param x: x-position of the point source (default = 0.0)
:type x: int, float
:param y: y-position of the point source (default = 0.0)
:type y: int, float
:return: output field (N x N square array of complex numbers).
:rtype: `LightPipes.field.Field`
:Example:
>>> F = PointSource(F) # point source at center of the grid
>>> F = PointSource(F, x = 5*mm) # point source at x=5mm, y=0.0
>>> F = PointSource(F, 5*mm, 0.0) # Idem
"""
Fout = Field.copy(Fin)
if abs(x) >= Fin.siz/2 or abs(y) >= Fin.siz/2:
raise ValueError(
'error in PointSource: x and y must be inside grid size, between: {:4.4f} and {:4.4f}, got: x = {:4.4f}, y = {:4.4f}'.format(-Fin.siz/2,Fin.siz/2,x,y))
Fout=IntAttenuator(0,Fin)
nx = int(Fin.N * (0.5 + x / Fin.siz))
ny = int(Fin.N * (0.5 + y / Fin.siz))
Fout.field[ny, nx] = 1.0
Fout._IsGauss=False
return Fout
def PlaneWave(Fin, w, tx=0.0, ty=0.0, x_shift=0.0, y_shift=0.0):
"""
*Creates a (circular) plane wavefront.*
:param Fin: input field
:type Fin: Field
:param w: diameter of the plane wave
    :param tx: tilt in radians (default = 0.0)
    :type tx: int, float
    :param ty: tilt in radians (default = 0.0)
    :type ty: int, float
:param x_shift: shift in x direction (default = 0.0)
:type x_shift: int, float
:param y_shift: shift in y direction (default = 0.0)
:type y_shift: int, float
:return: output field (N x N square array of complex numbers).
:rtype: `LightPipes.field.Field`
:Example:
>>> F = PlaneWave(F, w = 2*mm) # plane wave with diameter of 2 mm at center of the grid
>>> F = PlaneWave(F, w = 2*mm, x = 5*mm) # Idem at x=5mm, y=0.0
>>> F = PlaneWave(F, w = 2*mm, x = 5*mm, ty = 1.0*mrad) # Idem at x=5mm, y=0.0, tilted 1.0 mrad
>>> F = PlaneWave(F, 2*mm, 5*mm, 0.0, 1.0*mrad) # Idem
"""
Fout = Field.copy(Fin)
Fout=CircAperture(Fout, w/2, x_shift, y_shift )
Fout=Tilt(Fout, tx, ty)
Fout._IsGauss=False
return Fout
@backward_compatible
def GaussBeam( Fin, w0, n=0, m=0, x_shift=0, y_shift=0, tx=0, ty=0, doughnut=False, LG=False):
"""
*Creates a Gaussian beam in its waist.*
:param Fin: input field
:type Fin: Field
:param w0: size of the Gauss waist
:param x_shift: shift in x direction (default = 0.0)
:type x_shift: int, float
:param y_shift: shift in y direction (default = 0.0)
:type y_shift: int, float
    :param tx: tilt in radians (default = 0.0)
    :type tx: int, float
    :param ty: tilt in radians (default = 0.0)
    :type ty: int, float
    :param doughnut: if True a doughnut mode is generated (default = False)
    :type doughnut: bool
:param LG: if True a (n,m) Laguerre-Gauss mode is generated, if False a Hermite Gauss mode (default = False)
:type LG: bool
:return: output field (N x N square array of complex numbers).
:rtype: `LightPipes.field.Field`
:Example:
>>> w0 = 3*mm
>>> F=GaussBeam(w0,F) #TEM0,0 Hermite Gauss beam with size (radius) 3 mm in center of the grid
>>> F=GaussBeam(w0,F,LG=True,n=2,m=5) # LG2,5 Laguerre Gauss beam
>>> F=GaussBeam(w0,F,doughnut=True,m=1) # LG0,1* doughnut beam
>>> F=GaussBeam(w0,F,doughnut=True,m=1, tx = 1*mrad, x_shift = 1*mm) # idem, tilted and shifted
"""
Fout=Field.copy(Fin)
if not doughnut:
if LG:
Fout = GaussLaguerre(Fin, w0, p=n, l=m, A=1.0)
else:
Fout = GaussHermite(Fin, w0, n ,m, A=1.0)
else:
if m==0:
m=1
# alternative: raise error
#raise ValueError(
# 'm cannot be zero for the doughnut mode')
Fout = GaussLaguerre(Fin, w0, p=n, l=m, A = 1.0 )
Fout = Interpol( Fout, Fout.siz, Fout.N, 0, 0, 360 / (4 * m), 1)
Fout = MultPhase(Fout, PI/2 )
Fout = BeamMix(GaussLaguerre(Fin, w0, p=n, l=m, A=1 ), Fout)
Fout = Interpol(Fout, Fin.siz, Fin.N, x_shift, y_shift, 0, 1 )
Fout = Tilt(Fout, tx, ty )
if not LG and not doughnut and tx == 0.0 and ty == 0.0 and x_shift == 0.0 and y_shift == 0.0:
Fout._IsGauss = True #analytical propagation is possible using ABCD matrices
Fout._q = -1j* _np.pi*w0*w0/Fout.lam
Fout._w0 = w0
Fout._z = 0.0
Fout._A = 1.0
Fout._n = n
Fout._m = m
return Fout
| [input_ids truncated in this export] | 1.922658 | 4,952
from django.urls import path
from ravens_metadata_apps.content_pages import views
urlpatterns = (
path('monitoring/alive',
views.view_monitoring_alive,
name='view_monitoring_alive'),
path('test/err-500',
views.view_err_500_test,
name='view_err_500_test'),
path('test/err-400',
views.view_err_404_test,
name='view_err_404_test'),
# homepage/ splash page
#
path('',
views.view_homepage,
name='view_homepage'),
)
| [input_ids truncated in this export] | 2.174468 | 235
"""Post data to S3."""
import boto3
import os
import pickle
from src.config import CACHE_PATH, FINDINGS_S3_BUCKET
from src.single_layer_network import list_findings
from src.training_data import load_all_training_tiles, tag_with_locations
from src.training_visualization import render_results_for_analysis
def post_findings_to_s3(raster_data_paths, model, training_info, bands, render_results):
"""Aggregate findings from all NAIPs into a pickled list, post to S3."""
findings = []
for path in raster_data_paths:
labels, images = load_all_training_tiles(path, bands)
if len(labels) == 0 or len(images) == 0:
print("WARNING, there is a borked naip image file")
continue
false_positives, fp_images = list_findings(labels, images, model)
path_parts = path.split('/')
filename = path_parts[len(path_parts) - 1]
print("FINDINGS: {} false pos of {} tiles, from {}".format(
len(false_positives), len(images), filename))
if render_results:
# render JPEGs showing findings
render_results_for_analysis([path], false_positives, fp_images, training_info['bands'],
training_info['tile_size'])
    # combine findings for all NAIP images analyzed for the region
    findings.extend(tag_with_locations(fp_images, false_positives,
                                       training_info['tile_size'],
                                       training_info['naip_state']))
# dump combined findings to disk as a pickle
try:
os.mkdir(CACHE_PATH + training_info['naip_state'])
except:
pass
naip_path_in_cache_dir = training_info['naip_state'] + '/' + 'findings.pickle'
local_path = CACHE_PATH + naip_path_in_cache_dir
    with open(local_path, 'wb') as outfile:  # binary mode for pickle
        pickle.dump(findings, outfile)
# push pickle to S3
s3_client = boto3.client('s3')
s3_client.upload_file(local_path, FINDINGS_S3_BUCKET, naip_path_in_cache_dir)
| [input_ids truncated in this export] | 2.253521 | 923
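A hedged editorial invocation sketch; the raster path is invented, model is assumed to be an already-trained classifier, and the training_info keys shown are exactly the ones the function reads above:

training_info = {'bands': [1, 1, 1, 1], 'tile_size': 64, 'naip_state': 'ny'}
post_findings_to_s3(['/cache/naip/ny/example_tile.tif'],  # invented path
                    model, training_info,
                    bands=[1, 1, 1, 1], render_results=False)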
import numpy as np
import matplotlib.pyplot as plt
import math
cmap = plt.cm.viridis
| [198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 10688, 198, 198, 66, 8899, 796, 458, 83, 13, 11215, 13, 37040, 29207, 198] | 2.636364 | 33
'''
Module for calling the functions of the Pruebas estadísticas (statistical tests) assignment.
'''
import os
import pathlib
from questionary import text, select, Choice
from validaciones import is_valid_chi
import abajo_arriba
import frecuencias
from ruta import get_file_path
def read_chi():
    '''
    Asks the user to enter a value for chi squared.
    '''
chi = text(
'Ingrese valor para chi cuadrada:',
validate=is_valid_chi,
qmark='>'
).ask()
return float(chi)
def main():
    '''
    Runs the Pruebas estadísticas program.
    '''
    # path of the CSV file the program uses
    csv_file = pathlib.Path(get_file_path('Pruebas.csv'))
    # if the file does not exist in the directory
    if not csv_file.exists():
        print("Tiene que existir un archivo llamado 'Pruebas.csv' en el mismo directorio para usar el programa.")
        # exit the program
        return
menu = select(
'Seleccione la prueba estadística a usar:',
[
Choice('Abajo y arriba', 1),
Choice('Frecuencias', 2),
Choice('Salir', 0)
],
qmark='*'
).ask()
    # abajo_arriba (up-and-down) test
    if menu == 1:
        chi = read_chi()
        abajo_arriba.inicio(chi)
    # frecuencias (frequencies) test
    elif menu == 2:
        chi = read_chi()
        frecuencias.main(chi)
main()
print()
os.system('pause')
| [input_ids truncated in this export] | 2.15674 | 638
def update_gs_range(vals, sheet, cell_range):
"""Update range of cells in a google sheets sheet
Args:
vals (list:list:str): List of lists of movie attributes [[title,year,cover_url],...]
sheet (<class Sheet>): Google Sheets sheet object
cell_range (str): cell range in [A-Z][int]:[A-Z][int] style
"""
sheet.spreadsheet.values_update(
f'{sheet.title}!{cell_range}',
params={'valueInputOption': 'RAW'},
        body={'values': vals})
| [input_ids truncated in this export] | 2.8 | 160
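A hedged editorial usage sketch; sheet is assumed to be an already-opened gspread worksheet, and the movie values are invented:

movies = [["Alien", "1979", "https://example.com/alien.jpg"]]
update_gs_range(movies, sheet, "A2:C2")  # writes one row into cells A2:C2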
import logging
from django.core.management.base import BaseCommand
from feedreader.models import Outline
from feedreader.functions import get_unread_count
logger = logging.getLogger(__name__)
| [input_ids truncated in this export] | 3.5 | 56
import matplotlib.pyplot as plt
import networkx as nx
import pydot
from networkx.drawing.nx_pydot import graphviz_layout
G = nx.path_graph(8)
nx.draw(G)
plt.show()
G = nx.cycle_graph(24)
pos = nx.spring_layout(G, iterations=200)
nx.draw(G, pos, node_color=range(24), node_size=800, cmap=plt.cm.Blues)
plt.show()
G = nx.star_graph(20)
pos = nx.spring_layout(G)
colors = range(20)
nx.draw(G, pos, node_color='#A0CBE2', edge_color=colors,
width=4, edge_cmap=plt.cm.Blues, with_labels=False)
plt.show()
G = nx.balanced_tree(3, 5)
pos = graphviz_layout(G, prog='twopi', args='')
plt.figure(figsize=(8, 8))
nx.draw(G, pos, node_size=20, alpha=0.5, node_color="blue", with_labels=False)
plt.axis('equal')
plt.show()
| [input_ids truncated in this export] | 2.139466 | 337
terms = 11
result = list(map(lambda x: 2 ** x, range(terms)))
print("The total terms are:",terms)
for i in range(terms):
print("2 raised to power",i,"is",result[i]) | [
38707,
796,
1367,
198,
198,
20274,
796,
1351,
7,
8899,
7,
50033,
2124,
25,
362,
12429,
2124,
11,
2837,
7,
38707,
22305,
198,
198,
4798,
7203,
464,
2472,
2846,
389,
25,
1600,
38707,
8,
198,
1640,
1312,
287,
2837,
7,
38707,
2599,
198,... | 2.816667 | 60 |
#Cyber Security Engineer
#Offensive Specialist
import base64
import os
import sys
#The commented block below is a bash script that converts shellcode into raw binary format.
#You just need to change the input file and then run the second command to convert it into binary form.
#This script will run on Linux distributions.
#The commented script converts C-format shellcode into raw form; if you want to convert C# shellcode into raw form, just follow two simple steps.
#STEP1 ----> Replace '0x' with ''
#STEP2 ----> Replace ',' with ''
#Now run just the second command.
"""
#!/bin/bash
echo "[>] Parsing Input File"
cat $INPUT |grep '"' |tr -d " " |tr -d "\n" |sed 's/[\"x.;(){}]//g' >> Parsed.txt
echo "[>] Pipe output to xxd"
xxd -r -p Parsed.txt Converted.bin
echo "[>] Clean up"
echo "[>] Done!!"
"""
#Now this part of the code converts the raw form of the shellcode into C, C# and Base64 forms.
#Just run the code and give the binary file as input.
try:
    INPUT_FILE = sys.argv[1]
    with open(INPUT_FILE, 'rb') as shellcode:
        shell_D = shellcode.read()
except (IndexError, IOError):
    print("File argument needed! %s <raw payload file>" % sys.argv[0])
    sys.exit(1)
shell_coded = ''
for byte in shell_D:
shell_coded += "\\x" + hex(byte)[2:].zfill(2)
C_Sharp = "0" + ",0".join(shell_coded.split("\\")[1:])
encode = base64.b64encode(C_Sharp.encode())
with open('All_TYEPES_SHELLCODES.txt', 'w') as out:
out.write("\n\nC shellcode format:\n\n")
out.write(shell_coded)
out.write("\n\nC# format shellcode:\n\n")
out.write(C_Sharp)
out.write("\n\nBase64 Encoded shellcode:\n\n")
out.write(encode.decode('ascii'))
out.write("\n")
| [input_ids truncated in this export] | 2.599071 | 646
import os
import click
from flask import Flask, abort, jsonify, request
from flask_cors import CORS
from flask_jwt_extended import JWTManager, create_access_token
from flask_ldap3_login import LDAP3LoginManager
app = Flask(__name__)
CORS(app)
app.config.from_envvar("FLASK_CONFIG_FILE")
app.config.from_object(Config)
# Initialise the JSON Web Token Manager.
jwt = JWTManager()
jwt.init_app(app)
# Initialise the ldap manager using the settings read into the flask app.
ldap_manager = LDAP3LoginManager(app)
@app.route("/")
@app.route("/public_key")
@app.route("/token", methods=["POST"])
#############################################################################
# Command line helper utilities.
#############################################################################
@app.cli.command()
@click.argument('username')
@click.option("--last-forever", is_flag=True)
def generate_token(username, last_forever):
"""Generate token."""
if last_forever:
token = create_access_token(identity=username, expires_delta=False)
else:
token = create_access_token(identity=username)
print(token)
@app.cli.command()
@click.argument('username')
@click.option('--password', prompt=True, hide_input=True)
def test_authentication(username, password):
"""Test authentication."""
ldap_response = ldap_manager.authenticate(username, password)
print(ldap_response.status)
| [
11748,
28686,
198,
198,
11748,
3904,
198,
198,
6738,
42903,
1330,
46947,
11,
15614,
11,
33918,
1958,
11,
2581,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
198,
6738,
42903,
62,
73,
46569,
62,
2302,
1631,
1330,
449,
39386,
13511,
... | 3.13245 | 453 |
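A minimal sketch of round-tripping a token from the record above with flask_jwt_extended's decode_token; it assumes the loaded config provides a JWT secret and keeps the default "sub" identity claim:
from flask_jwt_extended import decode_token
with app.app_context():
    token = create_access_token(identity="alice")  # "alice" is a made-up user
    claims = decode_token(token)
    assert claims["sub"] == "alice"  # the identity is stored in the "sub" claim by default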
import argparse
import numpy as np
import torch
import utils.stream as s
def get_arguments():
"""Gets arguments from the command line.
Returns:
A parser with the input arguments.
"""
# Creates the ArgumentParser
parser = argparse.ArgumentParser(
usage='Reconstructs an RBM with linear combination of original and sampled weights.')
# Adds a dataset argument with pre-defined choices
parser.add_argument('dataset', help='Dataset identifier', choices=[
'mnist', 'fmnist', 'kmnist'])
# Adds an identifier argument to the desired pre-trained model path
parser.add_argument(
'input_model', help='Input name for the pre-trained RBM', type=str)
# Adds an identifier argument to the desired sampled weights file
parser.add_argument(
'input_sampled', help='Input name for the sampled weight file', type=str)
# Adds an identifier argument to the desired file
parser.add_argument(
'-alpha', help='Constant used to calculate the linear combination between weights', type=float, default=0.01)
return parser.parse_args()
if __name__ == '__main__':
# Gathers the input arguments
args = get_arguments()
# Gathering variables from arguments
dataset = args.dataset
input_model = args.input_model
input_sampled = args.input_sampled
alpha = args.alpha
# Loads the testing data
_, _, test = s.load_dataset(name=dataset)
# Loads the pre-trained model
model = torch.load(f'models/{input_model}.pth')
# Loading original and sampled weights
W_sampled = np.load(f'weights/{input_sampled}.npy')
# Reshaping weights to correct dimension
W_sampled = np.reshape(W_sampled, [W_sampled.shape[0], model.n_visible, model.n_hidden])
# Resetting biases for fair comparison
model.a = torch.nn.Parameter(torch.zeros(model.n_visible))
model.b = torch.nn.Parameter(torch.zeros(model.n_hidden))
# For every sampled weight
for i in range(W_sampled.shape[0]):
# Applying linear combination of original and sampled weights as new weights
model.W = torch.nn.Parameter((1 - alpha) * model.W + alpha * torch.from_numpy(W_sampled[i]).to(model.device))
# Checking model device type
if model.device == 'cuda':
# Applying its parameters as cuda again
model = model.cuda()
# Reconstructs an RBM
mse, pl = model.reconstruct(test)
| [
11748,
1822,
29572,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
198,
11748,
3384,
4487,
13,
5532,
355,
264,
628,
198,
4299,
651,
62,
853,
2886,
33529,
198,
220,
220,
220,
37227,
38,
1039,
7159,
422,
262,
3141,
162... | 2.778652 | 890 |
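A toy numeric check, with made-up tensors, of the convex combination applied above (new_W = (1 - alpha) * W + alpha * W_sampled):
import torch
alpha = 0.01
W = torch.ones(2, 2)
W_sampled = torch.zeros(2, 2)
new_W = (1 - alpha) * W + alpha * W_sampled
assert torch.allclose(new_W, torch.full((2, 2), 0.99))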
######## Snakemake header ########
import sys; sys.path.append("/home/cmb-panasas2/skchoudh/software_frozen/anaconda27/envs/riboraptor/lib/python3.5/site-packages"); import pickle; snakemake = pickle.loads(b'\x80\x03csnakemake.script\nSnakemake\nq\x00)\x81q\x01}q\x02(X\x04\x00\x00\x00ruleq\x03X\n\x00\x00\x00merge_bamsq\x04X\x06\x00\x00\x00paramsq\x05csnakemake.io\nParams\nq\x06)\x81q\x07X\x04\x00\x00\x00/tmpq\x08a}q\t(X\x07\x00\x00\x00tmp_dirq\nh\x08X\x06\x00\x00\x00_namesq\x0b}q\x0ch\nK\x00N\x86q\rsubX\x05\x00\x00\x00inputq\x0ecsnakemake.io\nInputFiles\nq\x0f)\x81q\x10(X\x17\x00\x00\x00bams_srr/SRR1062555.bamq\x11X\x17\x00\x00\x00bams_srr/SRR1062556.bamq\x12X\x17\x00\x00\x00bams_srr/SRR1062557.bamq\x13X\x17\x00\x00\x00bams_srr/SRR1062558.bamq\x14X\x17\x00\x00\x00bams_srr/SRR1062559.bamq\x15X\x17\x00\x00\x00bams_srr/SRR1062560.bamq\x16X\x17\x00\x00\x00bams_srr/SRR1062561.bamq\x17X\x17\x00\x00\x00bams_srr/SRR1062562.bamq\x18X\x17\x00\x00\x00bams_srr/SRR1062563.bamq\x19X\x17\x00\x00\x00bams_srr/SRR1062564.bamq\x1aX\x17\x00\x00\x00bams_srr/SRR1062565.bamq\x1bX\x17\x00\x00\x00bams_srr/SRR1062566.bamq\x1cX\x17\x00\x00\x00bams_srr/SRR1062567.bamq\x1dX\x17\x00\x00\x00bams_srr/SRR1062568.bamq\x1eX\x17\x00\x00\x00bams_srr/SRR1062569.bamq\x1fX\x17\x00\x00\x00bams_srr/SRR1062570.bamq X\x17\x00\x00\x00bams_srr/SRR1062571.bamq!X\x17\x00\x00\x00bams_srr/SRR1062572.bamq"X\x17\x00\x00\x00bams_srr/SRR1062573.bamq#X\x17\x00\x00\x00bams_srr/SRR1062574.bamq$X\x17\x00\x00\x00bams_srr/SRR1062575.bamq%X\x17\x00\x00\x00bams_srr/SRR1062576.bamq&X\x17\x00\x00\x00bams_srr/SRR1062577.bamq\'X\x17\x00\x00\x00bams_srr/SRR1062578.bamq(X\x17\x00\x00\x00bams_srr/SRR1062579.bamq)X\x17\x00\x00\x00bams_srr/SRR1062580.bamq*X\x17\x00\x00\x00bams_srr/SRR1062581.bamq+X\x17\x00\x00\x00bams_srr/SRR1062582.bamq,X\x17\x00\x00\x00bams_srr/SRR1062583.bamq-X\x17\x00\x00\x00bams_srr/SRR1062584.bamq.X\x17\x00\x00\x00bams_srr/SRR1062585.bamq/X\x17\x00\x00\x00bams_srr/SRR1062586.bamq0X\x17\x00\x00\x00bams_srr/SRR1062587.bamq1X\x17\x00\x00\x00bams_srr/SRR1062588.bamq2X\x17\x00\x00\x00bams_srr/SRR1062589.bamq3X\x17\x00\x00\x00bams_srr/SRR1062590.bamq4X\x17\x00\x00\x00bams_srr/SRR1062591.bamq5X\x17\x00\x00\x00bams_srr/SRR1062592.bamq6X\x17\x00\x00\x00bams_srr/SRR1062593.bamq7X\x17\x00\x00\x00bams_srr/SRR1062594.bamq8X\x17\x00\x00\x00bams_srr/SRR1062595.bamq9X\x17\x00\x00\x00bams_srr/SRR1062596.bamq:X\x17\x00\x00\x00bams_srr/SRR1062597.bamq;X\x17\x00\x00\x00bams_srr/SRR1062598.bamq<X\x17\x00\x00\x00bams_srr/SRR1062599.bamq=X\x17\x00\x00\x00bams_srr/SRR1062600.bamq>X\x17\x00\x00\x00bams_srr/SRR1062601.bamq?X\x17\x00\x00\x00bams_srr/SRR1062602.bamq@X\x17\x00\x00\x00bams_srr/SRR1062603.bamqAX\x17\x00\x00\x00bams_srr/SRR1062604.bamqBX\x17\x00\x00\x00bams_srr/SRR1062605.bamqCX\x17\x00\x00\x00bams_srr/SRR1062606.bamqDX\x17\x00\x00\x00bams_srr/SRR1062607.bamqEX\x17\x00\x00\x00bams_srr/SRR1062608.bamqFX\x17\x00\x00\x00bams_srr/SRR1062609.bamqGX\x17\x00\x00\x00bams_srr/SRR1062610.bamqHX\x17\x00\x00\x00bams_srr/SRR1062611.bamqIX\x17\x00\x00\x00bams_srr/SRR1062612.bamqJX\x17\x00\x00\x00bams_srr/SRR1062613.bamqKX\x17\x00\x00\x00bams_srr/SRR1062614.bamqLX\x17\x00\x00\x00bams_srr/SRR1062615.bamqMX\x17\x00\x00\x00bams_srr/SRR1062616.bamqNX\x17\x00\x00\x00bams_srr/SRR1062617.bamqOX\x17\x00\x00\x00bams_srr/SRR1062618.bamqPX\x17\x00\x00\x00bams_srr/SRR1062619.bamqQX\x17\x00\x00\x00bams_srr/SRR1062620.bamqRX\x17\x00\x00\x00bams_srr/SRR1062621.bamqSX\x17\x00\x00\x00bams_srr/SRR1062622.bamqTX\x17\x00\x00\x00bams_srr/SRR1062623.bamqUX\x17\x00\x00\x00bams_srr/SRR1062624.bamqVX\x17\x00\x00\x00bams_srr/S
RR1062625.bamqWX\x17\x00\x00\x00bams_srr/SRR1062626.bamqXX\x17\x00\x00\x00bams_srr/SRR1062627.bamqYX\x17\x00\x00\x00bams_srr/SRR1062628.bamqZX\x17\x00\x00\x00bams_srr/SRR1062629.bamq[X\x17\x00\x00\x00bams_srr/SRR1062630.bamq\\X\x17\x00\x00\x00bams_srr/SRR1062631.bamq]X\x17\x00\x00\x00bams_srr/SRR1062632.bamq^X\x17\x00\x00\x00bams_srr/SRR1062633.bamq_X\x17\x00\x00\x00bams_srr/SRR1062634.bamq`X\x17\x00\x00\x00bams_srr/SRR1062635.bamqaX\x17\x00\x00\x00bams_srr/SRR1062636.bamqbX\x17\x00\x00\x00bams_srr/SRR1062637.bamqcX\x17\x00\x00\x00bams_srr/SRR1062638.bamqde}qeh\x0b}qfsbX\t\x00\x00\x00wildcardsqgcsnakemake.io\nWildcards\nqh)\x81qiX\t\x00\x00\x00SRX399823qja}qk(X\x06\x00\x00\x00sampleqlhjh\x0b}qmX\x06\x00\x00\x00sampleqnK\x00N\x86qosubX\x06\x00\x00\x00configqp}qqX\x0b\x00\x00\x00config_pathqrX\x1b\x00\x00\x00configs/GRCz10_SRP034750.pyqssX\x06\x00\x00\x00outputqtcsnakemake.io\nOutputFiles\nqu)\x81qvX\x12\x00\x00\x00bams/SRX399823.bamqwa}qxh\x0b}qysbX\x03\x00\x00\x00logqzcsnakemake.io\nLog\nq{)\x81q|}q}h\x0b}q~sbX\x07\x00\x00\x00threadsq\x7fK\x01X\t\x00\x00\x00resourcesq\x80csnakemake.io\nResources\nq\x81)\x81q\x82(K\x01K\x01e}q\x83(X\x06\x00\x00\x00_nodesq\x84K\x01X\x06\x00\x00\x00_coresq\x85K\x01h\x0b}q\x86(h\x84K\x00N\x86q\x87h\x85K\x01N\x86q\x88uubub.'); from snakemake.logging import logger; logger.printshellcmds = True
######## Original script #########
import os
import tempfile
from snakemake.shell import shell
if len(snakemake.input) > 1:
with tempfile.TemporaryDirectory(dir=snakemake.params.tmp_dir) as temp_dir:
cmd = ' -in '.join(snakemake.input)
shell(r'''bamtools merge -in {cmd} -out {snakemake.output}.unsorted \
&& samtools sort -@ {snakemake.threads} \
-T {temp_dir}/{snakemake.wildcards.sample}_merge_bam \
-o {snakemake.output} {snakemake.output}.unsorted \
&& samtools index {snakemake.output} \
&& yes | rm -rf {snakemake.output}.unsorted''')
elif len(snakemake.input) == 1:
source = os.path.abspath(str(snakemake.input[0]))
destination = os.path.abspath(str(snakemake.output))
shell('''cp {source} {destination} && cp {source}.bai {destination}.bai''')
| [
198,
7804,
16705,
15883,
13639,
46424,
21017,
198,
11748,
25064,
26,
25064,
13,
6978,
13,
33295,
7203,
14,
11195,
14,
66,
2022,
12,
6839,
292,
292,
17,
14,
8135,
354,
2778,
71,
14,
43776,
62,
69,
42005,
14,
272,
330,
13533,
1983,
14... | 1.588042 | 3,646 |
# Copyright (c) 2017-2021, Alibaba Group Holding Limited
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
# -*- coding: utf-8 -*-
import signal
import os
import sys
import subprocess
import time
import logging
import logging.config
# start_pfsd.sh -p 1-1 -w 32 -s 5 -a /dev/shm/pfsd
# -p is required, others are optional
# Must run in the foreground
entrypoint = "/usr/local/polarstore/pfsd/bin/start_pfsd.sh -f "
mylog = None
share_dir = "/scripts"
# The pre-stop script creates this file; the container will exit shortly afterwards, so do not restart the process
# When start_instance is issued, pfsd may take 1 second to notice it; after reading the startup parameters, pfsd creates this file
# The control plane waits for this file to be created, then deletes it and synchronously reports start_instance success
# pfsd container entry point
if __name__ == "__main__":
signal.signal(signal.SIGHUP, signal.SIG_IGN)
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
pfsd_path = ["/var/run/pfs", "/var/run/pfsd", "/dev/shm/pfsd", "/var/log"]
for path in pfsd_path:
try:
os.mkdir(path)
            os.chmod(path, 0o777)
except OSError as e:
pass
mylog = logging.getLogger("pfsd_super")
try:
logging.config.fileConfig('log.conf')
except Exception as e:
logging.basicConfig(filename='/var/log/pfsd_super.log', level=logging.DEBUG, format='[%(asctime)s]%(levelname)s: %(message)s')
try:
os.remove(pfsd_exit_filename())
except OSError as e:
pass
err = start_pfsd()
sys.exit(err)
| [
2,
15069,
357,
66,
8,
2177,
12,
1238,
2481,
11,
41992,
4912,
31703,
15302,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 2.054749 | 895 |
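A minimal sketch, with made-up flag values, of composing the launch command the comments above describe (-p is required, the others optional):
cmd = entrypoint + "-p 1-1 -w 32 -s 5 -a /dev/shm/pfsd"
# -> "/usr/local/polarstore/pfsd/bin/start_pfsd.sh -f -p 1-1 -w 32 -s 5 -a /dev/shm/pfsd"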
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print(mnist.train.images.shape, mnist.train.labels.shape)
print(mnist.test.images.shape, mnist.test.labels.shape)
print(mnist.validation.images.shape,mnist.validation.labels.shape)
x = tf.placeholder(tf.float32,[None, 784])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x,W)+b)
y_ = tf.placeholder(tf.float32,[None,10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y),reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
init = tf.global_variables_initializer()
# prediction
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
with tf.Session() as sess:
sess.run(init)
for _ in range(1000):
batch_xs,batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x:batch_xs,y_:batch_ys})
# sess.run(correct_prediction)
res = sess.run(accuracy,feed_dict={x:mnist.test.images,y_:mnist.test.labels})
print(res) | [
6738,
11192,
273,
11125,
13,
1069,
12629,
13,
83,
44917,
82,
13,
10295,
396,
1330,
5128,
62,
7890,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
10295,
396,
796,
5128,
62,
7890,
13,
961,
62,
7890,
62,
28709,
7203,
39764,
8808,
... | 2.329435 | 513 |
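A small NumPy check, with a made-up batch of one, of the cross-entropy the record above minimizes, -sum(y_ * log(y)) averaged over the batch:
import numpy as np
y_true = np.array([[0.0, 1.0]])
y_pred = np.array([[0.3, 0.7]])
ce = np.mean(-np.sum(y_true * np.log(y_pred), axis=1))
assert np.isclose(ce, -np.log(0.7))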
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='impro',
version='1.1',
description='Image analysis package',
author='Sebastian Reinhard',
author_email='sebastian.reinhard@stud-mail.uni-wuerzburg.de',
url='https://github.com/super-resolution',
packages=find_packages(),
package_data={'impro.render': ['shaders/*','shaders/raycast/*'], 'impro.analysis': ['cuda_files/*']},
include_package_data=True,
) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
3672,
11639,
32077,
3256,
198,
220,
220,
220,
220,
220,
2196,
11639,
16,
13,
16,
3256,
198,
220,
22... | 2.547368 | 190 |
"""Integration tests for the pyWriter project.
Test the conversion of the chapter descriptions.
For further information see https://github.com/peter88213/PyWriter
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
from pywriter.html.html_chapterdesc import HtmlChapterDesc
from pywriter.odt.odt_chapterdesc import OdtChapterDesc
from pywriter.test.import_export_test import ImportExportTest
import unittest
if __name__ == '__main__':
main()
| [
37811,
34500,
1358,
5254,
329,
262,
12972,
34379,
1628,
13,
201,
198,
201,
198,
14402,
262,
11315,
286,
262,
6843,
16969,
13,
201,
198,
201,
198,
1890,
2252,
1321,
766,
3740,
1378,
12567,
13,
785,
14,
79,
2357,
3459,
26427,
14,
20519,... | 3.211538 | 156 |
#!/usr/bin/env python
# Copyright 2018 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
import sys, os
import textwrap
import re
try:
from argparse import ArgumentParser
from argparse import HelpFormatter
from argparse import RawDescriptionHelpFormatter
from argparse import SUPPRESS
class ParagraphHelpFormatter( HelpFormatter ):
"""
This formatter preserves paragraphs in description and epilog text,
and lines beginning with a ">" are written verbatim.
"""
def _fill_text(self, text, width, indent):
""
return format_text( text, width, indent )
except Exception:
"""
Adaptor class for python 2.6, 3.0, and 3.1. It implements a subset of
argparse functionality.
Notable differences to argparse:
- The returned object from parse_args() always contains the
non-option arguments as the 'args' data member.
- Positional arguments are not handled. If a positional argument
is specified, all the non-option arguments are stored in a
variable of that name.
- The help formatting has differences. Using ParagraphHelpFormatter
helps reduce the differences.
"""
import optparse
from optparse import HelpFormatter
from optparse import SUPPRESS_HELP as SUPPRESS
def set_num_columns_for_help_formatter( numcols=None ):
"""
The COLUMNS environment variable is used when formatting the help page.
If not already set, this function tries to determine the terminal width
and use that as the number of columns.
This function must be called before the ArgumentParser is constructed.
"""
if numcols:
os.environ['COLUMNS'] = str(numcols)
elif 'COLUMNS' not in os.environ:
ncol = get_terminal_width()
if ncol:
os.environ['COLUMNS'] = str(ncol)
###########################################################################
def format_text( text, width, indent ):
""
final = ''
cnt = 0
para = ''
numnewlines = 0
for line in text.strip().splitlines(True):
if line.strip():
cnt = 0
if line.startswith( '>' ):
if para:
final += '\n'*numnewlines
final += fill_paragraph( para, width, indent )
para = ''
numnewlines = 1
final += '\n'*numnewlines
final += line[1:].rstrip()
numnewlines = 1
else:
para += line
else:
cnt += 1
if cnt == 1 and para:
final += '\n'*numnewlines
final += fill_paragraph( para, width, indent )
para = ''
numnewlines = 2
if para:
final += '\n'*numnewlines
final += fill_paragraph( para, width, indent )
return final
whitespace_regex = re.compile(r'\s+')
def fill_paragraph( text, width, indent ):
""
text = whitespace_regex.sub(' ', text).strip()
return textwrap.fill( text, width, initial_indent=indent,
subsequent_indent=indent )
def get_terminal_width():
""
ncol = None
try:
ncol = os.get_terminal_size(0)[0]
except Exception:
try:
import fcntl, termios, struct
data = struct.pack( 'HHHH', 0, 0, 0, 0 )
data = fcntl.ioctl( 0, termios.TIOCGWINSZ, data )
h, w, hp, wp = struct.unpack( 'HHHH', data )
ncol = w
except Exception:
try:
import console
ncol = console.getTerminalSize()[0]
except Exception:
pass
return ncol
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
2864,
2351,
8987,
1222,
14044,
23555,
286,
3837,
544,
11,
11419,
198,
2,
357,
11251,
7597,
737,
4698,
262,
2846,
286,
17453,
5550,
12,
4535,
830,
2327,
1495,
351,
24563,... | 2.323407 | 1,679 |
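A hedged usage sketch of the paragraph-preserving formatter defined above, assuming the argparse branch was taken:
p = ArgumentParser(description='First paragraph.\n\n>verbatim line\n\nSecond paragraph.',
                   formatter_class=ParagraphHelpFormatter)
p.print_help()  # paragraphs are re-filled to the terminal width; the ">" line prints verbatim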
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 6 00:57:37 2019
@author: nov28
"""
from __future__ import division
import os
import sys
import numpy as np
import glob
import collections
from collections import Counter, defaultdict
import re
import json
import io
import codecs
if __name__=='__main__':
path=sys.argv[1]
#initialization
tags=[]
data=[]
vocab=[]
tagsdistinct=[]
fp=open(path, 'r',encoding='utf-8')
# fopen=fp.read()
lines = [line for line in fp.readlines()]
for l in lines:
#print(l)
l=l[:-1]
tokens=l.split(" ")
data.append(tokens)
#emis count, tag count
emiscount=Counter()
tagcount=Counter()
for dline in data:
tempt=[]
#k+=1
for t in dline:
revt=t[::-1]
sep=revt.index('/')
tag=revt[:sep][::-1]
word=revt[sep+1:][::-1]
tagcount[tag]+=1
tempt.append(tag)
if word not in vocab:
vocab.append(word)
if tag not in tagsdistinct:
#print(tag,'line ',k)
tagsdistinct.append(tag)
ind= word+' / '+ tag
emiscount[ind]+= 1
tags.append(tempt)
#emisprob
emisprob=defaultdict(float)
for key,val in emiscount.items():
temptag= key.split(' / ')[1]
emisprob[key]= float(val)/float(tagcount[temptag])
#print(temptag)
#first and last tags
firstags=defaultdict(float)
lastags=defaultdict(float)
#transany, trasncount
transanycount=Counter()
transcount=Counter()
n= len(data)
v=len(tagsdistinct)
for t in tags:
firstags[t[0]]+=1
lastags[t[-1]]+=1
for x in range(len(t)-1):
transanycount[t[x]] +=1
transcount[t[x]+','+t[x+1]] +=1
for t in tagsdistinct:
firstags[t]= float(firstags[t]+1)/float(n + v)
lastags[t]= float(lastags[t]+1)/float(n + v)
#transprob
transprob=defaultdict(float)
v=len(tagsdistinct)
for t1 in tagsdistinct:
for t2 in tagsdistinct:
newval=float(transcount[t1+','+t2] + 1)/float(transanycount[t1] + v)
transprob[t1+','+t2]=newval
with open('hmmmodel.txt','w+',encoding='utf8') as output:
output.write("All tags available:"+"\n")
for cl in tagsdistinct:
output.write(str(cl) + " ")
output.write("\n")
output.write("Vocab: "+"\n")
for v in vocab:
output.write(str(v) + " ")
output.write("\n")
output.write("Emission Probabilities "+"\n")
output.write(json.dumps(emisprob))
output.write("\n")
output.write("Transition Probabilities"+"\n")
output.write(json.dumps(transprob))
output.write("\n")
output.write("First Transition Probabilities...a(0,s)"+"\n")
output.write(json.dumps(firstags))
output.write("\n")
output.write("Last Transition Probabilities....a(s,qf)"+"\n")
output.write(json.dumps(lastags))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3300,
1526,
220,
718,
3571,
25,
3553,
25,
2718,
13130,
201,
198,
201,
198,
31,
9800,
25,
645,
85,
2078,
201,
198,
37811,
201,
198,
... | 1.869914 | 1,745 |
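A worked toy example, with invented counts, of the add-one (Laplace) smoothing the record above applies to transitions, P(t2|t1) = (count(t1,t2) + 1) / (count(t1) + V):
transcount = {"NN,VB": 3}
transanycount = {"NN": 10}
v = 4  # number of distinct tags
p = (transcount.get("NN,VB", 0) + 1) / (transanycount["NN"] + v)
assert abs(p - 4.0 / 14.0) < 1e-12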
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import pytest
import torch
from ocpmodels.modules.evaluator import (
Evaluator,
cosine_similarity,
magnitude_error,
)
@pytest.fixture(scope="class")
@pytest.fixture(scope="class")
@pytest.fixture(scope="class")
@pytest.mark.usefixtures("load_evaluator_s2ef")
@pytest.mark.usefixtures("load_evaluator_is2rs")
@pytest.mark.usefixtures("load_evaluator_is2re")
| [
37811,
198,
15269,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
198,
1212,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
43,
2149,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
2723,
5509,
13,
198,
... | 2.779904 | 209 |
# Example
import os
import cv2
import pathlib
import argparse
from PIL import Image
from torchvision import models
from torchvision import transforms
from torchvision.transforms import Resize, ToTensor, Normalize
from gradcam import GradCam
from misc import fuse_heatmap_image
class VGGGradCam(GradCam):
"""
Example given by demonstrating with a pretrained VGG19 model
"""
if __name__ == '__main__':
main()
| [
2,
17934,
198,
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
3108,
8019,
198,
11748,
1822,
29572,
198,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
28034,
10178,
1330,
4981,
198,
6738,
28034,
10178,
1330,
31408,
198,
6738,
28034,
1... | 3.307692 | 130 |
import os
import cv2
import torch
from PIL import Image
from tqdm import tqdm
from .....trainer import Trainer
from .....trainer import TrainerCallback
from .....misc.toolkit import eval_context
@TrainerCallback.register("clip_vqgan_aligner")
__all__ = [
"CLIPWithVQGANAlignerCallback",
]
| [
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
28034,
198,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
6738,
11485,
986,
2213,
10613,
1330,
31924,
198,
6738,
11485,
986,
2213,
10613,
... | 2.99 | 100 |
"""Connections to gcloud datastore API servers."""
from gcloud import connection
from gcloud.datastore import datastore_v1_pb2 as datastore_pb
from gcloud.datastore import helpers
from gcloud.datastore.dataset import Dataset
class Connection(connection.Connection):
"""A connection to the Google Cloud Datastore via the Protobuf API.
This class should understand only the basic types (and protobufs)
in method arguments, however should be capable of returning advanced types.
:type credentials: :class:`oauth2client.client.OAuth2Credentials`
:param credentials: The OAuth2 Credentials to use for this connection.
"""
API_VERSION = 'v1beta2'
"""The version of the API, used in building the API call's URL."""
API_URL_TEMPLATE = ('{api_base}/datastore/{api_version}'
'/datasets/{dataset_id}/{method}')
"""A template for the URL of a particular API call."""
def _request(self, dataset_id, method, data):
"""Make a request over the Http transport to the Cloud Datastore API.
:type dataset_id: string
:param dataset_id: The ID of the dataset of which to make the request.
:type method: string
:param method: The API call method name (ie, ``runQuery``,
``lookup``, etc)
:type data: string
:param data: The data to send with the API call.
Typically this is a serialized Protobuf string.
:rtype: string
:returns: The string response content from the API call.
:raises: Exception if the response code is not 200 OK.
"""
headers = {
'Content-Type': 'application/x-protobuf',
'Content-Length': str(len(data)),
'User-Agent': self.USER_AGENT,
}
headers, content = self.http.request(
uri=self.build_api_url(dataset_id=dataset_id, method=method),
method='POST', headers=headers, body=data)
if headers['status'] != '200':
raise Exception('Request failed. Error was: %s' % content)
return content
def _rpc(self, dataset_id, method, request_pb, response_pb_cls):
""" Make a protobuf RPC request.
:type dataset_id: string
:param dataset_id: The ID of the dataset to connect to. This is
usually your project name in the cloud console.
:type method: string
:param method: The name of the method to invoke.
        :type request_pb: :class:`google.protobuf.message.Message` instance
        :param request_pb: the protobuf instance representing the request.
        :type response_pb_cls: a :class:`google.protobuf.message.Message`
            subclass.
        :param response_pb_cls: The class used to unmarshall the response protobuf.
"""
response = self._request(dataset_id=dataset_id, method=method,
data=request_pb.SerializeToString())
return response_pb_cls.FromString(response)
@classmethod
def build_api_url(cls, dataset_id, method, base_url=None,
api_version=None):
"""Construct the URL for a particular API call.
This method is used internally
to come up with the URL
to use when making RPCs
to the Cloud Datastore API.
:type dataset_id: string
:param dataset_id: The ID of the dataset to connect to. This is
usually your project name in the cloud console.
:type method: string
:param method: The API method to call (ie, runQuery, lookup, ...).
:type base_url: string
:param base_url: The base URL where the API lives.
You shouldn't have to provide this.
:type api_version: string
:param api_version: The version of the API to connect to.
You shouldn't have to provide this.
"""
return cls.API_URL_TEMPLATE.format(
api_base=(base_url or cls.API_BASE_URL),
api_version=(api_version or cls.API_VERSION),
dataset_id=dataset_id, method=method)
def transaction(self, transaction=connection.Connection._EMPTY):
"""Getter/setter for the connection's transaction object.
:type transaction: :class:`gcloud.datastore.transaction.Transaction`,
(setting), or omitted (getting).
:param transaction: The new transaction (if passed).
:rtype: :class:`gcloud.datastore.transaction.Transaction`, (getting)
or :class:`gcloud.datastore.connection.Connection` (setting)
:returns: the current transaction (getting) or self (setting).
"""
if transaction is self._EMPTY:
return self._current_transaction
else:
self._current_transaction = transaction
return self
def mutation(self):
"""Getter for mutation usable with current connection.
:rtype: :class:`gcloud.datastore.datastore_v1_pb2.Mutation`.
:returns: the mutation instance associated with the current transaction
          (if one exists) or a new mutation instance.
"""
if self.transaction():
return self.transaction().mutation()
else:
return datastore_pb.Mutation()
def dataset(self, *args, **kwargs):
"""Factory method for Dataset objects.
:param args: All args and kwargs will be passed along to the
:class:`gcloud.datastore.dataset.Dataset` initializer.
:rtype: :class:`gcloud.datastore.dataset.Dataset`
:returns: A dataset object that will use this connection as
its transport.
"""
kwargs['connection'] = self
return Dataset(*args, **kwargs)
def lookup(self, dataset_id, key_pbs):
"""Lookup keys from a dataset in the Cloud Datastore.
Maps the ``DatastoreService.Lookup`` protobuf RPC.
This method deals only with protobufs
(:class:`gcloud.datastore.datastore_v1_pb2.Key`
and
:class:`gcloud.datastore.datastore_v1_pb2.Entity`)
and is used under the hood for methods like
:func:`gcloud.datastore.dataset.Dataset.get_entity`:
>>> from gcloud import datastore
>>> from gcloud.datastore.key import Key
>>> connection = datastore.get_connection(email, key_path)
>>> dataset = connection.dataset('dataset-id')
>>> key = Key(dataset=dataset).kind('MyKind').id(1234)
Using the :class:`gcloud.datastore.dataset.Dataset` helper:
>>> dataset.get_entity(key)
<Entity object>
Using the ``connection`` class directly:
>>> connection.lookup('dataset-id', key.to_protobuf())
<Entity protobuf>
:type dataset_id: string
:param dataset_id: The dataset to look up the keys.
:type key_pbs: list of :class:`gcloud.datastore.datastore_v1_pb2.Key`
(or a single Key)
:param key_pbs: The key (or keys) to retrieve from the datastore.
:rtype: list of :class:`gcloud.datastore.datastore_v1_pb2.Entity`
(or a single Entity)
:returns: The entities corresponding to the keys provided.
If a single key was provided and no results matched,
this will return None.
If multiple keys were provided and no results matched,
this will return an empty list.
"""
lookup_request = datastore_pb.LookupRequest()
single_key = isinstance(key_pbs, datastore_pb.Key)
if single_key:
key_pbs = [key_pbs]
for key_pb in key_pbs:
lookup_request.key.add().CopyFrom(key_pb)
lookup_response = self._rpc(dataset_id, 'lookup', lookup_request,
datastore_pb.LookupResponse)
results = [result.entity for result in lookup_response.found]
if single_key:
if results:
return results[0]
else:
return None
return results
def run_query(self, dataset_id, query_pb, namespace=None):
"""Run a query on the Cloud Datastore.
Maps the ``DatastoreService.RunQuery`` protobuf RPC.
Given a Query protobuf,
sends a ``runQuery`` request to the Cloud Datastore API
and returns a list of entity protobufs matching the query.
You typically wouldn't use this method directly,
in favor of the :func:`gcloud.datastore.query.Query.fetch` method.
Under the hood, the :class:`gcloud.datastore.query.Query` class
uses this method to fetch data:
>>> from gcloud import datastore
>>> connection = datastore.get_connection(email, key_path)
>>> dataset = connection.dataset('dataset-id')
>>> query = dataset.query().kind('MyKind').filter('property =', 'val')
        Using the ``fetch`` method...
>>> query.fetch()
[<list of Entity unmarshalled from protobuf>]
>>> query.cursor()
<string containing cursor where fetch stopped>
Under the hood this is doing...
>>> connection.run_query('dataset-id', query.to_protobuf())
[<list of Entity Protobufs>], cursor, more_results, skipped_results
:type dataset_id: string
:param dataset_id: The ID of the dataset over which to run the query.
:type query_pb: :class:`gcloud.datastore.datastore_v1_pb2.Query`
:param query_pb: The Protobuf representing the query to run.
:type namespace: string
:param namespace: The namespace over which to run the query.
"""
request = datastore_pb.RunQueryRequest()
if namespace:
request.partition_id.namespace = namespace
request.query.CopyFrom(query_pb)
response = self._rpc(dataset_id, 'runQuery', request,
datastore_pb.RunQueryResponse)
return (
[e.entity for e in response.batch.entity_result],
response.batch.end_cursor,
response.batch.more_results,
response.batch.skipped_results,
)
def begin_transaction(self, dataset_id, serializable=False):
"""Begin a transaction.
Maps the ``DatastoreService.BeginTransaction`` protobuf RPC.
:type dataset_id: string
:param dataset_id: The dataset over which to execute the transaction.
"""
if self.transaction():
raise ValueError('Cannot start a transaction with another already '
'in progress.')
request = datastore_pb.BeginTransactionRequest()
if serializable:
request.isolation_level = (
datastore_pb.BeginTransactionRequest.SERIALIZABLE)
else:
request.isolation_level = (
datastore_pb.BeginTransactionRequest.SNAPSHOT)
response = self._rpc(dataset_id, 'beginTransaction', request,
datastore_pb.BeginTransactionResponse)
return response.transaction
def commit(self, dataset_id, mutation_pb):
"""Commit dataset mutations in context of current transation (if any).
Maps the ``DatastoreService.Commit`` protobuf RPC.
:type dataset_id: string
:param dataset_id: The dataset in which to perform the changes.
:type mutation_pb: :class:`gcloud.datastore.datastore_v1_pb2.Mutation`.
:param mutation_pb: The protobuf for the mutations being saved.
:rtype: :class:`gcloud.datastore.datastore_v1_pb2.MutationResult`.
        :returns: the result protobuf for the mutation.
"""
request = datastore_pb.CommitRequest()
if self.transaction():
request.mode = datastore_pb.CommitRequest.TRANSACTIONAL
request.transaction = self.transaction().id()
else:
request.mode = datastore_pb.CommitRequest.NON_TRANSACTIONAL
request.mutation.CopyFrom(mutation_pb)
response = self._rpc(dataset_id, 'commit', request,
datastore_pb.CommitResponse)
return response.mutation_result
def rollback(self, dataset_id):
"""Rollback the connection's existing transaction.
Maps the ``DatastoreService.Rollback`` protobuf RPC.
Raises a ``ValueError``
if the connection isn't currently in a transaction.
:type dataset_id: string
:param dataset_id: The dataset to which the transaction belongs.
"""
if not self.transaction() or not self.transaction().id():
raise ValueError('No transaction to rollback.')
request = datastore_pb.RollbackRequest()
request.transaction = self.transaction().id()
# Nothing to do with this response, so just execute the method.
self._rpc(dataset_id, 'rollback', request,
datastore_pb.RollbackResponse)
def allocate_ids(self, dataset_id, key_pbs):
"""Obtain backend-generated IDs for a set of keys.
Maps the ``DatastoreService.AllocateIds`` protobuf RPC.
:type dataset_id: string
:param dataset_id: The dataset to which the transaction belongs.
:type key_pbs: list of :class:`gcloud.datastore.datastore_v1_pb2.Key`
:param key_pbs: The keys for which the backend should allocate IDs.
:rtype: list of :class:`gcloud.datastore.datastore_v1_pb2.Key`
:returns: An equal number of keys, with IDs filled in by the backend.
"""
request = datastore_pb.AllocateIdsRequest()
for key_pb in key_pbs:
request.key.add().CopyFrom(key_pb)
# Nothing to do with this response, so just execute the method.
response = self._rpc(dataset_id, 'allocateIds', request,
datastore_pb.AllocateIdsResponse)
return list(response.key)
def save_entity(self, dataset_id, key_pb, properties):
"""Save an entity to the Cloud Datastore with the provided properties.
.. note::
Any existing properties for the entity identified by 'key_pb'
will be replaced by those passed in 'properties'; properties
          not passed in 'properties' will no longer be set for the entity.
:type dataset_id: string
:param dataset_id: The dataset in which to save the entity.
:type key_pb: :class:`gcloud.datastore.datastore_v1_pb2.Key`
:param key_pb: The complete or partial key for the entity.
:type properties: dict
:param properties: The properties to store on the entity.
"""
mutation = self.mutation()
# If the Key is complete, we should upsert
# instead of using insert_auto_id.
path = key_pb.path_element[-1]
auto_id = not (path.HasField('id') or path.HasField('name'))
if auto_id:
insert = mutation.insert_auto_id.add()
else:
insert = mutation.upsert.add()
insert.key.CopyFrom(key_pb)
for name, value in properties.iteritems():
prop = insert.property.add()
# Set the name of the property.
prop.name = name
# Set the appropriate value.
helpers._set_protobuf_value(prop.value, value)
# If this is in a transaction, we should just return True. The
# transaction will handle assigning any keys as necessary.
if self.transaction():
return True
result = self.commit(dataset_id, mutation)
# If this was an auto-assigned ID, return the new Key.
if auto_id:
return result.insert_auto_id_key[0]
return True
def delete_entities(self, dataset_id, key_pbs):
"""Delete keys from a dataset in the Cloud Datastore.
This method deals only with
:class:`gcloud.datastore.datastore_v1_pb2.Key` protobufs
and not with any of the other abstractions.
For example, it's used under the hood in the
:func:`gcloud.datastore.entity.Entity.delete` method.
:type dataset_id: string
:param dataset_id: The dataset from which to delete the keys.
:type key_pbs: list of :class:`gcloud.datastore.datastore_v1_pb2.Key`
:param key_pbs: The keys to delete from the datastore.
:rtype: boolean (if in a transaction) or else
:class:`gcloud.datastore.datastore_v1_pb2.MutationResult`.
:returns: True
"""
mutation = self.mutation()
for key_pb in key_pbs:
delete = mutation.delete.add()
delete.CopyFrom(key_pb)
if not self.transaction():
self.commit(dataset_id, mutation)
return True
| [
37811,
13313,
507,
284,
308,
17721,
4818,
459,
382,
7824,
9597,
526,
15931,
198,
198,
6738,
308,
17721,
1330,
4637,
198,
6738,
308,
17721,
13,
19608,
459,
382,
1330,
4818,
459,
382,
62,
85,
16,
62,
40842,
17,
355,
4818,
459,
382,
62... | 2.356065 | 7,156 |
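A minimal sketch, using a placeholder base URL and a made-up dataset id, of the URL template that build_api_url fills in above:
API_URL_TEMPLATE = ('{api_base}/datastore/{api_version}'
                    '/datasets/{dataset_id}/{method}')
url = API_URL_TEMPLATE.format(api_base='https://www.googleapis.com',
                              api_version='v1beta2',
                              dataset_id='my-project', method='runQuery')
# -> 'https://www.googleapis.com/datastore/v1beta2/datasets/my-project/runQuery'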
from django.db import models | [
6738,
42625,
14208,
13,
9945,
1330,
4981
] | 4 | 7 |
import MeCab
from collections import Counter
import re
import alkana
if __name__ == '__main__':
Shareka('これはテストです').dajarewake()
| [
11748,
2185,
34,
397,
198,
6738,
17268,
1330,
15034,
198,
11748,
302,
198,
11748,
32915,
2271,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
8734,
4914,
10786,
46036,
39258,
31676,
24336,
4... | 2.72 | 50 |
#-*- coding: utf-8 -*-
#Part of BrowseInfo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api, _
from datetime import date,datetime
| [
2,
12,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
7841,
286,
44775,
12360,
13,
4091,
38559,
24290,
2393,
329,
1336,
6634,
290,
15665,
3307,
13,
198,
198,
6738,
16298,
2238,
1330,
4981,
11,
7032,
11,
40391,
11,
4808,
... | 3.333333 | 54 |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
from indra.util import unicode_strs
from indra.sources import bel
from indra.sources.bel.rdf_processor import BelRdfProcessor
from indra.statements import RegulateAmount, BioContext, RefContext
from nose.plugins.attrib import attr
from indra.tests.util import skip_if, IS_PY3
concept_prefix = 'http://www.openbel.org/bel/namespace//'
entity_prefix = 'http://www.openbel.org/bel/'
path_this = os.path.dirname(os.path.abspath(__file__))
test_rdf_nfkb = os.path.join(path_this, 'bel_rdfs', 'NFKB1_neighborhood.rdf')
test_rdf_myc = os.path.join(path_this, 'bel_rdfs', 'MYC_neighborhood.rdf')
@attr('webservice', 'slow')
@skip_if(not IS_PY3, 'Python 2 detected. Runtime may be excessive')
@attr('slow')
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
6738,
3170,
1040,
1330,
8633,
11,
965,
198,
11748,
28686,
198,
6738,
773,
430,
13,
22602,
1330,
28000,
1098,
62,
2536,
82,
198,
... | 2.710526 | 304 |
import asyncio
from argparse import ArgumentParser
from ircrobots import ConnectionParams
from ircrobots.security import TLSVerifySHA512
from . import Bot
from .config import Config, load as config_load
from .database import Database
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("config")
args = parser.parse_args()
config = config_load(args.config)
asyncio.run(main(config))
| [
11748,
30351,
952,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
198,
6738,
220,
1980,
22609,
1747,
1330,
26923,
10044,
4105,
198,
6738,
220,
1980,
22609,
1747,
13,
12961,
1330,
33855,
13414,
1958,
37596,
25836,
198,
198,
6738,
764,
13... | 3.233083 | 133 |
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import os
from ngraph.util.persist import pickle_load, valid_path_append, fetch_file
import tarfile
class CIFAR10(object):
"""
CIFAR10 data set from https://www.cs.toronto.edu/~kriz/cifar.html
Arguments:
path (str): Local path to copy data files.
"""
def load_data(self):
"""
Fetch the CIFAR-10 dataset and load it into memory.
Arguments:
path (str, optional): Local directory in which to cache the raw
dataset. Defaults to current directory.
normalize (bool, optional): Whether to scale values between 0 and 1.
Defaults to True.
Returns:
tuple: Both training and test sets are returned.
"""
workdir, filepath = valid_path_append(self.path, '', self.filename)
if not os.path.exists(filepath):
fetch_file(self.url, self.filename, filepath, self.size)
batchdir = os.path.join(workdir, 'cifar-10-batches-py')
if not os.path.exists(os.path.join(batchdir, 'data_batch_1')):
assert os.path.exists(filepath), "Must have cifar-10-python.tar.gz"
with tarfile.open(filepath, 'r:gz') as f:
f.extractall(workdir)
train_batches = [os.path.join(batchdir, 'data_batch_' + str(i)) for i in range(1, 6)]
Xlist, ylist = [], []
for batch in train_batches:
with open(batch, 'rb') as f:
d = pickle_load(f)
Xlist.append(d['data'])
ylist.append(d['labels'])
X_train = np.vstack(Xlist).reshape(-1, 3, 32, 32)
y_train = np.vstack(ylist).ravel()
with open(os.path.join(batchdir, 'test_batch'), 'rb') as f:
d = pickle_load(f)
X_test, y_test = d['data'], d['labels']
X_test = X_test.reshape(-1, 3, 32, 32)
self.train_set = {'image': {'data': X_train,
'axes': ('N', 'C', 'H', 'W')},
'label': {'data': y_train,
'axes': ('N',)}}
self.valid_set = {'image': {'data': X_test,
'axes': ('N', 'C', 'H', 'W')},
'label': {'data': np.array(y_test),
'axes': ('N',)}}
return self.train_set, self.valid_set
| [
2,
41906,
17174,
46068,
1174,
198,
2,
15069,
2177,
12,
7908,
8180,
10501,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 2.199861 | 1,441 |
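A hedged usage sketch of the loader above; the constructor argument is an assumption, since the __init__ that would set self.path, self.filename, self.url, and self.size is not shown in the record:
ds = CIFAR10(path='/tmp/cifar10')
train_set, valid_set = ds.load_data()
print(train_set['image']['data'].shape)  # CIFAR-10 train split: (50000, 3, 32, 32)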
from .adam import Adam
from .sgd import SGD, Momentum
from .adagrad import AdaGrad
from .adadelta import AdaDelta
from .rmsprop import RMSprop
from optim.optimizer import Optimizer
import copy
| [
6738,
764,
324,
321,
1330,
7244,
198,
6738,
764,
82,
21287,
1330,
26147,
35,
11,
29278,
388,
198,
6738,
764,
324,
363,
6335,
1330,
47395,
42731,
198,
6738,
764,
324,
324,
12514,
1330,
47395,
42430,
198,
6738,
764,
81,
907,
22930,
1330... | 3.327586 | 58 |
from larning.metaclass import CollectorType
from larning.testing import name
@name(CollectorType.__init__, 1, globals())
@name(CollectorType.__init__, 2, globals())
@name(CollectorType.__init__, 3, globals())
@name(CollectorType.__init__, 4, globals())
@name(CollectorType.__init__, 5, globals())
@name(CollectorType.clear, 1, globals())
| [
6738,
300,
4228,
13,
4164,
330,
31172,
1330,
17573,
6030,
198,
6738,
300,
4228,
13,
33407,
1330,
1438,
628,
198,
31,
3672,
7,
31337,
273,
6030,
13,
834,
15003,
834,
11,
352,
11,
15095,
874,
28955,
628,
198,
31,
3672,
7,
31337,
273,
... | 2.8 | 125 |
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
import os
current_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(current_dir)
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QWidget
from PyQt5.QtGui import QImage
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import QTimer
import torch
from torch.autograd import Variable
import cv2
from ASUM_GUI_final import *
import argparse
import numpy as np
import serial
import time
from data import BaseTransform, VOC_CLASSES as labelmap
# start/stop timer
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Single Shot MultiBox Detection")
parser.add_argument(
"--weights",
default="./models/SSD_sections_det.pth",
type=str,
help="Trained state_dict file path",
)
parser.add_argument(
"--cuda", default=False, type=bool, help="Use cuda in live demo"
)
parser.add_argument(
"--debug",
action="store_true",
default=False,
help="Debug when do not have the ASUM device",
)
args = parser.parse_args()
if not args.debug:
        ser = serial.Serial("/dev/ttyUSB0", 9600, timeout=0.5)  # connect to the serial port over USB
app = QApplication(sys.argv)
myWin = MyWindow(debug=args.debug)
myWin.show()
sys.exit(app.exec_())
| [
11748,
25064,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
23416,
11,
1195,
13383,
27703,
198,
11748,
28686,
198,
198,
14421,
62,
15908,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,... | 2.406082 | 559 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Implements dataset functionality to be used ``habitat.EmbodiedTask``.
``habitat.core.dataset`` abstracts over a collection of
``habitat.core.Episode``. Each episode consists of a single instantiation
of a ``habitat.Agent`` inside ``habitat.Env``.
"""
import copy
import json
import random
from itertools import groupby
from typing import (
Any,
Callable,
Dict,
Generic,
Iterator,
List,
Optional,
TypeVar,
)
import attr
import numpy as np
from habitat.core.utils import not_none_validator
@attr.s(auto_attribs=True, kw_only=True)
class Episode:
r"""Base class for episode specification that includes initial position and
rotation of agent, scene id, episode. This information is provided by
a ``Dataset`` instance.
Args:
episode_id: id of episode in the dataset, usually episode number.
scene_id: id of scene in dataset.
start_position: list of length 3 for cartesian coordinates
(x, y, z).
start_rotation: list of length 4 for (x, y, z, w) elements
of unit quaternion (versor) representing 3D agent orientation
(https://en.wikipedia.org/wiki/Versor). The rotation specifying
the agent's orientation is relative to the world coordinate
axes.
"""
episode_id: str = attr.ib(default=None, validator=not_none_validator)
scene_id: str = attr.ib(default=None, validator=not_none_validator)
start_position: List[float] = attr.ib(
default=None, validator=not_none_validator
)
start_rotation: List[float] = attr.ib(
default=None, validator=not_none_validator
)
info: Optional[Dict[str, str]] = None
T = TypeVar("T", bound=Episode)
class Dataset(Generic[T]):
r"""Base class for dataset specification.
"""
episodes: List[T]
@property
def scene_ids(self) -> List[str]:
r"""
Returns:
unique scene ids present in the dataset.
"""
return sorted(list({episode.scene_id for episode in self.episodes}))
def get_scene_episodes(self, scene_id: str) -> List[T]:
r"""
Args:
scene_id: id of scene in scene dataset.
Returns:
list of episodes for the ``scene_id``.
"""
return list(
filter(lambda x: x.scene_id == scene_id, iter(self.episodes))
)
def get_episodes(self, indexes: List[int]) -> List[T]:
r"""
Args:
indexes: episode indices in dataset.
Returns:
list of episodes corresponding to indexes.
"""
return [self.episodes[episode_id] for episode_id in indexes]
def get_episode_iterator(self, *args: Any, **kwargs: Any) -> Iterator:
r"""Gets episode iterator with options. Options are specified in
EpisodeIterator documentation. To further customize iterator behavior
for your Dataset subclass, create a customized iterator class like
EpisodeIterator and override this method.
Args:
*args: positional args for iterator constructor
**kwargs: keyword args for iterator constructor
Returns:
Iterator: episode iterator with specified behavior
"""
return EpisodeIterator(self.episodes, *args, **kwargs)
def from_json(
self, json_str: str, scenes_dir: Optional[str] = None
) -> None:
r"""Creates dataset from ``json_str``. Directory containing relevant
graphical assets of scenes is passed through ``scenes_dir``.
Args:
json_str: JSON string containing episodes information.
scenes_dir: directory containing graphical assets relevant
for episodes present in ``json_str``.
"""
raise NotImplementedError
def filter_episodes(self, filter_fn: Callable[[T], bool]) -> "Dataset":
r"""Returns a new dataset with only the filtered episodes from the
original dataset.
Args:
filter_fn: function used to filter the episodes.
Returns:
the new dataset.
"""
new_episodes = []
for episode in self.episodes:
if filter_fn(episode):
new_episodes.append(episode)
new_dataset = copy.copy(self)
new_dataset.episodes = new_episodes
return new_dataset
def get_splits(
self,
num_splits: int,
episodes_per_split: Optional[int] = None,
remove_unused_episodes: bool = False,
collate_scene_ids: bool = True,
sort_by_episode_id: bool = False,
allow_uneven_splits: bool = False,
) -> List["Dataset"]:
r"""Returns a list of new datasets, each with a subset of the original
episodes. All splits will have the same number of episodes, but no
episodes will be duplicated.
Args:
num_splits: the number of splits to create.
episodes_per_split: if provided, each split will have up to
this many episodes. If it is not provided, each dataset will
have ``len(original_dataset.episodes) // num_splits``
                episodes. If ``episodes_per_split`` is provided and is
larger than this value, it will be capped to this value.
remove_unused_episodes: once the splits are created, the extra
episodes will be destroyed from the original dataset. This
saves memory for large datasets.
collate_scene_ids: if true, episodes with the same scene id are
next to each other. This saves on overhead of switching
between scenes, but means multiple sequential episodes will
be related to each other because they will be in the
same scene.
sort_by_episode_id: if true, sequences are sorted by their episode
ID in the returned splits.
allow_uneven_splits: if true, the last split can be shorter than
the others. This is especially useful for splitting over
validation/test datasets in order to make sure that all
episodes are copied but none are duplicated.
Returns:
a list of new datasets, each with their own subset of episodes.
"""
assert (
len(self.episodes) >= num_splits
), "Not enough episodes to create this many splits."
if episodes_per_split is not None:
assert not allow_uneven_splits, (
"You probably don't want to specify allow_uneven_splits"
" and episodes_per_split."
)
assert num_splits * episodes_per_split <= len(self.episodes)
new_datasets = []
if allow_uneven_splits:
stride = int(np.ceil(len(self.episodes) * 1.0 / num_splits))
split_lengths = [stride] * (num_splits - 1)
split_lengths.append(
(len(self.episodes) - stride * (num_splits - 1))
)
else:
if episodes_per_split is not None:
stride = episodes_per_split
else:
stride = len(self.episodes) // num_splits
split_lengths = [stride] * num_splits
num_episodes = sum(split_lengths)
rand_items = np.random.choice(
len(self.episodes), num_episodes, replace=False
)
if collate_scene_ids:
scene_ids = {}
for rand_ind in rand_items:
scene = self.episodes[rand_ind].scene_id
if scene not in scene_ids:
scene_ids[scene] = []
scene_ids[scene].append(rand_ind)
rand_items = []
list(map(rand_items.extend, scene_ids.values()))
ep_ind = 0
new_episodes = []
for nn in range(num_splits):
new_dataset = copy.copy(self) # Creates a shallow copy
new_dataset.episodes = []
new_datasets.append(new_dataset)
for ii in range(split_lengths[nn]):
new_dataset.episodes.append(self.episodes[rand_items[ep_ind]])
ep_ind += 1
if sort_by_episode_id:
new_dataset.episodes.sort(key=lambda ep: ep.episode_id)
new_episodes.extend(new_dataset.episodes)
if remove_unused_episodes:
self.episodes = new_episodes
return new_datasets
class EpisodeIterator(Iterator):
r"""Episode Iterator class that gives options for how a list of episodes
should be iterated. Some of those options are desirable for the internal
    simulator to get higher performance. More context: the simulator suffers
    overhead when switching between scenes, therefore episodes of the same
    scene should be loaded consecutively. However, if too many consecutive
    episodes from the same scene are fed into the RL model, the model risks
    overfitting to that scene. Therefore it's better to load the same scene
    consecutively and switch once a count threshold is reached.
Currently supports the following features:
Cycling: when all episodes are iterated, cycle back to start instead of
throwing StopIteration.
Cycling with shuffle: when cycling back, shuffle episodes groups
grouped by scene.
Group by scene: episodes of same scene will be grouped and loaded
consecutively.
Set max scene repeat: set a number threshold on how many episodes from
the same scene can be loaded consecutively.
Sample episodes: sample the specified number of episodes.
"""
def __init__(
self,
episodes: List[T],
cycle: bool = True,
shuffle: bool = False,
group_by_scene: bool = True,
max_scene_repeat: int = -1,
num_episode_sample: int = -1,
):
r"""
Args:
episodes: list of episodes.
cycle: if true, cycle back to first episodes when StopIteration.
shuffle: if true, shuffle scene groups when cycle.
No effect if cycle is set to false. Will shuffle grouped
scenes if group_by_scene is true.
group_by_scene: if true, group episodes from same scene.
max_scene_repeat: threshold of how many episodes from the same
scene can be loaded consecutively. -1 for no limit
num_episode_sample: number of episodes to be sampled.
-1 for no sampling.
"""
# sample episodes
if num_episode_sample >= 0:
episodes = np.random.choice(
episodes, num_episode_sample, replace=False
)
self.episodes = episodes
self.cycle = cycle
self.group_by_scene = group_by_scene
if group_by_scene:
num_scene_groups = len(
list(groupby(episodes, key=lambda x: x.scene_id))
)
num_unique_scenes = len(set([e.scene_id for e in episodes]))
if num_scene_groups >= num_unique_scenes:
self.episodes = sorted(self.episodes, key=lambda x: x.scene_id)
self.max_scene_repetition = max_scene_repeat
self.shuffle = shuffle
self._rep_count = 0
self._prev_scene_id = None
self._iterator = iter(self.episodes)
def __next__(self):
r"""The main logic for handling how episodes will be iterated.
Returns:
next episode.
"""
next_episode = next(self._iterator, None)
if next_episode is None:
if not self.cycle:
raise StopIteration
self._iterator = iter(self.episodes)
if self.shuffle:
self._shuffle_iterator()
next_episode = next(self._iterator)
if self._prev_scene_id == next_episode.scene_id:
self._rep_count += 1
if (
self.max_scene_repetition > 0
and self._rep_count >= self.max_scene_repetition - 1
):
self._shuffle_iterator()
self._rep_count = 0
self._prev_scene_id = next_episode.scene_id
return next_episode
def _shuffle_iterator(self) -> None:
r"""Internal method that shuffles the remaining episodes.
If self.group_by_scene is true, then shuffle groups of scenes.
Returns:
None.
"""
if self.group_by_scene:
grouped_episodes = [
list(g)
for k, g in groupby(self._iterator, key=lambda x: x.scene_id)
]
random.shuffle(grouped_episodes)
self._iterator = iter(sum(grouped_episodes, []))
else:
episodes = list(self._iterator)
random.shuffle(episodes)
self._iterator = iter(episodes)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,... | 2.336846 | 5,599 |
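A toy sketch, with stub episodes, of the scene-grouped cycling iteration described above:
eps = [Episode(episode_id=str(i), scene_id="scene%d" % (i % 2),
               start_position=[0.0, 0.0, 0.0],
               start_rotation=[0.0, 0.0, 0.0, 1.0]) for i in range(4)]
it = EpisodeIterator(eps, cycle=True, group_by_scene=True)
scenes = [next(it).scene_id for _ in range(6)]  # grouped by scene, then cycles past the 4 episodes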
import os
class Config:
""" basic configurations. """
DEBUG = False
PORT = os.environ.get('PORT') or 5000
ENV = os.environ.get('FLASK_ENV')
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI')
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = "your-secret-key"
class development(Config):
""" development configurations """
DEBUG = True
class production(Config):
""" production configurations """
PORT = os.environ.get('PORT') or 8080
| [
11748,
28686,
198,
198,
4871,
17056,
25,
198,
220,
220,
220,
37227,
4096,
25412,
13,
37227,
198,
220,
220,
220,
16959,
796,
10352,
198,
220,
220,
220,
350,
9863,
796,
28686,
13,
268,
2268,
13,
1136,
10786,
15490,
11537,
393,
23336,
19... | 2.626316 | 190 |
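A hedged sketch, not part of the record above, of selecting one of these classes from FLASK_ENV; the mapping is an assumption:
configs = {"development": development, "production": production}
ConfigClass = configs.get(os.environ.get("FLASK_ENV", "development"), Config)
app.config.from_object(ConfigClass)  # "app" is a hypothetical Flask instance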
import sys
import os
import shutil
import getpass
from random import randint
import Utils
from decrypter import decrypt_file as df
from encrypter import encrypt_file as ef
import config_service as config
import compressor | [
11748,
25064,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
651,
6603,
198,
6738,
4738,
1330,
43720,
600,
198,
11748,
7273,
4487,
198,
6738,
875,
563,
42104,
1330,
42797,
62,
7753,
355,
47764,
198,
6738,
2207,
563,
42104,
1330,
... | 3.946429 | 56 |
from __future__ import print_function
__all__ = [
'WaterColumnController',
'WaterTempController',
]
KC_LEVEL = 1200.0
HREF = 1.5
WATER_CAPACITANCE = 50000000.0
KC_TEMP = 0.15 * WATER_CAPACITANCE
TREF = 50.0
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
705,
19184,
39470,
22130,
3256,
198,
220,
220,
220,
705,
19184,
30782,
22130,
3256,
198,
60,
198,
198,
36222,
62,
2538,
18697,
796,
... | 2.402174 | 92 |
"""
Binary tree sort
2020-12-09: 16:12.99;05:30.08;02:39.92;
2020-12-10: 06:03.32;02:41.62;
"""
from sort import validatetool
if __name__ == '__main__':
validatetool.validate(sort)
| [
37811,
198,
12859,
234,
20998,
231,
43718,
239,
162,
236,
240,
41753,
237,
198,
42334,
12,
1065,
12,
2931,
25,
1467,
25,
1065,
13,
2079,
26,
2713,
25,
1270,
13,
2919,
26,
2999,
25,
2670,
13,
5892,
26,
198,
42334,
12,
1065,
12,
940... | 1.863158 | 95 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-19 01:36
from __future__ import unicode_literals
from django.db import migrations
import home.models
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtaildocs.blocks
import wagtail.wagtailimages.blocks
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
20,
319,
2177,
12,
486,
12,
1129,
5534,
25,
2623,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 2.772727 | 110 |
from random import randint
from copy import deepcopy
TEST_ARR_LENGTH = 200
class Sort(object):
"""Sort class contains some sort methods"""
def insert_sort(self):
"""
insert sort
:param arr: array to sort
:return: sorted array
"""
# outer loop: i from second element to the end
for i in range(1, len(self.arr)):
key = self.arr[i]
            # inner loop: j walks from the last element of the sorted sub-array back to the beginning
j = i - 1
while j >= 0 and self.arr[j] > key:
self.arr[j + 1] = self.arr[j]
j -= 1
self.arr[j + 1] = key
return self.arr
def select_sort(self):
"""
:param arr: array to sort
:return: sorted array
"""
for i in range(0, len(self.arr) - 1):
min_index = i + 1
for j in range(i + 1, len(self.arr)):
if self.arr[min_index] > self.arr[j]:
min_index = j
if self.arr[i] > self.arr[min_index]:
temp = self.arr[i]
self.arr[i] = self.arr[min_index]
self.arr[min_index] = temp
return self.arr
def bubble_sort(self):
"""
:param arr: array to sort
:return: sorted array
"""
for i in range(0, len(self.arr) - 1):
for j in range(0, len(self.arr) - i - 1):
if self.arr[j] > self.arr[j + 1]:
temp = self.arr[j]
self.arr[j] = self.arr[j + 1]
self.arr[j + 1] = temp
return self.arr
@staticmethod
def merge_sort_static(arr):
"""
sort an array by merge sort function
:param arr: the array need to sort
:return: result: sorted array
"""
        # if the array length is greater than 1, split it into two sub-arrays
if len(arr) > 1:
half_index = int(len(arr) / 2)
arr_1 = arr[:half_index]
arr_2 = arr[half_index:]
# sub array call merge_sort function to sort itself
arr_1 = Sort.merge_sort_static(arr_1)
arr_2 = Sort.merge_sort_static(arr_2)
            # merge the two sorted sub-arrays and return the result
result = Sort.merge(arr_1, arr_2)
return result
# if array length is 1, array is sorted, then return itself
else:
return arr
@staticmethod
def merge(arr_1, arr_2):
"""
subprogram used in merge sort function
:param arr_1: first array to merge
:param arr_2: second array to merge
:return: merged and sorted array
"""
        # append an end-of-array sentinel to both arrays
        arr_1.append('inf')
        arr_2.append('inf')
        # define index cursors for the two arrays
        index_1 = 0
        index_2 = 0
        # define a container to store the result
        result = []
# try to merge two arrays
for i in range(0, len(arr_1) + len(arr_2) - 2):
if arr_1[index_1] != 'inf' and arr_2[index_2] != 'inf':
if arr_1[index_1] < arr_2[index_2]:
result.append(arr_1[index_1])
index_1 += 1
else:
result.append(arr_2[index_2])
index_2 += 1
elif arr_1[index_1] == 'inf':
result.append(arr_2[index_2])
index_2 += 1
elif arr_2[index_2] == 'inf':
result.append(arr_1[index_1])
index_1 += 1
return result
if __name__ == '__main__':
    # build a descending (reverse-ordered) test array
    test_arr = [n for n in range(TEST_ARR_LENGTH, -1, -1)]
    print(test_arr)
    sort = Sort(test_arr)
    result = Sort.merge_sort_static(sort.arr)
print(result)
| [input_ids omitted] | 1.882238 | 2,055
from twisted.internet import reactor
from twisted.internet.protocol import Protocol, Factory
from twisted.internet.endpoints import TCP4ClientEndpoint
from txsocksx.client import SOCKS5ClientEndpoint

# GETSlashFactory and the errback body are missing from the original
# snippet; minimal assumed stand-ins are filled in so the example runs.
class GETSlash(Protocol):
    def connectionMade(self):
        self.transport.write(b'GET / HTTP/1.0\r\n\r\n')
    def dataReceived(self, data):
        print(data)
        self.transport.loseConnection()

class GETSlashFactory(Factory):
    protocol = GETSlash

socks_addr = '127.0.0.1'
socks_port = 9050
TCPPoint = TCP4ClientEndpoint(reactor, socks_addr, socks_port)

dst_addr = 'checkip.dyndns.com'
dst_port = 80
SOCKSPoint = SOCKS5ClientEndpoint(dst_addr, dst_port, TCPPoint)
d = SOCKSPoint.connect(GETSlashFactory())

@d.addErrback
def _failed(failure):
    print(failure)
    reactor.stop()

reactor.run()
| [input_ids omitted] | 2.666667 | 189
from openslides.core.config import config
from openslides.motions.exceptions import WorkflowError
from openslides.motions.models import Motion, State, Workflow
from openslides.users.models import User
from openslides.utils.test import TestCase
| [input_ids omitted] | 3.712121 | 66
class Solution:
"""
@param nums: a list of integers
@return: return an integer
"""
| [input_ids omitted] | 2.675676 | 37
#!/usr/bin/env python
from pwn import *
context(os='linux', arch='amd64')
host = '127.0.0.1'
port = 9999
fd = 4
(rbp, canary, rip) = stage1()
libc_write_leak = stage2(rbp, canary, rip)
stage3(canary, rbp, libc_write_leak)
| [input_ids omitted] | 2.184466 | 103
from model.interact import sample_sequence, get_dataset
import torch
import config
import random
from database import database
| [input_ids omitted] | 4.09375 | 32
import ntpath
from glob import glob
from platform import system
from pyxdf import load_xdf
def read_raw_xdf(filename = None):
    '''Import a single XDF file that contains one recording.
Args:
filename : string
full path to a single recording
Returns:
Four arrays containing the data for these streams: EEG, Accelerometer, PPG, Gyroscope.
Additionally, another array is returned with the filename.
Raises:
ValueError: if filename is not specified.
RuntimeError: if filename is not in XDF format or not file found.
See also:
read_raw_xdf_dir
'''
if filename is None:
raise(ValueError('Enter XDF file path.'))
if system() == 'Windows':
filename = r'%s' % filename
if not filename.endswith('.xdf'):
raise(RuntimeError('File type must be XDF.'))
if len(glob(filename)) == 0:
raise(RuntimeError('XDF file not found.'))
return get_files(filename)
def read_raw_xdf_dir(dirname = None):
'''Import a directory with multiple recordings in XDF format.
Args:
dirname : string
full path to directory
Returns:
Four arrays containing the data for these streams: EEG, Accelerometer, PPG, Gyroscope.
Additionally, another array is returned with the filenames
Raises:
ValueError: if dirname is not specified
RuntimeError: if files are not found
See also:
read_raw_xdf
'''
if dirname is None:
raise(ValueError('Enter XDF files directory name.'))
if system() == 'Windows':
dirname = r'%s' % dirname
if dirname[-1] == '\\':
dirname_xdf = dirname + '*.xdf'
else:
dirname_xdf = dirname + '\\*.xdf'
else:
if dirname[-1] == '/':
dirname_xdf = dirname + '*.xdf'
else:
dirname_xdf = dirname + '/*.xdf'
if len(glob(dirname_xdf)) == 0:
raise(RuntimeError('XDF files not found in directory.'))
return get_files(dirname_xdf)
def load_data(filename):
'''Get recordings from a single XDF file and order streams by device.'''
# Load XDF file
streams = load_xdf(filename)
streameeg, streamacc, streamppg, streamgyr, device_name = [], [], [], [], []
# Get the individual names of every Muse device
for stream in streams[0]:
name = stream['info']['name'][0][:9]
if name not in device_name:
device_name.append(name)
# Order the streams in a new list based on the name of the device
streams_ordered = []
for item in device_name:
if len(streams_ordered) < (len(device_name)*4)+len(device_name):
for stream in streams[0]:
if item == stream['info']['name'][0][:9]:
streams_ordered.append(stream)
# Insert each stream inside a file into the corresponding list
for stream in streams_ordered:
if 'EEG' in stream['info']['type'][0]:
streameeg.append(stream)
if 'Accelerometer' in stream['info']['type'][0]:
streamacc.append(stream)
if 'Gyroscope' in stream['info']['type'][0]:
streamgyr.append(stream)
if 'PPG' in stream['info']['type'][0]:
streamppg.append(stream)
return streameeg, streamacc, streamppg, streamgyr
def get_files(files):
'''Get files from directory and insert recordings into arrays.'''
# Get files from directory path
files = sorted(glob(files))
streameeg, streamacc, streamppg, streamgyr, filename = [], [], [], [], []
# Search XDF files and add individual channels to the corresponding list. Store the name of the file.
for f in files:
streameeg_file, streamacc_file, streamppg_file, streamgyr_file = load_data(f)
for item in streameeg_file:
streameeg.append(item)
filename.append(ntpath.basename(f))
for item in streamacc_file:
streamacc.append(item)
for item in streamppg_file:
streamppg.append(item)
for item in streamgyr_file:
streamgyr.append(item)
return streameeg, streamacc, streamppg, streamgyr, filename
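
# Illustrative usage (hypothetical paths, not part of the original source):
#   eeg, acc, ppg, gyr, names = read_raw_xdf('/data/session1.xdf')
#   eeg, acc, ppg, gyr, names = read_raw_xdf_dir('/data/recordings/')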
| [input_ids omitted] | 2.380685 | 1,781
import numpy as np
import pyquaternion
import casadi as cs
import pandas as pd
from sklearn.metrics import mean_squared_error
def unit_quat(q):
"""
Normalizes a quaternion to be unit modulus.
:param q: 4-dimensional numpy array or CasADi object
:return: the unit quaternion in the same data format as the original one
"""
if isinstance(q, np.ndarray):
# if (q == np.zeros(4)).all():
# q = np.array([1, 0, 0, 0])
q_norm = np.sqrt(np.sum(q ** 2))
else:
q_norm = cs.sqrt(cs.sumsqr(q))
return 1 / q_norm * q
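
# Illustrative check (assumed): any nonzero quaternion rescales to unit norm.
#   unit_quat(np.array([0.0, 2.0, 0.0, 0.0]))  # -> array([0., 1., 0., 0.])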
| [input_ids omitted] | 2.317269 | 249
N = int(input())
result = 1
cnt = 0

def factorial(result, n):
    # hedged helper (assumed): the snippet calls factorial() without defining it;
    # multiply result by 1..n and return the digits as a string
    for k in range(1, n + 1):
        result *= k
    return str(result)

total = factorial(result, N)
# count the trailing zeros of N!, stopping at the first non-zero digit
for i in range(len(total) - 1, -1, -1):
    if total[i] == '0':
        cnt += 1
    else:
        break
print(cnt) | [input_ids omitted] | 1.895652 | 115
#!/usr/bin/env python2
#coding=utf-8
from sage.all import *
class MatrixCrackLfsr:
"""
通过矩阵乘积的方式,求解 lfsr 的转移矩阵
:param seq: 已经捕获的比特流序列,一个 0,1 数字构成的列表
:param period: 用户可以指定序列的周期,如果周期不对,则会报出 Assert 错误
:return 序列对应的状态转移矩阵
"""
if __name__ == "__main__":
CL = MatrixCrackLfsr([1, 0, 1, 0, 0, 0, 1, 0])
print CL.crack_by_matrix()
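
# Sketch of the matrix method the docstring describes (assumed, not the
# original implementation): for an n-bit LFSR with captured bits s, stack
#   S = matrix(GF(2), [s[i:i + n] for i in range(n)])
#   Y = matrix(GF(2), [s[i + 1:i + n + 1] for i in range(n)])
# and the transition matrix T satisfies S * T = Y, so T = S.inverse() * Y
# whenever S is invertible.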
| [input_ids omitted] | 1.179739 | 306
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import gettext_lazy as _
import datetime
from django.conf import settings
# For auto-generating passwords
from random import choice
from string import letters
# For sending the confirmation email
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
# favour django-mailer but fall back to django.core.mail
if "mailer" in settings.INSTALLED_APPS:
from mailer import send_mail, mail_admins
else:
from django.core.mail import send_mail, mail_admins
SEXO = (
(1, _('Male')),
(2, _('Female')),
)
CURSO = (
(1, _('Elementary (A2)')),
(2, _('Pre-Intermediate (B1.1)')),
(3, _('Intermediate (B1.2)')),
(4, _('Upper-Intermediate (B2.1)')),
(5, _('First Certificate (B2.2)')),
(6, _('Pre-Advanded (C1.1)')),
(7, _('Advanced (C1.2)')),
(8, _('Proficiency (C2)')),
)
NIVELES_INTESIVO = (
(1, _('Elementary (A2)')),
(2, _('Pre-Intermediate (B1.1)')),
(3, _('Intermediate (B1.2)')),
(4, _('Upper-Intermediate (B2.1)')),
(5, _('First Certificate (B2.2)')),
(6, _('Pre-Advanded (C1.1)')),
(7, _('Advanced (C1.2)')),
(8, _('Proficiency (C2)')),
)
NIVELES_IDIOMAS = (
(1, _('A1')),
(2, _('A2')),
(3, _('B1')),
(4, _('B2.1')),
(5, _('B2.2')),
(6, _('C1.1')),
(7, _('C1.2')),
(8, _('C2'))
)
| [input_ids omitted] | 2.082251 | 693
#!/usr/bin/env python3
"""
Contains all the file reading logic for the trainer
"""
import codecs
def get_confirmation(warningtext):
"""
Prints a warning message and asks for user confirmation
Return Values:
True: User selected Yes
False: User selected No
"""
print()
print(warningtext)
while True:
try:
user_input = input("Please confirm (Y/N): ")
except:
user_input = ''
if user_input.lower() == 'y' or user_input.lower() == 'yes':
return True
elif user_input.lower() == 'n' or user_input.lower() == 'no':
return False
print("The option: " + str(user_input) + " : is not a valid input")
print("Valid options: [Y]es or [N]o")
def detect_file_encoding(training_file, file_encoding, max_passwords = 10000):
"""
Used for autodetecting file encoding of the training password set
    Autodetection requires the python package chardet to be installed
pip install chardet
You can also get it from https://github.com/chardet/chardet
I'm keeping the declarations for the chardet package local to this file
so people can run this tool without installing it if they don't want to
use this feature
Variables:
training_file: (String) The path+name of the file to open
file_encoding: (List) A list to return the possible/recommended file
encodings of the training file
max_passwords: (Int) The maximum number of passwords to parse to
identify the encoding of the file. This is an optimization so this
function doesn't have to parse the whole file.
"""
##Try to import chardet
#
    # If that package is not installed, print out a warning and ask if using
    # the default is ok; then use ascii as the default value
#
try:
from chardet.universaldetector import UniversalDetector
detector = UniversalDetector()
except ImportError as error:
print("FAILED: chardet not insalled")
print("It is highly recommended that you install the 'chardet' Python package")
print("or manually specify the file encoding of the training set via the command line")
print("You can download chardet from https://pypi.python.org/pypi/chardet")
if get_confirmation("Do you want to continue using the default encoding 'ascii'?"):
file_encoding.append('ascii')
return True
# User wanted to exit instead
print("Understood. Please install chardet or specify an encoding " +
"format on the command line"
)
return False
try:
cur_count = 0
with open(training_file, 'rb') as file:
for line in file.readlines():
detector.feed(line)
if detector.done:
break
cur_count = cur_count + 1
if cur_count >= max_passwords:
break
detector.close()
except IOError as error:
print ("Error opening file " + training_file)
print ("Error is " + str(error))
return False
try:
file_encoding.append(detector.result['encoding'])
print("File Encoding Detected: " + str(detector.result['encoding']))
print("Confidence for file encoding: " + str(detector.result['confidence']))
print("If you think another file encoding might have been used please ")
print("manually specify the file encoding and run the training program again")
print()
except KeyError as error:
print("Error encountered with file encoding autodetection")
print("Error : " + str(error))
return False
return True
def check_valid(input_password):
"""
Checks to see if the input password is valid for this training program
Invalid in this case means you don't want to train on them
    Additionally, grammar checks may be run later to further exclude passwords.
    This just covers features that will likely be universal rejections.
Returns
TRUE if the password is valid
FALSE if invalid
"""
# Don't accept blank passwords for training.
if len(input_password) == 0:
return False
# Remove tabs from the training data
# This is important since when the grammar is saved to disk tabs are used
    # as separators. There certainly are other approaches but putting this
# placeholder here for now since tabs are unlikely to be used in passwords
if "\t" in input_password:
return False
# Below are other values that cause problems that we are going to remove.
# These values include things like LineFeed LF
    # Invalid characters at the beginning of the ASCII table
for invalid_hex in range (0x0,0x20):
if chr(invalid_hex) in input_password:
return False
    # UTF-8 Line Separator
if u"\u2028" in input_password:
return False
return True
class TrainerFileInput:
"""
Reads input passwords from file, one by one
Making this a class so it can return one password at a time from the
training file
"""
def __init__(self, filename, encoding = 'utf-8'):
"""
Open the file for reading
Passes file exceptions back up if they occur
Eg: if the file doesn't exist
"""
# Using surrogateescape to handle errors so we can detect encoding
# issues without raising an exception during the reading
# of the original password
#
self.encoding = encoding
self.filename = filename
self.file = codecs.open(
self.filename,
'r',
encoding= self.encoding,
errors= 'surrogateescape'
)
# Keep track of the number of encoding errors
self.num_encoding_errors = 0
# Keep track of the number of valid passwords that have been parsed
self.num_passwords = 0
# Duplicate password detection
#
# Duplicates are good. If this doesn't see duplicate passwords warn the
# user.
self.duplicates_found = False
# Mini dictionary of the first X passwords to look for duplicates
self.duplicate_detection = {}
# Number of passwords to read in to check for duplicates
self.num_to_look_for_duplicates = 100000
def read_password(self):
"""
Returns one password from the training set
If there are no more passwords returns None
"""
# Read an input password from the training set
try:
# Loop until we find a valid password
while True:
try:
password = self.file.readline()
# Unicode errors will throw an exception here, so catch it
# and skip the password
except UnicodeError:
self.num_encoding_errors += 1
continue
# Check to see if the file is done
if password == "":
# Close file and return None
self.file.close()
return None
## Check the encoding of the file
#
# Re-encode it and detect surrogates, this way we can
# identify encoding errors.
#
# I know, could simplify by throwing an exception during
# the original parsing and not use surrogate escapes, but this
# has helped with troubleshooting in the past
#
try:
password.encode(self.encoding)
except UnicodeEncodeError as msg:
if msg.reason == 'surrogates not allowed':
self.num_encoding_errors += 1
else:
#print("Hmm, there was a weird problem reading in a line")
#print("")
self.num_encoding_errors += 1
continue
# Remove newlines but leave whitespace
clean_password = password.rstrip('\r\n')
# Checks to see if the password is valid
if not check_valid(clean_password):
continue
## This is a valid password
self.num_passwords += 1
# Perform duplicate check if needed
if not self.duplicates_found:
if self.num_passwords < self.num_to_look_for_duplicates:
# It is a duplicate!!
if clean_password in self.duplicate_detection:
self.duplicates_found = True
# clean up duplicate_detection dic since we do
# not need it anymore
self.duplicate_detection.clear()
                        else:
                            # Not a duplicate
                            self.duplicate_detection[clean_password] = 1
# Return the password
return clean_password
# File errors *shouldn't* happen but if they do raise them to make
# sure they don't silently halt the training
#
# Aka we want the training to stop and the user to know something
# went wrong
#
except IOError as error:
print (error)
print ("Error reading file " + self.filename)
raise
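
# Illustrative usage (hypothetical filename, not part of the original source):
#   encoding = []
#   if detect_file_encoding('passwords.lst', encoding):
#       trainer = TrainerFileInput('passwords.lst', encoding[0])
#       password = trainer.read_password()
#       while password is not None:
#           password = trainer.read_password()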
| [input_ids omitted] | 2.249829 | 4,379
# Generated by Django 2.2.17 on 2021-04-20 21:52
import json
from os import path
from django.conf import settings
from django.db import migrations, models
| [input_ids omitted] | 3.18 | 50
import frappe
from frappe import _
from frappe.utils.data import add_days, nowdate
@frappe.whitelist()
def make_orientation_meeting(doc, method):
"""Create Orintate"""
meeting = frappe.get_doc({
"doctype": "Meeting",
"title": "Orientation for {0}".format(doc.first_name),
"date": add_days(nowdate(), 1),
"from_time": "09:00",
"to_time": "09:30",
})
print("hello")
print(meeting) | [input_ids omitted] | 2.25 | 196
import copy
import numpy as np
import torch
from collections import OrderedDict
from torch.distributions import Categorical
import rl_sandbox.constants as c
# TODO: Add Q-table if we want
| [input_ids omitted] | 3.266667 | 60
from typing import Any
from django.core.management.base import BaseCommand
from ...apps import generate_schema, get_schema
| [input_ids omitted] | 3.705882 | 34
from __future__ import unicode_literals
from django.db import models
# Create your models here.
| [input_ids omitted] | 3.21875 | 32
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @author: SaltFish
# @file: control.py
# @date: 2020/07/15
import os
import sqlite3
import time
database_dir = os.path.join(os.getcwd(), "instance", "database.sqlite")
def execute_sql(sql, choice):
"""
    Execute a SQL statement
:param sql:
:param choice: 'select', 'update', 'insert', 'delete'
:return:
"""
my_db = sqlite3.connect(database_dir)
my_cursor = my_db.cursor()
my_cursor.execute(sql)
results = []
if choice == "select":
results = my_cursor.fetchall()
else:
my_db.commit()
my_cursor.close()
my_db.close()
return results
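
# Illustrative usage (hypothetical table and column names):
#   rows = execute_sql("SELECT id, name FROM users", "select")
#   execute_sql("DELETE FROM users WHERE id = 1", "delete")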
| [input_ids omitted] | 2.206186 | 291
from cricket_db.parsers.parser import Parser
| [input_ids omitted] | 3.0625 | 16
from django import template
from program.models import Type, MusicFocus, Category, Topic
register = template.Library()
# The original snippet keeps only the decorators; minimal stub bodies are
# assumed below so each decorator wraps a function.
@register.inclusion_tag('boxes/type.html')
def type_box():
    return {'types': Type.objects.all()}

@register.inclusion_tag('boxes/musicfocus.html')
def musicfocus_box():
    return {'musicfocus': MusicFocus.objects.all()}

@register.inclusion_tag('boxes/category.html')
def category_box():
    return {'categories': Category.objects.all()}
@register.inclusion_tag('boxes/topic.html')
def topic_box():
    return {'topics': Topic.objects.all()}
| [input_ids omitted] | 3.344086 | 93
constants.physical_constants["helion mass energy equivalent in MeV"] | [input_ids omitted] | 4 | 17
"""sysl module loader"""
import codecs
import os
import re
import sys
from sysl.proto import sysl_pb2
from sysl.core import syslalgo
from sysl.core import syslparse
from sysl.core import syslx
def fmt_app_name(appname):
"""Format a sysl_pb2.AppName as a syntactically valid string."""
return ' :: '.join(appname.part)
def fmt_call(call):
"""Format a sysl_pb2.Call as a syntactically valid string."""
return fmt_app_name(call.target) + ' <- ' + call.endpoint
def _resolve_mixins(module):
"""Resolve mixin references.
Copies endpoints from mixed-in apps.
"""
# Detect cycles
edges = {
(appname, syslx.fmt_app_name(mixin.name))
for (appname, app) in module.apps.iteritems()
for mixin in app.mixin2}
while True:
more_edges = {
(a, c)
for (a, b1) in edges
for (b2, c) in edges
if b1 == b2
} - edges
if not more_edges:
break
edges |= more_edges
self_edges = {(a, b) for (a, b) in edges if a == b}
if self_edges:
raise RuntimeError(
"mixin cycle(s) detected involving: {}".format(
', '.join(a for (a, _) in self_edges)))
# recursively inject mixins, avoiding double-injection
injected = set()
for appname in module.apps:
inject(appname)
def _check_deps(module, validate):
"""Check app:endpoint dependencies."""
deps = set()
errors = []
for (appname, app) in module.apps.iteritems():
for epname in app.endpoints:
endpt = app.endpoints[epname]
for (_, call) in syslalgo.enumerate_calls(endpt.stmt):
targetname = syslx.fmt_app_name(call.target)
if targetname not in module.apps:
errors.append('{} <- {}: calls non-existent app {}'.format(
appname, epname, targetname))
else:
target = module.apps[targetname]
assert 'abstract' not in syslx.patterns(target.attrs), (
"call target '{}' must not be ~abstract".format(targetname))
if call.endpoint not in target.endpoints:
errors.append(
'{} <- {}: calls non-existent endpoint {} -> {}'.format(
appname, epname, targetname, call.endpoint))
else:
deps.add(
((appname, epname), (targetname, call.endpoint)))
if errors and validate:
raise Exception('broken deps:\n ' + '\n '.join(errors))
return deps
def _map_subscriptions(module):
"""Map pubsub subscriptions into direct calls."""
for appname in module.apps:
app = module.apps[appname]
if 'abstract' in syslx.patterns(app.attrs):
continue
for epname in app.endpoints:
endpt = app.endpoints[epname]
if endpt.HasField('source'):
src_app = module.apps[syslx.fmt_app_name(endpt.source)]
src_ep_name = endpt.name.split(' -> ')[1]
assert src_ep_name in src_app.endpoints, (
appname, epname, src_ep_name, str(src_app))
src_ep = src_app.endpoints[src_ep_name]
# Add call to pubsub endpoint.
stmt = src_ep.stmt.add()
call = stmt.call
call.target.CopyFrom(app.name)
call.endpoint = endpt.name
# Maybe add ret.
ret_payload = syslalgo.return_payload(endpt.stmt)
if ret_payload:
stmt = src_ep.stmt.add()
stmt.ret.payload = ret_payload
def _apply_call_templates(app):
"""Apply call templates found in '.. * <- *' | '*' pseudo-endpoints.
Project-specific metadata may be applied as follows:
MyApp:
.. * <- *:
Foo <- bar [myproj='XYZ-007']
In the above example, whenever MyApp calls Foo <- bar,
_apply_call_templates will attach the attribute myproj='XYZ-007' to the
call.
It will also validate that all templates are applied at least once.
"""
# Look for the pseudo endpoint.
pseudos = {name for name in app.endpoints
if re.match(r'(\.\.\s*\*\s*<-\s*\*|\*)', name)}
if not pseudos:
return
if len(pseudos) > 1:
raise Exception('Too many call templates: {}'.format(
', '.join(repr(p) for p in pseudos)))
pseudo = app.endpoints[pseudos.pop()]
templates = {}
call_templates()
ep_templates()
# Error on unused templates, in case of typos.
call = None # In case of empty loop
unused = {
call
for (call, n) in templates.iteritems()
if n[1] == 0}
# TODO: add better message
# App is referring to an unused app-endpoint
if unused:
        raise RuntimeError('Unused templates in {}: {}'.format(
            fmt_app_name(app.name), ', '.join(repr(c) for c in unused)))
def _infer_types(app):
"""Infer types of views and expressions from their bodies.
Synthesize types for anonymous transforms.
"""
for (vname, v) in app.views.iteritems():
assert (
(v.expr.WhichOneof('expr') == 'transform') ^
('abstract' in syslx.patterns(v.attrs))
), '{}: {}'.format(vname, v.expr)
if v.ret_type.WhichOneof('type') is None:
assert v.expr.type.WhichOneof('type')
v.ret_type.CopyFrom(v.expr.type)
nAnons = [0]
infer_expr_type(v.expr)
def load(names, validate, root):
"""Load a sysl module."""
if isinstance(names, basestring):
names = [names]
module = sysl_pb2.Module()
imports = set()
def do_import(name, indent="-"):
"""Import a sysl module and its dependencies."""
imports.add(name)
(basedir, _) = os.path.split(name)
new_imports = {
root + i if i[:1] == '/' else os.path.join(basedir, i)
for i in syslparse.Parser().parse(
codecs.open(name + '.sysl', 'r', 'utf8'), name + '.sysl', module)
} - imports
while new_imports:
do_import(new_imports.pop(), indent + "-")
new_imports -= imports
for name in names:
if name not in imports:
if name[:1] != '/':
name = '/' + name
if name.endswith('.sysl'):
name = name[:-5]
do_import(root + name)
try:
postprocess(module)
deps = _check_deps(module, validate)
except RuntimeError as ex:
raise Exception('load({!r})'.format(names), ex, sys.exc_info()[2])
return (module, deps, imports)
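
# Illustrative usage (hypothetical module path, not part of the original source):
#   module, deps, imports = load('/demo/petshop', validate=True, root='.')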
| [input_ids omitted] | 2.088 | 3,250
from django.db import models
from simple_history.models import HistoricalRecords
| [input_ids omitted] | 4.15 | 20
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import shutil
import tempfile
import time
from oslo_concurrency import processutils
from oslo_log import log
from ironic_python_agent import errors
from ironic_python_agent.extensions import base
from ironic_python_agent import utils
LOG = log.getLogger(__name__)
| [input_ids omitted] | 3.820276 | 217
# coding=utf-8
# import spacy
import re, json
from math import log
# nlp = spacy.load('en',disable=["tagger", "parser", "ner"])
re_word = re.compile(r"\w+")
import json
tweets = {}
posting_list = {}
def re_get_word_count(text:str):
'''
params:
text: str
            the input text
return type:
yield tuple(word,word_count)
'''
_temp_dict = {}
for i in re_word.findall(text):
try:
# tweet_wb[i.lemma_] += 1
_temp_dict[str(i)] += 1
except Exception:
# tweet_wb.update({i.lemma_:1})
_temp_dict.update({str(i):1})
_temp_list = []
for a,b in _temp_dict.items():
yield (a,b)
avdl = 0
with open("../hw3/tweets.txt",'r',encoding='utf-8') as fp:
for i in fp:
tweet = json.loads(i)
tweetId = int(tweet['tweetId'])
text = tweet['text'].lower()
tweets.update({
int(tweetId):text
})
avdl += len(text)
# print(text)
# af = nlp(text.lower())
for i in re_get_word_count(text):
try:
posting_list[i[0]].append((tweetId,i[1]))
except Exception:
posting_list[i[0]] = []
posting_list[i[0]].append((tweetId,i[1]))
'''
af = re_word.findall(text)
tweet_wb = {}
temp_posting_list = []
for i in af:
try:
# tweet_wb[i.lemma_] += 1
tweet_wb[str(i)] += 1
except Exception:
# tweet_wb.update({i.lemma_:1})
tweet_wb.update({str(i):1})
for a,b in tweet_wb.items():
# temp_posting_list.append((a,b))
try:
posting_list[a].append((tweetId,b))
except Exception:
posting_list[a] = []
posting_list[a].append((tweetId,b))
# print(len(posting_list.keys()))
'''
print(avdl / len(tweets))
tf = {}
df = {}
# compute tf
for a,b in posting_list.items():
s = 0
for j in b:
s += j[1]
tf.update({a:s})
df.update({a:len(b)})
# print(tf)
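
# VSM_F and BM25 are called below but never defined in this snippet; the
# following are hedged reconstructions (standard pivoted-normalization VSM
# and Okapi BM25 shapes, with assumed parameter values), not the originals.
def VSM_F(query_tf, doc_tf, doc_len, doc_df, b=0.75):
    # pivoted length normalization with a double-log tf dampening
    idf = log((len(tweets) + 1) / doc_df)
    norm = 1 - b + b * doc_len / avdl
    return query_tf * (1 + log(1 + log(doc_tf))) / norm * idf

def BM25(query_tf, doc_tf, doc_len, doc_df, k1=1.2, b=0.75):
    # classic Okapi BM25 term weight
    idf = log((len(tweets) - doc_df + 0.5) / (doc_df + 0.5) + 1)
    tf_part = doc_tf * (k1 + 1) / (doc_tf + k1 * (1 - b + b * doc_len / avdl))
    return query_tf * idf * tf_part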
with open("query171-225_cleaned.txt",'r',encoding='utf-8') as fp:
querys = json.load(fp)
avdl = 8
for query_Id,query_text in querys.items():
# print(type(a))
query_Id = int(query_Id)
# print(a)
envalue = {i:0 for i in tweets.keys()}
for query_word,query_word_count in re_get_word_count(query_text):
# print(a)
if query_word in posting_list.keys():
# if found: only deal with word which is found
_temp_word_posting_list = posting_list[query_word]
for tweetId,tweet_word_count in _temp_word_posting_list:
envalue[tweetId] += VSM_F(query_word_count,tweet_word_count,len(tweets[tweetId]),df[query_word])
# envalue[tweetId] += BM25(query_word_count,tweet_word_count,len(tweets[tweetId]),df[query_word])
else:
# if not found: deal with all word which contain query_word
for tweet_word,doc_id_list in posting_list.items():
if query_word in tweet_word:
for tweetId,tweet_word_count in doc_id_list:
envalue[tweetId] += VSM_F(query_word_count,tweet_word_count,len(tweets[tweetId]),df[query_word])
# envalue[tweetId] += BM25(query_word_count,tweet_word_count,len(tweets[tweetId]),df[query_word])
sorted_envalue = []
for i,j in envalue.items():
sorted_envalue.append((i,j))
sorted_envalue.sort(key=lambda x:x[1],reverse=True)
with open('VSM_result.txt','a+',encoding='utf-8') as fp3:
for i in sorted_envalue:
            fp3.write(f'{query_Id} {i[0]}\n') | [input_ids omitted] | 1.888777 | 1,978
from bs4 import BeautifulSoup
import requests
import lxml
Print_title = True
#Forexlive
url = "http://www.forexlive.com/SessionWraps" #put URL here
r = requests.get(url)
data = r.text
soup = BeautifulSoup(data,"lxml") #Doing it like this prevents some weird error
if Print_title == True:
print ("FOREXLIVE LATEST SESSION WRAPS")
a = soup.body.article.ul.find_all("li")
for item in a:
if "orderboard" not in item.text:
if "trade ideas thread" not in item.text:
print (item.text)
| [input_ids omitted] | 2.639175 | 194
from typing import List
from math import inf | [input_ids omitted] | 4.888889 | 9
from numpy import mat
from GQA.quantum.population import Population
from operator import itemgetter
from GQA.functions.knapsack import Knapsack
import math
import random
from tqdm import tqdm
from matplotlib import pyplot as plt
| [input_ids omitted] | 3.216216 | 74
import netifaces as ni | [input_ids omitted] | 3.666667 | 6
# Review:
# Create a function called greet().
# Write 3 print statements inside the function.
# Call the greet() function and run your code.
def greet():
    # any three print statements satisfy the exercise (assumed body)
    print("Hello!")
    print("Welcome!")
    print("Have a great day!")
greet() | [input_ids omitted] | 3.846154 | 39
#!/usr/bin/env micropython
# -*- coding: utf-8 -*-
import socket_drcom as socket
import struct
import time
from md5_drcom import MD5Type as md5
import sys
import os
import random_drcom as random
import binascii
import uerrno
# CONFIG
server = "192.168.100.150"
username = ""
password = ""
host_name = "LIYUANYUAN"
host_os = "8089D"
host_ip = "10.30.22.17"
PRIMARY_DNS = "114.114.114.114"
dhcp_server = "0.0.0.0"
mac = 0xb888e3051680
CONTROLCHECKSTATUS = b'\x20'
ADAPTERNUM = b'\x01'
KEEP_ALIVE_VERSION = b'\xdc\x02'
AUTH_VERSION = b'\x0a\x00'
IPDOG = b'\x01'
ror_version = False
# CONFIG_END
'''
AUTH_VERSION:
unsigned char ClientVerInfoAndInternetMode;
unsigned char DogVersion;
'''
nic_name = ''  # Indicate your nic, e.g. 'eth0.2'
bind_ip = '0.0.0.0'
SALT = ''
IS_TEST = False
# specified fields based on version
CONF = "/etc/drcom_wired.conf"
UNLIMITED_RETRY = True
EXCEPTION = False
DEBUG = False #log saves to file
LOG_PATH = '/tmp/drcom_client.log'
PID_ENABLE = True
PID_PATH = '/var/run/drcom.pid'
if IS_TEST:
DEBUG = True
LOG_PATH = 'drcom_client.log'
if len(sys.argv) >= 2:
CONF = sys.argv[1]
# def packet_CRC(s):
# ret = 0
# for i in re.findall('..', s):
# ret ^= struct.unpack('>h', i)[0]
# ret &= 0xFFFF
# ret = ret * 0x2c7
# return ret
def mkpkt(salt, usr, pwd, mac):
'''
struct _tagLoginPacket
{
struct _tagDrCOMHeader Header;
unsigned char PasswordMd5[MD5_LEN];
char Account[ACCOUNT_MAX_LEN];
unsigned char ControlCheckStatus;
unsigned char AdapterNum;
unsigned char MacAddrXORPasswordMD5[MAC_LEN];
unsigned char PasswordMd5_2[MD5_LEN];
unsigned char HostIpNum;
unsigned int HostIPList[HOST_MAX_IP_NUM];
unsigned char HalfMD5[8];
unsigned char DogFlag;
unsigned int unkown2;
struct _tagHostInfo HostInfo;
unsigned char ClientVerInfoAndInternetMode;
unsigned char DogVersion;
};
'''
data = b'\x03\x01\x00' + bytes([len(usr) + 20])
data += md5sum(b'\x03\x01' + salt + pwd.encode())
data += (usr.encode() + 36 * b'\x00')[:36]
data += CONTROLCHECKSTATUS
data += ADAPTERNUM
data += dump(int(binascii.hexlify(data[4:10]), 16)
^ mac)[-6:] #mac xor md51
data += md5sum(b'\x01' + pwd.encode() + salt + b'\x00' * 4) #md52
data += b'\x01' # number of ip
data += b''.join([bytes([int(i)])
for i in host_ip.split('.')]) #x.x.x.x ->
data += b'\00' * 4 #your ipaddress 2
data += b'\00' * 4 #your ipaddress 3
data += b'\00' * 4 #your ipaddress 4
data += md5sum(data + b'\x14\x00\x07\x0B')[:8] #md53
data += IPDOG
data += b'\x00' * 4 # unknown2
'''
struct _tagOSVERSIONINFO
{
unsigned int OSVersionInfoSize;
unsigned int MajorVersion;
unsigned int MinorVersion;
unsigned int BuildNumber;
unsigned int PlatformID;
char ServicePack[128];
};
struct _tagHostInfo
{
char HostName[HOST_NAME_MAX_LEN];
unsigned int DNSIP1;
unsigned int DHCPServerIP;
unsigned int DNSIP2;
unsigned int WINSIP1;
unsigned int WINSIP2;
struct _tagDrCOM_OSVERSIONINFO OSVersion;
};
'''
data += (host_name.encode() + 32 * b'\x00')[:32] # _tagHostInfo.HostName
data += b''.join([bytes([int(i)])
for i in PRIMARY_DNS.split('.')]) # _tagHostInfo.DNSIP1
data += b''.join([bytes([int(i)]) for i in dhcp_server.split('.')
]) # _tagHostInfo.DHCPServerIP
data += b'\x00\x00\x00\x00' # _tagHostInfo.DNSIP2
data += b'\x00' * 4 # _tagHostInfo.WINSIP1
data += b'\x00' * 4 # _tagHostInfo.WINSIP2
data += b'\x94\x00\x00\x00' # _tagHostInfo.OSVersion.OSVersionInfoSize
data += b'\x05\x00\x00\x00' # _tagHostInfo.OSVersion.MajorVersion
data += b'\x01\x00\x00\x00' # _tagHostInfo.OSVersion.MinorVersion
data += b'\x28\x0A\x00\x00' # _tagHostInfo.OSVersion.BuildNumber
data += b'\x02\x00\x00\x00' # _tagHostInfo.OSVersion.PlatformID
# _tagHostInfo.OSVersion.ServicePack
data += (host_os.encode() + 32 * b'\x00')[:32]
data += b'\x00' * 96
# END OF _tagHostInfo
data += AUTH_VERSION
if ror_version:
'''
struct _tagLDAPAuth
{
unsigned char Code;
unsigned char PasswordLen;
unsigned char Password[MD5_LEN];
};
'''
data += b'\x00' # _tagLDAPAuth.Code
data += bytes([len(pwd)]) # _tagLDAPAuth.PasswordLen
        data += ror(md5sum(b'\x03\x01' + salt + pwd.encode()),
                    pwd)  # _tagLDAPAuth.Password
'''
struct _tagDrcomAuthExtData
{
unsigned char Code;
unsigned char Len;
unsigned long CRC;
unsigned short Option;
unsigned char AdapterAddress[MAC_LEN];
};
'''
data += b'\x02' # _tagDrcomAuthExtData.Code
data += b'\x0C' # _tagDrcomAuthExtData.Len
data += checksum(data + b'\x01\x26\x07\x11\x00\x00' +
dump(mac)) # _tagDrcomAuthExtData.CRC
data += b'\x00\x00' # _tagDrcomAuthExtData.Option
data += dump(mac) # _tagDrcomAuthExtData.AdapterAddress
# END OF _tagDrcomAuthExtData
data += b'\x00' # auto logout / default: False
data += b'\x00' # broadcast mode / default : False
data += b'\xE9\x13' #unknown, filled numbers randomly =w=
log('[mkpkt]', str(binascii.hexlify(data))[2:][:-1])
return data
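
# md5sum, dump, checksum, ror and log are called in this file but their
# definitions did not survive in the snippet; the following are hedged
# reconstructions in the style of drcom-generic clients (byte-level details
# assumed, not verified against a real server).
def md5sum(s):
    m = md5()  # MD5Type imported above; assumed to expose update()/digest()
    m.update(s)
    return m.digest()

def dump(n):
    # big-endian byte string of an integer
    s = '%x' % n
    if len(s) % 2:
        s = '0' + s
    return binascii.unhexlify(s)

def checksum(s):
    # fold the packet into a 32-bit little-endian checksum
    ret = 1234
    for i in range(0, len(s), 4):
        chunk = s[i:i + 4].ljust(4, b'\x00')
        ret ^= int(binascii.hexlify(chunk[::-1]), 16)
    ret = (1968 * ret) & 0xffffffff
    return struct.pack('<I', ret)

def ror(md5_bytes, pwd):
    # rotate-left-by-3 of (md5 XOR password), byte by byte
    ret = b''
    for i in range(len(pwd)):
        x = md5_bytes[i] ^ ord(pwd[i])
        ret += bytes([((x << 3) & 0xff) + (x >> 5)])
    return ret

def log(*args):
    print(' '.join(str(a) for a in args))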
socket.log = log
if __name__ == "__main__":
main()
| [input_ids omitted] | 2.158165 | 2,529
import argparse
from pythonosc import osc_message_builder
from pythonosc import udp_client
if __name__ == '__main__':
    main() | [input_ids omitted] | 3.093023 | 43
from flask import abort
from flask import Blueprint
from flask import render_template
from jinja2 import TemplateNotFound
from flask_login import login_required, current_user
router = Blueprint('profile_route', __name__,
template_folder='templates',
static_folder='static',
static_url_path='/profile/static')
@router.route('/profile')
@login_required
def profile():
    # minimal view body (assumed); the original snippet ends at the decorators
    try:
        return render_template('profile.html', user=current_user)
    except TemplateNotFound:
        abort(404)
| [input_ids omitted] | 2.666667 | 156