blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 246
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c3a204b93156cbcd8e27787d9c7665ae8196a3c3 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /Sourcem8/pirates/instance/DistributedTeleportMgr.py | b9c0736e2218759b8f8a6859c8f75acb8541aa1b | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 31,217 | py | from pandac.PandaModules import *
from direct.task import Task
from direct.distributed import DistributedObject
from pirates.piratesbase import PiratesGlobals
from pirates.world import ZoneLOD
from direct.showbase.PythonUtil import report
from otp.otpbase import OTPLocalizer
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import PDialog
from otp.otpgui import OTPDialog
from pirates.quest import QuestDB, QuestLadderDB
'''
DistributedTeleportMgr coordinates all client-side avatar teleportation:
local teleports within the active world, cross-instance teleports, and
cross-shard moves.  The control flow is heavily callback-driven (player
confirmation dialogs, interest-complete events, AI round trips); see the
per-method docstrings for the sequence of events in each path.
'''
class DistributedTeleportMgr(DistributedObject.DistributedObject):
    # Client-side singleton (registered on base.cr.teleportMgr) that drives
    # every kind of avatar teleport.
    notify = directNotify.newCategory('DistributedTeleportMgr')

    def __init__(self, cr):
        """Initialize all teleport bookkeeping to an idle state.

        cr: the client repository this distributed object belongs to.
        """
        DistributedObject.DistributedObject.__init__(self, cr)
        self.instanceType = None            # destination instance type of the current teleport
        self.fromInstanceType = None        # instance type being left
        self.lastLocalTeleportLoc = None    # doId of the last local-teleport destination
        self.teleportQueryId = None         # avId we asked for permission to visit
        self.inInstanceType = PiratesGlobals.INSTANCE_MAIN
        self.instanceName = 'mainWorld'
        self.doneCallback = None            # fired when a teleport completes
        self.startedCallback = None         # fired when a teleport begins
        self.oldWorld = None                # world object we teleported away from
        self.requestData = None             # pending (args, kwargs) awaiting user confirmation
        self.localTeleportId = None         # doId of the local-teleport target object
        self.localTeleportingObj = None     # avatar/object being teleported locally
        self.localTeleportCallback = None   # invoked by _localTeleportToIdDone
        self.localTeleportDestPos = None
        self.popupDialog = None             # failure-message dialog, if showing
        self.doEffect = False               # play the teleport visual effect?
        self.stowawayEffect = False         # play the stowaway variant instead?
        self.miniLog = None                 # MiniLog tracing the current teleport attempt
        self.teleportQueue = []             # queued initiateTeleport argument lists
        self.teleportQueueProcess = None    # doMethodLater task draining the queue

    def generate(self):
        """Register as the repository-wide teleport manager once generated."""
        DistributedObject.DistributedObject.generate(self)
        base.cr.teleportMgr = self
        self.localTeleportingObj = localAvatar
        # Seed consumed in _localTeleportToIdInterestComplete: the first
        # non-ship local teleport goes immediately ("go now"), later ones
        # play the full fade effect.
        self.__pendingGoNow = [True]
        localAvatar.readyToTeleport(self)
        self.accept('localAvTeleportFinishedRequest', self.localAvTeleportFinishedRequest)
def requestLocalTeleport(self, locationName = None):
self.requestData = ((), {
'locationName': locationName })
localAvatar.confirmTeleport(self.localTeleportConfirmation, feedback = True)
def localTeleportConfirmation(self, confirmed):
if confirmed:
requestData = self.requestData
self.localTeleport(*requestData[0], **requestData[1])
locationUid = requestData['locationUid']
base.cr.loadingScreen.showTarget(locationUid)
base.cr.loadingScreen.showHint(locationUid)
self.requestData = None
    def localTeleportEffect(self, teleportPosHpr, parent = None, smooth = False, goNow = False):
        """Teleport within the current world, optionally with a fade effect.

        Skips the 'TeleportOut' effect when the avatar is in water or when
        goNow is set; otherwise plays the effect and repositions 5s later.
        """
        if localAvatar.testTeleportFlag(PiratesGlobals.TFInWater) or goNow:
            self.localTeleportPos(teleportPosHpr, parent, smooth)
        else:
            localAvatar.b_setGameState('TeleportOut')
            taskMgr.doMethodLater(5, self.localTeleportPos, self.uniqueName('localTeleportPos'), extraArgs = [
                teleportPosHpr,
                parent,
                smooth])

    def localTeleportPos(self, teleportPosHpr, parent = None, smooth = False):
        """Reposition localAvatar at teleportPosHpr under parent (a grid).

        Handles leaving/re-entering zone LODs, re-registering the avatar on
        the destination grid, and switching game state back to 'TeleportIn'.
        """
        localAvatar.b_setGameState('TeleportOut', [
            None,
            False])
        currParent = localAvatar.getParentObj()
        if isinstance(currParent, ZoneLOD.ZoneLOD):
            localAvatar.leaveZoneLOD(currParent)
        if parent == None:
            parent = self.cr.activeWorld.worldGrid
        messenger.send('islandPlayerBarrier', [
            0])
        teleportZone = parent.getZoneFromXYZ(teleportPosHpr[:3])
        localAvatar.reparentTo(parent)
        localAvatar.setPosHpr(*teleportPosHpr)
        localAvatar.spawnWiggle()
        localAvatar.b_setLocation(parent.getDoId(), teleportZone)
        parent.addObjectToGrid(localAvatar)
        parent.setPlayerBarrier(1)
        currParent = localAvatar.getParentObj()
        if isinstance(currParent, ZoneLOD.ZoneLOD):
            localAvatar.enterZoneLOD(currParent)
        parent.processVisibility(None)
        # Wait for outstanding interest operations before 'TeleportIn' so
        # the destination zone is fully loaded when the avatar appears.
        if base.cr._completeEventCount.num > 0:
            self.acceptOnce(base.cr.getAllInterestsCompleteEvent(), localAvatar.b_setGameState, extraArgs = [
                'TeleportIn'])
        else:
            localAvatar.b_setGameState('TeleportIn')
    def localTeleport(self, locationName = None, goNow = False, locationUid = None):
        """Teleport to an island in the current world by name or by uid.

        If both are given, the uid wins (name is discarded).  Calls
        notify.error (raises) when no matching island object is found.
        """
        if locationName and locationUid:
            locationName = None
        for currIsle in base.cr.doId2do.values():
            if not (hasattr(currIsle, 'getName') and hasattr(currIsle, 'getUniqueId')):
                continue
            if currIsle.getName() == locationName:
                break
            elif currIsle.getUniqueId() == locationUid:
                break
        else:
            self.notify.error('not found: (%s, %s)' % (locationName, locationUid))
        currInteractive = base.cr.interactionMgr.getCurrentInteractive()
        if currInteractive:
            currInteractive.requestExit()
        # The spawn point can depend on the avatar's quest state.
        questStateSpawnIdx = QuestLadderDB.getPreferredAreaSpawnNode(currIsle.getUniqueId(), localAvatar)
        teleportPos = base.cr.activeWorld.getPlayerSpawnPt(currIsle.getDoId(), index = questStateSpawnIdx)
        if teleportPos == None:
            teleportPos = (0, 0, 0, 0, 0, 0)
        self.localTeleportEffect(teleportPos, currIsle, goNow = goNow)
        self.lastLocalTeleportLoc = currIsle.getDoId()
    def requestTeleportToFishingShip(self):
        # Ask the AI to pick a fishing ship and teleport us onto it.
        print 'requestTeleportToFishingShip'
        self.cr.teleportMgr.sendUpdate('requestTeleportToFishingShip')

    def teleportToFishingShipResponse(self, shipId):
        # AI reply to requestTeleportToFishingShip with the chosen ship doId.
        print 'teleportToFishingShipResponse'
        print 'shipId=', shipId
        self.cr.teleportMgr.localTeleportToId(shipId, localAvatar, showLoadingScreen = False)
def localTeleportToId(self, locationId, teleportingObj = None, destPos = None, callback = None, objectLocation = None, showLoadingScreen = True):
if showLoadingScreen:
self.cr.loadingScreen.show(waitForLocation = True)
if locationId in base.cr.doId2do and base.cr.doId2do[locationId].dclass.getName() == 'DistributedOceanGrid':
logBlock(1, 'localTeleportToId(%s,%s,%s,%s,%s,%s) to ocean grid\n\n' % (locationId, teleportingObj, destPos, callback, objectLocation, showLoadingScreen) + str(StackTrace()))
self.localTeleportId = locationId
self.localTeleportingObj = teleportingObj
self.localTeleportCallback = callback
self.localTeleportDestPos = destPos
destObj = self.cr.doId2do.get(locationId)
if destObj:
self._localTeleportToIdInterestComplete()
self.notify.debug('destination object %s found, teleporting to there now' % locationId)
elif objectLocation:
self._localTeleportToIdResponse(objectLocation[0], objectLocation[1])
self.notify.debug('destination object %s not found, but location %s given' % (locationId, objectLocation))
else:
self.sendUpdate('requestTargetsLocation', [
int(locationId)])
self.notify.debug('destination object %s not found, querying AI for its location' % locationId)
    def _localTeleportToIdResponse(self, objectId, parentId, zoneId):
        """AI reply to requestTargetsLocation: open interest at the target.

        A (0, 0) parent/zone pair means the AI could not locate the target,
        so the teleport fails over to failTeleport.
        """
        self.localTeleportId = objectId
        if parentId != 0 and zoneId != 0:
            if self.cr.doId2do.get(parentId):
                localAvatar.setInterest(parentId, zoneId, [
                    'localTeleportToId'], 'localTeleportToIdInterestAddComplete')
                self.acceptOnce('localTeleportToIdInterestAddComplete', self._localTeleportToIdInterestComplete)
                self.notify.debug('parent %s of destination object found, setting up interest' % parentId)
            else:
                self.notify.warning('parent %s of destination object not found, teleport failure' % parentId)
        else:
            self.failTeleport(parentId, zoneId)
    def failTeleport(self, parentId = None, zoneId = None, message = PLocalizer.TeleportToPlayerFailMessage):
        """Abort the current teleport and fall back to a safe location.

        Tries the avatar's stored return location first; failing that,
        teleports back to the main world, then shows `message` in a dialog.
        """
        self.sendUpdate('requestClearPreventDamage')
        fallbackAreaId = localAvatar.getReturnLocation()
        if fallbackAreaId != '':
            areaDoId = base.cr.uidMgr.getDoId(fallbackAreaId)
            self.clearAmInTeleport()
            if areaDoId:
                destPos = base.cr.activeWorld.getPlayerSpawnPt(areaDoId)
                if destPos and self.localTeleportingObj:
                    self.localTeleportToId(areaDoId, self.localTeleportingObj, destPos)
                else:
                    self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, 'mainWorld', doEffect = False)
            else:
                self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, 'mainWorld', doEffect = False)
            self._DistributedTeleportMgr__createDialog(message)
        else:
            self.notify.warning(" teleport to object (%s %s) AND 'return location' %s failed" % (parentId, zoneId, fallbackAreaId))

    def _DistributedTeleportMgr__cleanupDialog(self, value = None):
        # Tear down the failure popup.  The pre-mangled name is a decompiler
        # artifact of an original double-underscore private method.
        if self.popupDialog:
            self.popupDialog.destroy()
            del self.popupDialog
            self.popupDialog = None

    def _DistributedTeleportMgr__createDialog(self, message):
        # Show an acknowledge-only popup carrying the failure message,
        # replacing any dialog already on screen.
        if message:
            popupDialogText = message
            if self.popupDialog:
                self._DistributedTeleportMgr__cleanupDialog()
            self.popupDialog = PDialog.PDialog(text = popupDialogText, style = OTPDialog.Acknowledge, command = self._DistributedTeleportMgr__cleanupDialog)
    def _localTeleportToIdInterestComplete(self):
        """Destination object now exists locally; finish the local teleport."""
        teleportToObj = self.cr.doId2do.get(self.localTeleportId)
        if not teleportToObj:
            # Object vanished between interest-complete and now; re-query AI.
            self.sendUpdate('requestTargetsLocation', [
                self.localTeleportId])
            return None
        curParent = localAvatar.getParentObj()
        parentIsZoneLOD = isinstance(curParent, ZoneLOD.ZoneLOD)
        if parentIsZoneLOD:
            localAvatar.leaveZoneLOD(curParent)
        # Ships teleport differently from islands (board-on-build flow).
        try:
            isAShip = teleportToObj._isShip()
        except AttributeError:
            isAShip = False
        if isAShip:
            if not teleportToObj.isSailable():
                self.failTeleport(0, 0, PLocalizer.TeleportToGoneShipFailMessage)
                return None
            elif teleportToObj.gameFSM.getCurrentOrNextState() in ('InBoardingPosition', 'OtherShipBoarded'):
                self.failTeleport(0, 0, PLocalizer.TeleportToBoardingShipFailMessage)
                return None
            teleportToObj.setZoneLevel(3)
            # Board once the ship model is built, then run completion hooks.
            teleportToObj.registerMainBuiltFunction(localAvatar.placeOnShip, [
                teleportToObj])
            teleportToObj.registerBuildCompleteFunction(teleportToObj.enableOnDeckInteractions)
            teleportToObj.registerBuildCompleteFunction(self._localTeleportToIdDone)
            base.setLocationCode('Ship')
        else:
            # First island teleport after generate() goes immediately
            # (seeded True in generate); subsequent ones play the effect.
            self.__pendingGoNow.append(False)
            goNow = self.__pendingGoNow.pop(0)
            self.localTeleport(locationUid = teleportToObj.getUniqueId(), goNow = goNow)

    def _localTeleportToIdDone(self):
        """Post-teleport cleanup: hide loading screen, restore LOD, callbacks."""
        self.cr.loadingScreen.scheduleHide(base.cr.getAllInterestsCompleteEvent())
        curParent = localAvatar.getParentObj()
        if isinstance(curParent, ZoneLOD.ZoneLOD):
            localAvatar.enterZoneLOD(curParent)
        if self.localTeleportCallback:
            self.localTeleportCallback()
        self.localTeleportId = None
        self.localTeleportingObj = None
        self.localTeleportCallback = None
        self.localTeleportDestPos = None
        localAvatar.guiMgr.socialPanel.updateAll()
def disable(self):
DistributedObject.DistributedObject.disable(self)
messenger.send('destroyCrewMatchInvite')
taskMgr.removeTasksMatching('teleportRemoveInterest')
taskMgr.removeTasksMatching('teleportAddInterest')
taskMgr.removeTasksMatching(self.uniqueName('localTeleportPos'))
taskMgr.removeTasksMatching(self.uniqueName('fadeDone'))
self.requestData = None
self.ignoreAll()
if base.cr.teleportMgr == self:
base.cr.teleportMgr = None
requestData = self.requestData
self.requestData = None
if self.teleportQueueProcess:
taskMgr.remove(self.teleportQueueProcess)
    def requestTeleport(self, instanceType, instanceName, shardId = 0, locationUid = '', instanceDoId = 0, doneCallback = None, startedCallback = None, gameType = -1, friendDoId = 0, friendAreaDoId = 0, doEffect = True):
        """Store a cross-instance teleport request as (args, kwargs) and ask
        the player to confirm it; teleportConfirmation consumes the data."""
        self.requestData = ((instanceType, instanceName), {
            'shardId': shardId,
            'locationUid': locationUid,
            'instanceDoId': instanceDoId,
            'doneCallback': doneCallback,
            'startedCallback': startedCallback,
            'gameType': gameType,
            'friendDoId': friendDoId,
            'friendAreaDoId': friendAreaDoId,
            'doEffect': doEffect })
        localAvatar.confirmTeleport(self.teleportConfirmation, feedback = True)
def teleportConfirmation(self, confirmed):
if confirmed:
requestData = self.requestData
self.initiateTeleport(*requestData[0], **requestData[0])
locationUid = requestData[1]['locationUid']
base.cr.loadingScreen.showTarget(locationUid)
base.cr.loadingScreen.showHint(locationUid)
self.requestData = None
    def requestTeleportToAvatar(self, shardId, instanceDoId, avatarId, avatarParentId):
        # Teleport to a friend's avatar (goes through the confirmation flow).
        self.requestTeleport(PiratesGlobals.INSTANCE_MAIN, '', shardId, '', instanceDoId, friendDoId = avatarId, friendAreaDoId = avatarParentId)

    def teleportToObjectResp(self, shardId, instanceId, objId, parentId):
        # AI response: teleport to an arbitrary object in another instance.
        self.requestTeleport(PiratesGlobals.INSTANCE_MAIN, '', shardId, '', instanceId, friendDoId = objId, friendAreaDoId = parentId)

    def requestTeleportToShip(self, shardId, instanceDoId, shipId):
        # Teleport onto a ship; bypasses the confirmation dialog on purpose.
        self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, '', shardId, '', instanceDoId, friendDoId = 0, friendAreaDoId = shipId)
    def requestTeleportToIsland(self, islandUid):
        """Teleport to an island: locally when possible, else via the AI."""
        def teleportConfirmation(confirmed, islandUid = islandUid):
            self.islandTeleportConfirmation(confirmed, islandUid)

        # Raise extra confirmation checks for this request only, then drop
        # them again once the dialog chain has been kicked off.
        localAvatar.setTeleportFlag(PiratesGlobals.TFNoIslandToken, localAvatar.confirmIslandTokenTeleport, [
            islandUid])
        localAvatar.setTeleportFlag(PiratesGlobals.TFSameArea, localAvatar.confirmNotSameAreaTeleport, [
            islandUid])
        localAvatar.confirmTeleport(teleportConfirmation, feedback = True)
        localAvatar.clearTeleportFlag(PiratesGlobals.TFNoIslandToken)
        localAvatar.clearTeleportFlag(PiratesGlobals.TFSameArea)

    def islandTeleportConfirmation(self, confirmed, islandUid):
        # Use a cheap local teleport when the island lives in our active
        # world; otherwise ask the AI to move us between instances.
        if confirmed:
            islandDoId = self.cr.uidMgr.getDoId(islandUid)
            island = self.cr.getDo(islandDoId)
            if island and island.getParentObj() is self.cr.activeWorld:
                self.localTeleport(locationName = island.getName())
            else:
                self.sendUpdate('requestTeleportToIsland', [
                    islandUid])
            base.cr.loadingScreen.showTarget(islandUid)
            base.cr.loadingScreen.showHint(islandUid)

    def teleportToIslandResponse(self, instanceDoId, islandDoId):
        # AI reply to requestTeleportToIsland.
        if instanceDoId and islandDoId:
            self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, '', self.cr.distributedDistrict.doId, '', instanceDoId, friendAreaDoId = islandDoId)

    def stowawayTeleportResponse(self, instanceDoId, islandDoId):
        # Like teleportToIslandResponse, but plays the stowaway effect.
        if instanceDoId and islandDoId:
            self.initiateTeleport(PiratesGlobals.INSTANCE_MAIN, '', self.cr.distributedDistrict.doId, '', instanceDoId, friendAreaDoId = islandDoId, doEffect = False, stowawayEffect = True)
            base.cr.loadingScreen.showTarget(base.cr.doId2do[islandDoId].getUniqueId())
    def queryAvatarForTeleport(self, avId):
        """Ask avatar avId (after local confirmation) whether we may visit."""
        self.setTeleportQueryId(avId)
        def teleportConfirmation(confirmed, avId = avId):
            if confirmed:
                handle = self.cr.identifyAvatar(avId)
                if handle:
                    shardId = self.cr.distributedDistrict.doId
                    # Decompiler artifact: the band lookup was optimized
                    # away; band manager/id are always sent as (0, 0).
                    if not localAvatar.getBandId():
                        pass
                    (bandMgr, bandId) = (0, 0)
                    guildId = localAvatar.getGuildId()
                    handle.sendTeleportQuery(avId, bandMgr, bandId, guildId, shardId)

        localAvatar.confirmTeleport(teleportConfirmation, feedback = True)

    def handleAvatarTeleportQuery(self, requesterId, requesterBandMgrId, requesterBandId, requesterGuildId, requesterShardId):
        """Another avatar asked to teleport to us; reply with our location."""
        handle = self.cr.identifyAvatar(requesterId)
        if not handle:
            return None
        if self.cr.identifyFriend(requesterId):
            # Ignored players are refused outright.
            if requesterId in localAvatar.ignoreList or self.cr.avatarFriendsManager.checkIgnored(requesterId):
                handle.sendTeleportResponse(PiratesGlobals.encodeTeleportFlag(PiratesGlobals.TFIgnore), 0, 0, 0, sendToId = requesterId)
                return None
        avName = handle.getName()
        def confirmed(canTeleportTo, avId, failedFlag, avName = avName):
            if canTeleportTo:
                if self.cr.getActiveWorld() and self.cr.distributedDistrict and localAvatar.getParentObj():
                    handle.sendTeleportResponse(PiratesGlobals.TAAvailable, self.cr.distributedDistrict.doId, self.cr.getActiveWorld().doId, localAvatar.getParentObj().doId, sendToId = requesterId)
                else:
                    handle.sendTeleportResponse(PiratesGlobals.encodeTeleportFlag(PiratesGlobals.TFUnavailable), 0, 0, 0, sendToId = requesterId)
            elif localAvatar.failedTeleportMessageOk(requesterId):
                localAvatar.setSystemMessage(requesterId, OTPLocalizer.WhisperFailedVisit % avName)
                # NOTE(review): indentation was lost in this dump; the
                # refusal response is assumed to belong to this branch --
                # confirm against the original bytecode.
                handle.sendTeleportResponse(PiratesGlobals.encodeTeleportFlag(failedFlag), 0, 0, 0, sendToId = requesterId)

        localAvatar.confirmTeleportTo(confirmed, requesterId, avName, requesterBandMgrId, requesterBandId, requesterGuildId)

    def handleAvatarTeleportResponse(self, avId, available, shardId, instanceDoId, areaDoId):
        """A visited avatar's answer to our query; proceed or show reason."""
        if not avId == self.teleportQueryId:
            # Stale reply from an avatar we are no longer asking about.
            self.clearTeleportQueryId()
            return None
        self.clearTeleportQueryId()
        handle = self.cr.identifyAvatar(avId)
        if handle:
            avName = handle.getName()
        else:
            return None
        if available == PiratesGlobals.TAAvailable:
            def teleportConfirmation(confirmed, shardId = shardId, instanceDoID = instanceDoId, avId = avId, avatarParentId = areaDoId):
                if confirmed:
                    self.requestTeleportToAvatar(shardId, instanceDoId, avatarId = avId, avatarParentId = areaDoId)

            localAvatar.setTeleportFlag(PiratesGlobals.TFSameArea, localAvatar.confirmNotSameAreaTeleportToPlayer, [
                areaDoId])
            localAvatar.confirmTeleport(teleportConfirmation, feedback = True)
            localAvatar.clearTeleportFlag(PiratesGlobals.TFSameArea)
        else:
            flag = PiratesGlobals.decodeTeleportFlag(available)
            if flag == PiratesGlobals.TAIgnore:
                # decompiler artifact: original branch body was elided
                pass
            1
            if flag in PiratesGlobals.TFNoTeleportToReasons:
                localAvatar.guiMgr.createWarning(PiratesGlobals.TFNoTeleportToReasons[flag] % avName, duration = 10)
def initiateTeleport(self, instanceType, instanceName, shardId = 0, locationUid = '', instanceDoId = 0, doneCallback = None, startedCallback = None, gameType = -1, friendDoId = 0, friendAreaDoId = 0, doEffect = True, queue = False, stowawayEffect = False):
currInteractive = base.cr.interactionMgr.getCurrentInteractive()
if currInteractive:
currInteractive.requestExit()
if self.cr.activeWorld:
fromInstanceType = self.cr.activeWorld.getType()
else:
fromInstanceType = PiratesGlobals.INSTANCE_NONE
if instanceType not in [
PiratesGlobals.INSTANCE_MAIN,
PiratesGlobals.INSTANCE_WELCOME] and fromInstanceType not in [
PiratesGlobals.INSTANCE_MAIN,
PiratesGlobals.INSTANCE_GENERIC,
PiratesGlobals.INSTANCE_NONE]:
if not base.config.GetBool('can-break-teleport-rules', 0):
import pdb as pdb
pdb.set_trace()
return None
if self.amInTeleport():
if queue:
self.queueInitiateTeleport(instanceType, instanceName, shardId, locationUid, instanceDoId, doneCallback, startedCallback, gameType, friendDoId, friendAreaDoId, doEffect, stowawayEffect)
return None
return None
self.setAmInTeleport()
if instanceType == PiratesGlobals.INSTANCE_MAIN and not locationUid:
locationUid = localAvatar.returnLocation
localAvatar.teleportFriendDoId = friendDoId
self.doEffect = doEffect
self.stowawayEffect = stowawayEffect
self.sendUpdate('initiateTeleport', [
instanceType,
fromInstanceType,
shardId,
locationUid,
instanceDoId,
instanceName,
gameType,
friendDoId,
friendAreaDoId])
self.doneCallback = doneCallback
self.startedCallback = startedCallback
self.teleportInit(instanceType, fromInstanceType, instanceName)
def queueInitiateTeleport(self, instanceType, instanceName, shardId = 0, locationUid = '', instanceDoId = 0, doneCallback = None, startedCallback = None, gameType = -1, friendDoId = 0, friendAreaDoId = 0, doEffect = True, stowawayEffect = False):
teleInfo = [
instanceType,
instanceName,
shardId,
locationUid,
instanceDoId,
doneCallback,
startedCallback,
gameType,
friendDoId,
friendAreaDoId,
doEffect,
stowawayEffect]
self.teleportQueue.append(teleInfo)
def processTeleportQueue(task = None):
if self.amInTeleport():
return Task.again
if not self.teleportQueue:
return Task.done
teleportInfo = self.teleportQueue.pop(0)
self.initiateTeleport(*teleportInfo)
if self.teleportQueue:
return Task.again
return Task.done
self.teleportQueueProcess = taskMgr.doMethodLater(1, processTeleportQueue, 'processTeleportQueue')
    def amInTeleport(self):
        """Return True while a teleport is in progress for the local avatar."""
        return localAvatar.testTeleportFlag(PiratesGlobals.TFInTeleport)

    def setAmInTeleport(self):
        """Mark the local avatar as teleporting (and drop any lookout join)."""
        localAvatar.b_setTeleportFlag(PiratesGlobals.TFInTeleport)
        localAvatar.b_clearTeleportFlag(PiratesGlobals.TFLookoutJoined)

    def clearAmInTeleport(self):
        """Clear the teleport-in-progress flags on the local avatar."""
        # NOTE(review): clears TFInInitTeleport locally but TFInTeleport on
        # the distributed setter -- asymmetry assumed intentional; confirm.
        localAvatar.clearTeleportFlag(PiratesGlobals.TFInInitTeleport)
        localAvatar.b_clearTeleportFlag(PiratesGlobals.TFInTeleport)

    def setTeleportQueryId(self, avId):
        # Remember which avatar we asked for teleport permission.
        self.teleportQueryId = avId

    def clearTeleportQueryId(self):
        # Forget the pending teleport-permission query.
        self.teleportQueryId = 0
def initiateTeleportAI(self, instanceType, instanceName):
self.teleportInit(instanceType, instanceName)
    def teleportInit(self, instanceType, fromInstanceType, instanceName, gameType = None):
        """Record where this teleport goes (and came from) and open a trace log."""
        self.clearTeleportQueryId()
        self.oldWorld = base.cr.activeWorld
        self.instanceType = instanceType
        self.fromInstanceType = fromInstanceType
        self.instanceName = instanceName
        self.gameType = gameType
        # MiniLog/MiniLogSentry are not imported here -- presumably injected
        # as builtins by the game engine; verify before running standalone.
        self.miniLog = MiniLog('TeleportLog')
        MiniLogSentry(self.miniLog, 'teleportInit', instanceType, fromInstanceType, instanceName, gameType)

    def teleportHasBegun(self, instanceType, fromInstanceType, instanceName, gameType):
        """AI notification that the teleport actually started."""
        if not self.miniLog:
            self.miniLog = MiniLog('TeleportLog')
        s = MiniLogSentry(self.miniLog, 'teleportHasBegun', instanceType, fromInstanceType, instanceName, gameType)
        if self.startedCallback:
            self.startedCallback()
        self.startedCallback = None
        # Re-initialize when this client never ran teleportInit itself
        # (an AI-initiated teleport).
        if self.oldWorld == None or self.oldWorld.isEmpty():
            self.teleportInit(instanceType, fromInstanceType, instanceName, gameType)

    def getRemoveInterestEventName(self):
        # Unique event name signalling remove-interest completion.
        return self.uniqueName('teleportRemoveInterest')

    def getAddInterestEventName(self):
        # Unique event name signalling add-interest completion.
        return self.uniqueName('teleportAddInterest')
    def forceTeleportStart(self, instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone):
        """AI push: fade out, then open interest in the teleport zone."""
        s = MiniLogSentry(self.miniLog, 'forceTeleportStart', instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone)
        self.setAmInTeleport()
        localAvatar.guiMgr.request('Cutscene')
        if not base.transitions.fadeOutActive():
            base.transitions.fadeOut()
        if self.fromInstanceType == PiratesGlobals.INSTANCE_MAIN:
            self.inInstanceType = PiratesGlobals.INSTANCE_MAIN
        else:
            self.inInstanceType = self.instanceType
        # Leaving PVP/tutorial clears the matching state flag.
        if self.fromInstanceType == PiratesGlobals.INSTANCE_PVP:
            localAvatar.clearTeleportFlag(PiratesGlobals.TFInPVP)
        elif self.fromInstanceType == PiratesGlobals.INSTANCE_TUTORIAL:
            localAvatar.clearTeleportFlag(PiratesGlobals.TFInTutorial)

        def fadeDone():
            # Runs 1s after the fade: leave the old zone LODs and request
            # interest in the new teleport zone (or fail out).
            base.cr.loadingScreen.show()
            s = MiniLogSentry(self.miniLog, 'fadeDone')
            curParent = localAvatar.getParentObj()
            parentIsZoneLOD = isinstance(curParent, ZoneLOD.ZoneLOD)
            if parentIsZoneLOD:
                localAvatar.leaveZoneLOD(curParent)
                curParent.turnOff()
            if self.cr.doId2do.get(tzParent) == None:
                self.failTeleport(None, None, PLocalizer.TeleportGenericFailMessage)
            else:
                self.teleportAddInterestTZ(instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone)
            localAvatar.guiMgr.request('Interactive')

        taskMgr.removeTasksMatching(self.uniqueName('fadeDone'))
        taskMgr.doMethodLater(1, fadeDone, self.uniqueName('fadeDone'), extraArgs = [])

    def teleportAddInterestTZ(self, instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone):
        """Open interest in the teleport zone and wait for completion."""
        s = MiniLogSentry(self.miniLog, 'teleportAddInterestTZ', instanceName, tzDoId, thDoId, worldGridDoId, tzParent, tzZone)
        addEvent = self.getAddInterestEventName()
        self.accept(addEvent, self.teleportAddInterestCompleteTZ, extraArgs = [
            tzDoId,
            thDoId,
            worldGridDoId])
        localAvatar.setInterest(tzParent, tzZone, [
            'TZInterest'], addEvent)
        self.instanceName = instanceName

    def teleportAddInterestCompleteTZ(self, tzDoId, thDoId, worldGridDoId):
        # Interest opened; wait for the teleport-zone object to generate.
        s = MiniLogSentry(self.miniLog, 'teleportAddInterestCompleteTZ', tzDoId, thDoId, worldGridDoId)
        base.cr.relatedObjectMgr.requestObjects([
            tzDoId], eachCallback = lambda param1, param2 = thDoId: self.teleportZoneExists(param1, param2))

    def teleportZoneExists(self, teleportZone, thDoId):
        # Zone object exists; now wait for the teleport handler object.
        s = MiniLogSentry(self.miniLog, 'teleportZoneExists', teleportZone, thDoId)
        base.cr.relatedObjectMgr.requestObjects([
            thDoId], eachCallback = lambda param1, param2 = teleportZone: self.teleportHandlerExists(param1, param2))

    def teleportHandlerExists(self, teleportHandler, teleportZone):
        """Hand our teleport state over to the handler object and start it."""
        s = MiniLogSentry(self.miniLog, 'teleportHandlerExists', teleportHandler, teleportZone)
        teleportHandler.instanceName = self.instanceName
        teleportHandler.instanceType = self.instanceType
        teleportHandler.doneCallback = self.doneCallback
        self.doneCallback = None
        teleportHandler.oldWorld = self.oldWorld
        self.oldWorld = None
        teleportHandler.miniLog = self.miniLog
        self.miniLog = None
        teleportHandler.startTeleport()

    def localAvTeleportFinishedRequest(self, task = None):
        # Broadcast completion once no teleport remains in flight.
        if not self.amInTeleport():
            messenger.send('localAvTeleportFinished')
    def createSpawnInterests(self, parents, callback, destGrid, teleportingObj):
        """Ensure interest in the spawn parent chain, then invoke callback.

        parents: [parentId, parentZone, grandparentId] (may be shorter).
        NOTE(review): indentation was lost in this dump; branch nesting
        below is reconstructed -- the failure tail is assumed to apply only
        when neither the parent nor its grandparent is available.
        """
        s = MiniLogSentry(self.miniLog, 'createSpawnInterests', parents, callback.__name__, destGrid, teleportingObj)
        parentsLen = len(parents)
        if self.miniLog:
            self.miniLog.appendLine('parents - %s' % (parents,))
            self.miniLog.appendLine('destGrid - %s' % (destGrid,))
        if parentsLen == 0:
            logBlock(2, self.miniLog)
            callback(destGrid, teleportingObj)
        else:
            parentObj = base.cr.doId2do.get(parents[0])
            if parentObj:
                # Parent already present locally.
                callback(parentObj, teleportingObj)
            elif parentsLen > 2 and parents[2] in base.cr.doId2do:
                # Grandparent known: open interest and wait for the parent.
                base.cr.relatedObjectMgr.requestObjects([
                    parents[0]], eachCallback = lambda param1 = None, param2 = teleportingObj: callback(param1, param2))
                localAvatar.setInterest(parents[2], parents[1], [
                    'instanceInterest'])
            else:
                if parentsLen > 2:
                    parentParentId = parents[2]
                    parentParentZone = parents[1]
                else:
                    parentParentId = '<None Given>'
                    parentParentZone = '<None Given>'
                parentId = parents[0]
                self.notify.warning(('createSpawnInterests: parent %s of parent %s in zone %s ' + 'does not exist locally, aborting teleport') % (parentParentId, parentId, parentParentZone))
                self.failTeleport(None, None, PLocalizer.TeleportGenericFailMessage)
    def initiateCrossShardDeploy(self, shardId = 0, islandUid = '', shipId = 0, doneCallback = None, startedCallback = None, doEffect = True):
        """Deploy a ship near an island on another shard, teleporting us too."""
        if not islandUid or not shipId:
            return None
        currInteractive = base.cr.interactionMgr.getCurrentInteractive()
        if currInteractive:
            currInteractive.requestExit()
        if self.cr.activeWorld:
            fromInstanceType = self.cr.activeWorld.getType()
        else:
            fromInstanceType = PiratesGlobals.INSTANCE_NONE
        if self.amInTeleport():
            return None
        self.setAmInTeleport()
        self.doEffect = doEffect
        self.sendUpdate('requestCrossShardDeploy', [
            shardId,
            islandUid,
            shipId])
        self.doneCallback = doneCallback
        self.startedCallback = startedCallback
        self.teleportInit(PiratesGlobals.INSTANCE_MAIN, fromInstanceType, 'Main World')

    def notifyFriendVisit(self, avId):
        """Show a whisper + message-stack note that avId is coming to visit."""
        av = base.cr.identifyAvatar(avId)
        if av:
            avName = av.getName()
        else:
            avName = PLocalizer.Someone
        localAvatar.setSystemMessage(avId, OTPLocalizer.WhisperComingToVisit % avName)
        localAvatar.guiMgr.messageStack.addTextMessage(OTPLocalizer.WhisperComingToVisit % avName, icon = ('friends', None))
| [
"brandoncarden12345@gmail.com"
] | brandoncarden12345@gmail.com |
767c75ec5475f49c3ec0b2ee8035aef2f54e30b0 | 6b1d152e7b236b97819b058e303829e3db2b19be | /prac_02/word_generator.py | 7aafa8f1c5b30c4a6eb106b64d6e85386cc92c87 | [] | no_license | Linda-Kirk/cp1404practicals | 4bbc03fc3fdf497b24165129a3d8aaa37c7fb1ef | d40316a8349a01478124f16be35bdfff53623698 | refs/heads/master | 2020-07-05T12:58:21.816450 | 2019-10-29T00:17:46 | 2019-10-29T00:17:46 | 202,653,504 | 0 | 0 | null | 2019-09-23T08:25:42 | 2019-08-16T03:40:48 | Python | UTF-8 | Python | false | false | 524 | py | """
CP1404/CP5632 - Practical
Random word generator - based on format of words
Another way to get just consonants would be to use string.ascii_lowercase
(all letters) and remove the vowels.
"""
import random

VOWELS = "aeiou"
CONSONANTS = "bcdfghjklmnpqrstvwxyz"


def generate_word(word_format):
    """Return a random word matching word_format.

    Each 'c' becomes a random consonant; any other character a random vowel.
    """
    return "".join(random.choice(CONSONANTS) if kind == "c" else random.choice(VOWELS)
                   for kind in word_format)


if __name__ == "__main__":
    # BUGFIX: .lower() was applied to the prompt string literal rather than
    # to the user's input, so an uppercase format like 'CCV' was treated as
    # all vowels.  Lowercase the returned input instead.
    word_format = input("Enter the word format e.g. 'ccvcvvc'(c for consonants, v for vowels):").lower()
    print(generate_word(word_format))
| [
"linda.kirk@my.jcu.edu.au"
] | linda.kirk@my.jcu.edu.au |
bd0caf3452ccfe76d5df1c8d98d8f0cb9a1b329a | 384d0be5ac54b306b945cf38c10d9b0a44c975ea | /stack/keystone/keystone/logic/types/biller.py | 00a135b11cbea421c4b431b0ff3dd0914cf9dfc5 | [
"Apache-2.0"
] | permissive | ashokcse/openstack-bill | 05ae313637b3cfecba946d2a9b32e8c7609fc721 | 1a3d7575d4b341f64fa1764ed47e47a7504a9bcc | refs/heads/master | 2021-01-18T14:05:24.696165 | 2012-09-12T11:29:20 | 2012-09-12T11:29:20 | 5,424,267 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 19,643 | py | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
from lxml import etree
from datetime import datetime
from keystone.logic.types import fault
LOG = logging.getLogger('keystone.logic.service')
LOG.info('entering Bill_Unit')
class Bill_Unit(object):
    """Holds one billing-unit record: vcpu/ram/vdisk counts plus dates."""

    def __init__(self, id=None, vcpu=None, ram=None,
                 vdisk=None, date=None, changed_on=None, enabled=None):
        # NOTE(review): this log call formats fields that default to None
        # with %d, so it raises TypeError unless every value is supplied --
        # confirm the callers always pass all seven arguments.
        LOG.info('keystone logic biller __init__ id:%s vcpu:%d ram:%d vdisk:%d date:%s changed on : %s enabled:%d'% ( id, vcpu, ram, vdisk, date, changed_on, enabled))
        self.id = id
        self.vcpu = vcpu
        self.ram = ram
        self.vdisk = vdisk
        self.date = date
        self.changed_on = changed_on
        # normalize any truthy value to a strict bool
        self.enabled = enabled and True or False
@staticmethod
def from_xml(xml_str):
try:
dom = etree.Element("root")
dom.append(etree.fromstring(xml_str))
root = dom.find("{http://docs.openstack.org/identity/api/v2.0}" \
"biller")
if root == None:
raise fault.BadRequestFault("Expecting Bill_Unit")
vcpu = root.get("vcpu")
ram = root.get("ram")
vdisk = root.get("vdisk")
date = root.get("date")
enabled = root.get("enabled")
if not vcpu:
raise fault.BadRequestFault("Expecting Bill_Unit")
elif not vdisk:
raise fault.BadRequestFault("Expecting Bill_Unit vdisk")
enabled = enabled is None or enabled.lower() in ["true", "yes"]
LOG.info('keystone logic biller py from_xml dom id:%d vcpu:%d ram:%d vdisk:%d date:%s enabled:%d'% ( id, vcpu, ram, vdisk, date, enabled))
return Bill_Unit( id, vcpu, ram, vdisk, enabled)
except etree.LxmlError as e:
raise fault.BadRequestFault("Cannot parse Bill_Unit", str(e))
@staticmethod
def from_json(json_str):
LOG.info('keystone logic types biller py from_json before try %s' %json_str)
try:
obj = json.loads(json_str)
if not "biller" in obj:
raise fault.BadRequestFault("Expecting Bill_Unit")
LOG.info('keystone logic types biller py from_json object %s' %obj)
biller = obj["biller"]
LOG.info('keystone logic types biller py from_json biller %s' %biller)
vcpu = biller.get('vcpu', None)
LOG.info('keystone logic types biller py from_json before IF vcpu%s' %vcpu)
if(vcpu == None or vcpu == 0):
raise fault.BadRequestFault("Expecting Bill_Unit")
LOG.info('keystone logic types biller py from_json before ram')
if "ram" in biller:
ram = biller["ram"]
else:
ram = None
LOG.info('keystone logic types biller py from_json afterram')
if "date" in biller:
date = biller["date"]
#date =datetime.strptime(biller["date"], "%Y-%m-%d")
if "changed_on" in biller:
changed_on = biller["changed_on"]
LOG.info('keystone logic types biller py from_json after date : %s created date: %s' %(date, changed_on))
if "vdisk" not in biller:
raise fault.BadRequestFault("Expecting Bill_Unit vdisk")
vdisk = biller["vdisk"]
LOG.info('keystone logic types biller py from_json vdisk : %s ' %vdisk)
if "enabled" in biller:
set_enabled = biller["enabled"]
if not isinstance(set_enabled, bool):
raise fault.BadRequestFault("Bad enabled attribute!")
else:
set_enabled = True
LOG.info('keystone logic types biller py from_json set_enabled : %s ' %set_enabled)
id = biller.get('id', None)
LOG.info('before return id :%s vcpu:%d ram:%d vdisk:%d date:%s enabled:%d'% ( id, vcpu, ram, vdisk, date, set_enabled))
return Bill_Unit(id, vcpu, ram, vdisk, date, changed_on, set_enabled)
except (ValueError, TypeError) as e:
raise fault.BadRequestFault("Cannot parse bill Unit", str(e))
def to_dom(self):
dom = etree.Element("biller",
xmlns="http://docs.openstack.org/identity/api/v2.0")
if self.vdisk:
dom.set("vdisk", unicode(self.vdisk))
if self.ram:
dom.set("ram", unicode(self.ram))
if self.id:
dom.set("id", unicode(self.id))
if self.vcpu:
dom.set("vcpu", unicode(self.vcpu))
if self.date:
dom.set("date", unicode(self.date))
if self.changed_on:
dom.set("created_on", unicode(self.changed_on))
if self.enabled:
dom.set("enabled", unicode(self.enabled).lower())
LOG.info('keystone logic biller py to_ dom id:%d vcpu:%d ram:%d vdisk:%d date:%s changed_on : %s enabled:%d'% ( dom.id, dom.vcpu, dom.ram, dom.vdisk, dom.date, dom.changed_on, dom.enabled))
return dom
def to_xml(self):
return etree.tostring(self.to_dom())
def to_dict(self):
biller = {}
if self.id:
biller["id"] = unicode(self.id)
if self.vcpu:
biller["vcpu"] = unicode(self.vcpu)
if self.ram:
biller["ram"] = unicode(self.ram)
biller["vdisk"] = unicode(self.vdisk)
biller["date"] = unicode(self.date)
biller["changed_on"] = unicode(self.changed_on)
biller["enabled"] = self.enabled
return {'biller':biller}
def to_json(self):
return json.dumps(self.to_dict())
class Instance_Bill(object):
    """Aggregated billing details for a single instance.

    Fields: cumulative total_vcpu/total_ram/total_vdisk usage, the
    instance name, the computed total_cost, and the last-change timestamp.
    """
    def __init__(self, id=None, name=None, total_vcpu=None, total_ram=None,
                 total_vdisk=None, changed_on=None, total_cost=None,
                 enabled=None):
        LOG.info('keystone logic instance biller __init__ start')
        self.id = id
        self.name = name
        self.total_vcpu = total_vcpu
        self.total_ram = total_ram
        self.total_vdisk = total_vdisk
        self.total_cost = total_cost
        self.changed_on = changed_on
        # Normalize any truthy/falsy input to a real bool
        # (equivalent to the old `enabled and True or False`).
        self.enabled = bool(enabled)
        LOG.info('keystone logic instance biller __init__ end')

    @staticmethod
    def from_xml(xml_str):
        """Build an Instance_Bill from an XML <biller> element.

        Raises fault.BadRequestFault on parse errors or when the mandatory
        total_vcpu/total_vdisk attributes are missing.
        """
        try:
            dom = etree.Element("root")
            dom.append(etree.fromstring(xml_str))
            root = dom.find("{http://docs.openstack.org/identity/api/v2.0}"
                            "biller")
            if root is None:
                raise fault.BadRequestFault("Expecting Bill_Unit")
            total_vcpu = root.get("total_vcpu")
            total_ram = root.get("total_ram")
            total_vdisk = root.get("total_vdisk")
            name = root.get("name")
            enabled = root.get("enabled")
            if not total_vcpu:
                raise fault.BadRequestFault("Expecting Bill_Unit")
            elif not total_vdisk:
                raise fault.BadRequestFault("Expecting Bill_Unit vdisk")
            # Missing attribute defaults to enabled; otherwise accept
            # "true"/"yes" (case-insensitive).
            enabled = enabled is None or enabled.lower() in ["true", "yes"]
            # %s specifiers: attribute values are strings or None, so the
            # original %d formatting raised TypeError.
            LOG.info('keystone logic biller py from_xml dom name:%s '
                     'total_vcpu:%s total_ram:%s total_vdisk:%s enabled:%s'
                     % (name, total_vcpu, total_ram, total_vdisk, enabled))
            # Return an Instance_Bill; the original returned a Bill_Unit
            # built from the builtin `id` with mismatched positionals.
            return Instance_Bill(id=root.get("id"), name=name,
                                 total_vcpu=total_vcpu, total_ram=total_ram,
                                 total_vdisk=total_vdisk, enabled=enabled)
        except etree.LxmlError as e:
            raise fault.BadRequestFault("Cannot parse Bill_Unit", str(e))

    @staticmethod
    def from_json(json_str):
        """Build an Instance_Bill from JSON of the form {"biller": {...}}.

        Raises fault.BadRequestFault on malformed input or when the
        mandatory total_vcpu/total_vdisk fields are missing.
        """
        try:
            obj = json.loads(json_str)
            if not "biller" in obj:
                raise fault.BadRequestFault("Expecting Bill_Unit")
            biller = obj["biller"]
            total_vcpu = biller.get('total_vcpu', None)
            if total_vcpu is None or total_vcpu == 0:
                raise fault.BadRequestFault("Expecting Instance_Bill_Unit")
            total_ram = biller.get("total_ram", None)
            # Default the optional fields to None: previously `name`,
            # `total_cost` and `changed_on` were left unbound when the keys
            # were absent, raising UnboundLocalError at the return.
            name = biller.get("name", None)
            total_cost = biller.get("total_cost", None)
            changed_on = biller.get("changed_on", None)
            if "total_vdisk" not in biller:
                raise fault.BadRequestFault("Expecting Bill_Unit vdisk")
            total_vdisk = biller["total_vdisk"]
            if "enabled" in biller:
                set_enabled = biller["enabled"]
                if not isinstance(set_enabled, bool):
                    raise fault.BadRequestFault("Bad enabled attribute!")
            else:
                set_enabled = True
            id = biller.get('id', None)
            # %s specifiers: JSON values may be strings; a TypeError here
            # used to be swallowed into a bogus BadRequestFault below.
            LOG.info('before instance bill json return id:%s name:%s '
                     'total_vcpu:%s total_ram:%s total_vdisk:%s '
                     'total_cost:%s enabled:%s'
                     % (id, name, total_vcpu, total_ram, total_vdisk,
                        total_cost, set_enabled))
            return Instance_Bill(id, name, total_vcpu, total_ram,
                                 total_vdisk, changed_on, total_cost,
                                 set_enabled)
        except (ValueError, TypeError) as e:
            raise fault.BadRequestFault("Cannot parse Instance bill ", str(e))

    def to_dom(self):
        """Serialize to an etree <biller> element (truthy fields only)."""
        dom = etree.Element("biller",
                            xmlns="http://docs.openstack.org/identity/api/v2.0")
        # The original guards referenced self.vdisk/self.ram/self.vcpu/
        # self.date, none of which exist on this class, so every call
        # raised AttributeError.
        if self.total_vdisk:
            dom.set("total_vdisk", unicode(self.total_vdisk))
        if self.total_ram:
            dom.set("total_ram", unicode(self.total_ram))
        if self.id:
            dom.set("id", unicode(self.id))
        if self.total_vcpu:
            dom.set("total_vcpu", unicode(self.total_vcpu))
        if self.name:
            dom.set("name", unicode(self.name))
        if self.total_cost:
            dom.set("total_cost", unicode(self.total_cost))
        if self.changed_on:
            # "created_on" attribute name kept for wire-format compatibility.
            dom.set("created_on", unicode(self.changed_on))
        if self.enabled:
            dom.set("enabled", unicode(self.enabled).lower())
        # Log instance fields; etree Elements expose no .id/.name/... attrs.
        LOG.info('keystone logic biller py to_dom id:%s name:%s '
                 'total_vcpu:%s total_ram:%s total_vdisk:%s changed_on:%s '
                 'enabled:%s'
                 % (self.id, self.name, self.total_vcpu, self.total_ram,
                    self.total_vdisk, self.changed_on, self.enabled))
        return dom

    def to_xml(self):
        """Return this bill serialized as an XML string."""
        return etree.tostring(self.to_dom())

    def to_dict(self):
        """Return a plain-dict representation wrapped as {'biller': {...}}."""
        biller = {}
        if self.id:
            biller["id"] = unicode(self.id)
        if self.total_vcpu:
            biller["total_vcpu"] = unicode(self.total_vcpu)
        if self.total_ram:
            biller["total_ram"] = unicode(self.total_ram)
        biller["total_vdisk"] = unicode(self.total_vdisk)
        biller["name"] = unicode(self.name)
        biller["total_cost"] = unicode(self.total_cost)
        biller["changed_on"] = unicode(self.changed_on)
        biller["enabled"] = self.enabled
        return {'biller': biller}

    def to_json(self):
        """Return this bill serialized as a JSON string."""
        return json.dumps(self.to_dict())
#-User Bill----------#
class User_Bill(object):
    """Aggregated monthly billing details for one user within a tenant."""
    def __init__(self, id=None, user_id=None, tenant_id=None,
                 total_vcpu=None, total_ram=None, total_vdisk=None,
                 bill_month=None, total_cost=None, enabled=None):
        LOG.info('keystone logic User_Bill biller __init__ start')
        self.id = id
        self.user_id = user_id
        self.tenant_id = tenant_id
        self.total_vcpu = total_vcpu
        self.total_ram = total_ram
        self.total_vdisk = total_vdisk
        self.total_cost = total_cost
        self.bill_month = bill_month
        # Normalize any truthy/falsy input to a real bool.
        self.enabled = bool(enabled)
        LOG.info('keystone logic User_Bill biller __init__ end')

    @staticmethod
    def from_xml(xml_str):
        """Build a User_Bill from an XML <biller> element.

        Raises fault.BadRequestFault on parse errors or when the mandatory
        total_vcpu/total_vdisk attributes are missing.
        """
        try:
            dom = etree.Element("root")
            dom.append(etree.fromstring(xml_str))
            root = dom.find("{http://docs.openstack.org/identity/api/v2.0}"
                            "biller")
            if root is None:
                raise fault.BadRequestFault("Expecting Bill_Unit")
            total_vcpu = root.get("total_vcpu")
            total_ram = root.get("total_ram")
            total_vdisk = root.get("total_vdisk")
            # Parse the identifiers this class actually stores; the
            # original (copy-pasted) code read "name" and returned a
            # Bill_Unit with mismatched positional arguments.
            user_id = root.get("user_id")
            tenant_id = root.get("tenant_id")
            enabled = root.get("enabled")
            if not total_vcpu:
                raise fault.BadRequestFault("Expecting Bill_Unit")
            elif not total_vdisk:
                raise fault.BadRequestFault("Expecting Bill_Unit vdisk")
            enabled = enabled is None or enabled.lower() in ["true", "yes"]
            # %s specifiers: attribute values are strings or None.
            LOG.info('keystone logic biller py from_xml dom user_id:%s '
                     'total_vcpu:%s total_ram:%s total_vdisk:%s enabled:%s'
                     % (user_id, total_vcpu, total_ram, total_vdisk, enabled))
            return User_Bill(id=root.get("id"), user_id=user_id,
                             tenant_id=tenant_id, total_vcpu=total_vcpu,
                             total_ram=total_ram, total_vdisk=total_vdisk,
                             enabled=enabled)
        except etree.LxmlError as e:
            raise fault.BadRequestFault("Cannot parse Bill_Unit", str(e))

    @staticmethod
    def from_json(json_str):
        """Build a User_Bill from JSON of the form {"biller": {...}}.

        Raises fault.BadRequestFault on malformed input or when the
        mandatory total_vcpu/total_vdisk fields are missing.
        """
        LOG.info('keystone logic types User Bill biller py from_json before try %s' % json_str)
        try:
            obj = json.loads(json_str)
            if not "biller" in obj:
                raise fault.BadRequestFault("Expecting User_Bill")
            biller = obj["biller"]
            total_vcpu = biller.get('total_vcpu', None)
            if total_vcpu is None or total_vcpu == 0:
                raise fault.BadRequestFault("Expecting User_Bill")
            total_ram = biller.get("total_ram", None)
            # Default the optional fields to None: previously `user_id`,
            # `tenant_id`, `total_cost` and `bill_month` were left unbound
            # when absent, raising UnboundLocalError at the return.
            user_id = biller.get("user_id", None)
            tenant_id = biller.get("tenant_id", None)
            total_cost = biller.get("total_cost", None)
            bill_month = biller.get("bill_month", None)
            if "total_vdisk" not in biller:
                raise fault.BadRequestFault("Expecting Bill_Unit vdisk")
            total_vdisk = biller["total_vdisk"]
            if "enabled" in biller:
                set_enabled = biller["enabled"]
                if not isinstance(set_enabled, bool):
                    raise fault.BadRequestFault("Bad enabled attribute!")
            else:
                set_enabled = True
            id = biller.get('id', None)
            # %s specifiers: JSON values may be strings; a TypeError here
            # used to be swallowed into a bogus BadRequestFault below.
            LOG.info('before user bill json return id:%s user_id:%s '
                     'tenant_id:%s total_vcpu:%s total_ram:%s '
                     'total_vdisk:%s total_cost:%s bill_month:%s enabled:%s'
                     % (id, user_id, tenant_id, total_vcpu, total_ram,
                        total_vdisk, total_cost, bill_month, set_enabled))
            return User_Bill(id, user_id, tenant_id, total_vcpu, total_ram,
                             total_vdisk, bill_month, total_cost,
                             set_enabled)
        except (ValueError, TypeError) as e:
            raise fault.BadRequestFault("Cannot parse keystone logic types biller py from_json User bill ", str(e))

    def to_dom(self):
        """Serialize to an etree <biller> element (truthy fields only)."""
        dom = etree.Element("biller",
                            xmlns="http://docs.openstack.org/identity/api/v2.0")
        # The original guards referenced self.vdisk/self.ram/self.vcpu,
        # none of which exist on this class, so every call raised
        # AttributeError.
        if self.total_vdisk:
            dom.set("total_vdisk", unicode(self.total_vdisk))
        if self.total_ram:
            dom.set("total_ram", unicode(self.total_ram))
        if self.id:
            dom.set("id", unicode(self.id))
        if self.total_vcpu:
            dom.set("total_vcpu", unicode(self.total_vcpu))
        if self.user_id:
            dom.set("user_id", unicode(self.user_id))
        if self.tenant_id:
            dom.set("tenant_id", unicode(self.tenant_id))
        if self.total_cost:
            dom.set("total_cost", unicode(self.total_cost))
        if self.bill_month:
            dom.set("bill_month", unicode(self.bill_month))
        if self.enabled:
            dom.set("enabled", unicode(self.enabled).lower())
        # Log instance fields; the original dom.* call raised
        # AttributeError and also had 8 format specifiers for 7 arguments.
        LOG.info('keystone logic biller py to_dom id:%s user_id:%s '
                 'total_vcpu:%s total_ram:%s total_vdisk:%s bill_month:%s '
                 'enabled:%s'
                 % (self.id, self.user_id, self.total_vcpu, self.total_ram,
                    self.total_vdisk, self.bill_month, self.enabled))
        return dom

    def to_xml(self):
        """Return this bill serialized as an XML string."""
        return etree.tostring(self.to_dom())

    def to_dict(self):
        """Return a plain-dict representation wrapped as {'biller': {...}}."""
        biller = {}
        if self.id:
            biller["id"] = unicode(self.id)
        if self.total_vcpu:
            biller["total_vcpu"] = unicode(self.total_vcpu)
        if self.total_ram:
            biller["total_ram"] = unicode(self.total_ram)
        biller["user_id"] = unicode(self.user_id)
        biller["tenant_id"] = unicode(self.tenant_id)
        biller["total_vdisk"] = unicode(self.total_vdisk)
        biller["total_cost"] = unicode(self.total_cost)
        biller["bill_month"] = unicode(self.bill_month)
        biller["enabled"] = self.enabled
        return {'biller': biller}

    def to_json(self):
        """Return this bill serialized as a JSON string."""
        return json.dumps(self.to_dict())
| [
"ashokcse@live.com"
] | ashokcse@live.com |
acea4cdf9cddd739a1daddc42cb820e70fe0e59c | 3a18085d011b2dfc2c15ca6eb10838c604ef8a2c | /transform_web_traffic.py | 60b01996ed53bf8eaf3b2b944629e994c5dd01a9 | [] | no_license | ericness/when_i_work_code_challenge | 049f986df9cc9c1de29f502f006e138e119bac70 | 7e67505ebc451138327effd51ec967f200ee9d0a | refs/heads/master | 2021-07-24T06:42:34.133140 | 2017-11-02T20:25:05 | 2017-11-02T20:25:05 | 109,053,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,285 | py | import argparse
import io
import re
import pandas as pd
import boto3
import botocore
# parse command args to set run configuration
parser = argparse.ArgumentParser(description=('Transform raw web traffic data '
                                 'into a pivoted table of aggregated time per '
                                 'path by user.'))
# required positional: the S3 bucket holding the raw traffic CSV files
parser.add_argument('bucket', type=str,
                    help='Name of S3 bucket that contains web traffic data')
# optional key prefix used to narrow the S3 object listing
parser.add_argument('--prefix', type=str,
                    help='Prefix to filter S3 keys by')
# optional destination file for the pivoted result
parser.add_argument('--output', type=str,
                    help='Name of output CSV file')
args = parser.parse_args()
# set configuration variables from command line args
S3_BUCKET_NAME = args.bucket
# empty prefix means "list the whole bucket" (see the branch further down)
if args.prefix:
    S3_PREFIX = args.prefix
else:
    S3_PREFIX = ''
# fall back to a default output filename when none is given
if args.output:
    OUTPUT_CSV_NAME = args.output
else:
    OUTPUT_CSV_NAME = 'web_traffic.csv'
def clean_web_traffic_data(web_traffic_df, s3_object_name):
    """Drop corrupt rows and normalize dtypes in a web-traffic dataframe.

    web_traffic_df - dataframe with fields path, user_id and length
    s3_object_name - name of source file to use in status messages
    """
    # Keep only rows whose path looks like /segment(/segment...)*,
    # optionally followed by trailing whitespace.
    rows_before = len(web_traffic_df.index)
    path_ok = web_traffic_df['path'].str.match('^(/[\w-]*)+\s*$') == True
    web_traffic_df = web_traffic_df[path_ok]
    dropped = rows_before - len(web_traffic_df.index)
    if dropped != 0:
        print(f'{dropped} rows filtered out of {s3_object_name} because '
              f'of invalid path formats.')
    # Coerce length to int, discarding any row whose value is not purely
    # digits. Skipped entirely when the column is already int64.
    rows_before = len(web_traffic_df.index)
    if web_traffic_df['length'].dtype != 'int64':
        digits_only = (web_traffic_df['length'].astype('str')
                       .str.isdigit() == True)
        web_traffic_df = web_traffic_df[digits_only]
        dropped = rows_before - len(web_traffic_df.index)
        if dropped != 0:
            print(f'{dropped} rows filtered out of {s3_object_name} '
                  f'because field length is non-integer.')
        web_traffic_df['length'] = web_traffic_df['length'].astype(int)
    return web_traffic_df
# use the UNSIGNED signature version for anonymous access
s3 = boto3.resource('s3', config=botocore.client.
                    Config(signature_version=botocore.UNSIGNED))
# set up objects to iterate through list of S3 objects
s3_bucket = s3.Bucket(S3_BUCKET_NAME)
if S3_PREFIX != '':
    s3_bucket_objects = s3_bucket.objects.filter(Prefix=S3_PREFIX)
else:
    s3_bucket_objects = s3_bucket.objects.all()
# list of dataframes created from the CSV files
web_traffic_list = []
print(f'Getting list of CSV files to process.')
# iterate through CSV files and parse them into dataframes
try:
    for s3_obj in s3_bucket_objects:
        # only process CSV files
        if re.match('.*\.csv$', s3_obj.key):
            obj = s3.Object(s3_obj.bucket_name, s3_obj.key)
            # download the object body into memory and parse it as CSV
            web_traffic_subset = pd.read_csv(io.BytesIO(obj.get()['Body'].
                                             read()), encoding='utf8')
            print(f'Processing file {s3_obj.key}.')
            # check structure and contents of dataframe
            if set(['user_id', 'path', 'length']).issubset(web_traffic_subset.
                                                           columns):
                # clean the rows, then keep only the three needed columns
                web_traffic_subset = clean_web_traffic_data(web_traffic_subset,
                                                            s3_obj.key)
                web_traffic_list.append(web_traffic_subset[['user_id', 'path',
                                                            'length']])
            else:
                print(f'Data in file {s3_obj.key} was skipped because it does '
                      f'not contain fields user_id, path and length.')
except botocore.exceptions.ClientError as e:
    # e.g. bucket does not exist or access denied; report and abort
    print(e.response['Error']['Message'])
    exit()
# make sure that at least one file was processed
if len(web_traffic_list) == 0:
    print(f'There are no CSV files with the proper structure to process.')
    exit()
print(f'All files have been ingested. Beginning data transformation.')
# combine the dataframes from all the files into one large dataframe
web_traffic = pd.concat(web_traffic_list, ignore_index=True)
# aggregate the length of time that each user spent on each path
web_traffic_user_path = web_traffic.groupby(['user_id','path'])['length'].sum()
# pivot the table so that the path names are in columns
web_traffic_user = web_traffic_user_path.reset_index()
web_traffic_user = web_traffic_user.pivot(index='user_id', columns='path',
                                          values='length')
# fill in any missing data with zeros
web_traffic_user = web_traffic_user.fillna(0)
# dtype converts to float when pivoting because of the presence of NaNs.
# convert the data type back to int.
web_traffic_user = web_traffic_user.astype(dtype='int')
print(f'Writing transformed data to file {OUTPUT_CSV_NAME}.')
# output data to specified location
web_traffic_user.to_csv(OUTPUT_CSV_NAME)
print(f'Done!')
"ericness@UdemySparkCourse.fpcodpc5vfjurkxi5gs5htsn0d.gx.internal.cloudapp.net"
] | ericness@UdemySparkCourse.fpcodpc5vfjurkxi5gs5htsn0d.gx.internal.cloudapp.net |
bdf4f576aceba31d7d274c2ec7efd61e1f4a337c | 5d48aba44824ff9b9ae7e3616df10aad323c260e | /bfs/127.word_ladder.py | 0e02bffe5c4014c13978aea31a08fd842253ceea | [] | no_license | eric496/leetcode.py | 37eab98a68d6d3417780230f4b5a840f6d4bd2a6 | 32a76cf4ced6ed5f89b5fc98af4695b8a81b9f17 | refs/heads/master | 2021-07-25T11:08:36.776720 | 2021-07-01T15:49:31 | 2021-07-01T15:49:31 | 139,770,188 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,705 | py | """
Given two words (beginWord and endWord), and a dictionary's word list, find the length of shortest transformation sequence from beginWord to endWord, such that:
Only one letter can be changed at a time.
Each transformed word must exist in the word list. Note that beginWord is not a transformed word.
Note:
Return 0 if there is no such transformation sequence.
All words have the same length.
All words contain only lowercase alphabetic characters.
You may assume no duplicates in the word list.
You may assume beginWord and endWord are non-empty and are not the same.
Example 1:
Input:
beginWord = "hit",
endWord = "cog",
wordList = ["hot","dot","dog","lot","log","cog"]
Output: 5
Explanation: As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
return its length 5.
Example 2:
Input:
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log"]
Output: 0
Explanation: The endWord "cog" is not in wordList, therefore no possible transformation.
"""
import string
from collections import deque
from typing import List
class Solution:
    def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
        """Return the length of the shortest transformation sequence from
        beginWord to endWord (counting both endpoints), or 0 if none exists.

        Each step changes exactly one letter and must produce a word in
        wordList. Standard BFS over the implicit one-letter-apart graph.
        """
        word_set = set(wordList)
        q = deque([(beginWord, 1)])
        # The start word never needs to be re-entered.
        word_set.discard(beginWord)
        while q:
            word, step = q.popleft()
            if word == endWord:
                return step
            for i in range(len(word)):
                for c in string.ascii_lowercase:
                    candidate = word[:i] + c + word[i + 1:]
                    if candidate in word_set:
                        # Remove on enqueue instead of keeping a separate
                        # visited set: each word is expanded at most once.
                        word_set.discard(candidate)
                        q.append((candidate, step + 1))
        return 0
| [
"eric.mlengineer@gmail.com"
] | eric.mlengineer@gmail.com |
fb37d57c90646fad12d0114c9dd4dd83f5fc02e5 | 0edfa72a67e2b0f0437a9868a9b08ddf22f9fa65 | /venv/Scripts/pip-script.py | bb75b067804392863229bfd405d6645c02f475c1 | [] | no_license | neesmusuns/cracker | 2bc54c61ba19d8547774cd2864aec3ecd22d1469 | 376faf6d5b22b49a68ffdbc5b4b914128b1d5bf0 | refs/heads/master | 2021-02-25T21:36:19.940427 | 2020-03-12T12:34:01 | 2020-03-12T12:34:01 | 245,150,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | #!"C:\Users\Irma Leinerte\Documents\2020spring\systems_security\verkefni1\cracker\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
# Auto-generated setuptools console-script wrapper for pip 19.0.3.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".pyw"/".exe" suffix so argv[0] matches the
    # plain command name before handing control to pip's entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"31275615+neesmusuns@users.noreply.github.com"
] | 31275615+neesmusuns@users.noreply.github.com |
2e54441258e9589bbbcf8cfd910724f80e61d746 | 966d68245763f12d950efbc39928cbb14655b9d1 | /backend/textManipulation/summarization.py | 05f35fdc3ae8da0ba8c1c697fe038840a8d6ad96 | [] | no_license | wcooper90/ivyhacks2020 | ebd1352465eb364d802f7673af06ffa407758f1f | 684d964a5a352cd78faf11df91c3b1bc08355ee8 | refs/heads/main | 2022-12-25T00:14:34.213824 | 2020-10-04T17:57:53 | 2020-10-04T17:57:53 | 301,185,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,453 | py | from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
from sumy.summarizers.luhn import LuhnSummarizer
from sumy.summarizers.edmundson import EdmundsonSummarizer
from sumy.summarizers.text_rank import TextRankSummarizer
from sumy.summarizers.lex_rank import LexRankSummarizer
import nltk
nltk.download('punkt')
from UserInputs import UserInputs
from inputs.article_scraper import url_text_conversion, collect_text
# default summarization for text
def summarize_text(text, num_sentences):
    """Summarize plain text with sumy's TextRank summarizer.

    text - the raw text to summarize
    num_sentences - maximum number of sentences to keep

    Returns (summary, title); title generation is not implemented yet.
    """
    LANGUAGE = "english"
    parser = PlaintextParser.from_string(text, Tokenizer(LANGUAGE))
    stemmer = Stemmer(LANGUAGE)
    # use text rank as default
    summarizer = TextRankSummarizer(stemmer)
    summarizer.stop_words = get_stop_words(LANGUAGE)
    # Dead `counter` bookkeeping and commented-out debug code removed;
    # each sentence is prefixed with a space, matching the original
    # concatenation loop's output exactly.
    result = ''.join(' ' + str(sentence)
                     for sentence in summarizer(parser.document, num_sentences))
    title = 'to be implemented'
    return result, title
# adjust to summarize speech better
def summarize_speech_transcription(text, num_sentences):
    """Summarize a speech transcription with the LSA summarizer.

    Returns (summary, title); title generation is not implemented yet.
    """
    summarizer = Summarizer()
    sentences = summarizer(text, num_sentences=num_sentences)
    # Sentences are joined without separators, as before.
    summary = ''.join(sentences)
    return summary, 'to be implemented'
# summarization for an article
def summarize_article_text(url, num_sentences):
    """Fetch an article from *url* and summarize it with the LSA summarizer.

    Returns (summary, title); title generation is not implemented yet.
    """
    language = "english"
    parser = HtmlParser.from_url(url, Tokenizer(language))
    summarizer = Summarizer(Stemmer(language))
    summarizer.stop_words = get_stop_words(language)
    # Sentences are concatenated without separators, as before.
    summary = ''.join(str(sentence)
                      for sentence in summarizer(parser.document,
                                                 num_sentences))
    return summary, 'to be implemented'
"wcooperhockey@gmail.com"
] | wcooperhockey@gmail.com |
09681c3b1e3c1c5978a17a6fe55b3707c490dd3e | 0f58fc4ef95b88bdc12feb3fae6f193d3427d322 | /algorithm_loader.py | 52aacde3de169fb033712d462f6b0eb2a6aa0f24 | [] | no_license | makmanalp/samplerun | 0f0869a0d7f8ccf698cc2f55b3aa824d51e0a971 | 97a68223e76f661ccff998e957f66f9cafdadde6 | refs/heads/master | 2021-03-12T23:56:50.266647 | 2015-01-06T22:41:46 | 2015-01-06T22:41:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | import pytest
import os
import imp
import inspect
from base_algorithm import Algorithm
def load_plugins(plugin_folder):
plugins = {}
cwd = os.path.abspath(os.path.curdir)
plugin_folder = os.path.join(cwd, plugin_folder)
folders = [x for x in os.listdir(plugin_folder)
if os.path.isdir(os.path.join(plugin_folder, x))]
for f in folders:
path = os.path.join(plugin_folder, f)
if "__init__.py" in os.listdir(path):
module_info = imp.find_module("__init__", [path])
plugins[f] = module_info
return plugins
@pytest.fixture(params=load_plugins("plugins").items())
def algorithm(request):
module_name, module_info = request.param
module = imp.load_module("__init__", *module_info)
is_algorithm = lambda x:\
inspect.isclass(x)\
and x is not Algorithm\
and issubclass(x, Algorithm)
module_members = inspect.getmembers(module, is_algorithm)
assert len(module_members) == 1, "Looks like your algorithm {0} doesn't\
contain a subclass of Algorithm.".format(module_name)
return module_members[0][1]()
| [
"mali@akmanalp.com"
] | mali@akmanalp.com |
aa98278bf982510809e97f209972b9d3ffecdc03 | 4b191334ac835f99236d51ab6a7857f968250df2 | /utils/add_ipv6.py | bb66dc7c153fcc6c478e5b53b9b056124f043392 | [] | no_license | soutzis/Janus-IPv6 | bfdd1e89260a9d5faf9796e9da836d96fbfc607b | a1079a1f3283bc193597b40f90e998a149ae2781 | refs/heads/master | 2021-07-12T17:55:05.936232 | 2020-06-24T14:30:53 | 2020-06-24T14:30:53 | 168,965,023 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | #!/usr/bin/python3.6
from subprocess import call
import argparse
'''
For simplicity, this file is also copied in "usr/local/bin", so that it can be run from any directory
by simply calling: add_ipv6 <hostname> <ipv6>.
The file in "usr/local/bin" is renamed to 'add_ipv6' (no ".py" file extension)
'''
# Command templates; {hostname} selects the Mininet-style hN-eth0 interface,
# {ip} fills in the address portion.
add_ipv6_cmd = "ifconfig h{hostname}-eth0 inet6 add 2000::{ip}/64"
# NOTE(review): only used by the commented-out --distinct option below.
add_ipv6_dist_global = "ifconfig h{hostname}-eth0 inet6 add 200{ip}::{ip}/64"
add_ipv6_custom_cmd = "ifconfig h{hostname}-eth0 inet6 add {ip}/64"
parser = argparse.ArgumentParser(description="Add an IPv6 GUA, to the eth0 interface")
parser.add_argument("hostname",
                    help="Add the number of the host. e.g: if host is 'h4', enter: 4",
                    type=int)
mutex = parser.add_mutually_exclusive_group()
# mutex.add_argument("-d","--distinct",
#                    help="add a different IPv6 GUA for this node",
#                    action=store_true)
mutex.add_argument("-c", "--custom", help="Add a custom IPv6 GUA.", type=str)
args = parser.parse_args()
# if args.distinct:
#     command = add_ipv6_dist_global.format(hostname=args.hostname,
#                                           ip=args.hostname)
# Use the custom address when provided; otherwise derive one from the
# host number (2000::<n>/64).
if args.custom:
    command = add_ipv6_custom_cmd.format(hostname=args.hostname, ip=args.custom)
else:
    command = add_ipv6_cmd.format(hostname=args.hostname, ip=args.hostname)
print("Executing command: "+command)
# Run ifconfig; splitting on spaces avoids invoking a shell.
call(command.split(" "))
print("IPv6 address added successfully.")
"noreply@github.com"
] | soutzis.noreply@github.com |
d3238509ecaea8d3e0a51a8943890b4578e5a8da | e3d447a81c5462d2d14201f2bc6b82cdcbbca51a | /chapter10/c10_6_addition.py | af50d5e3378247cb7a726c51df05b727370cecc4 | [] | no_license | barcern/python-crash-course | f6026f13f75ecddc7806711d65bc53cb88e24496 | 8b55775c9f0ed49444becb35b8d529620537fa54 | refs/heads/master | 2023-04-19T17:28:44.342022 | 2021-02-07T23:51:06 | 2021-02-07T23:51:06 | 257,201,280 | 2 | 3 | null | 2021-05-12T17:35:56 | 2020-04-20T07:14:28 | Python | UTF-8 | Python | false | false | 2,214 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 26 12:27:25 2020
@author: barbora
One common problem when prompting for numerical input occurs when people
provide text instead of numbers. When you try to convert the input to an int,
you'll get a ValueError. Write a program that prompts for two numbers.
Add them together and print the result. Catch the ValueError if either input
value is not a number, and print a friendly error message. Test your program
by entering two numbers and then by entering some text instead of a number.
"""
# Option 1 - while loop
# Create a while loop to allow for users to input the two values
flag = True
while flag:
    message1 = "Please input the first value to add. To quit, type 'q': "
    message2 = "Please input the second value to add. To quit, type 'q': "
    value1 = input(message1)
    # Exit conditions
    if (value1 == 'q'):
        print("Ending program")
        break
    value2 = input(message2)
    if (value2 == 'q'):
        print("Ending program")
        break
    # Convert to integer and check for a ValueError
    try:
        int1 = int(value1)
        int2 = int(value2)
    except ValueError:
        # Non-numeric input: report and loop again instead of crashing.
        print("Please input two integer values")
    else:
        # Both conversions succeeded: add and show the result.
        result = int1 + int2
        print(f"Final result: {result}")
# Option 2 - while loop and function
# Create a function to add two values
def addition(value1, value2):
    """Add two values after converting them to int, guarding ValueError.

    Returns a user-facing message string in both the success case and
    the invalid-input case.
    """
    try:
        total = int(value1) + int(value2)
    except ValueError:
        return "Please input two integer values"
    return f"Final result: {total}"
# Quick demonstration call of the helper above.
print(addition(2,3))
# While loop to obtain user input
flag = True
while flag:
    message1 = "Please input the first value to add. To quit, type 'q': "
    message2 = "Please input the second value to add. To quit, type 'q': "
    value1 = input(message1)
    # Exit conditions
    if (value1 == 'q'):
        print("Ending program")
        break
    value2 = input(message2)
    if (value2 == 'q'):
        print("Ending program")
        break
    # Call function
    print(addition(value1, value2))
| [
"bcernakova01@gmail.com"
] | bcernakova01@gmail.com |
562d159153258105237dee275a61136e7c194853 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.3/django/contrib/localflavor/generic/forms.py | b8a111a6b5f57fa81698f292b86258925d561b4a | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.3/django/contrib/localflavor/generic/forms.py | [
"ron.y.kagan@gmail.com"
] | ron.y.kagan@gmail.com |
f2e46158afebdf251132475b4a4284e808cdffbb | 7205218c520b405f01d5fa1ae5728c9f3071f04d | /Exercises - Module III/EX108 - Formatando moeda().py | 49c953dc8e09077c0b5a0a9b8b4069bbd79bf6f3 | [] | no_license | talesritz/Learning-Python---Guanabara-classes | 164b22ca27158b41e851152257750ac5fcd0cecc | 273a06037e3b283a4e78a3f105c0828ae70bfab0 | refs/heads/master | 2020-05-19T23:43:12.007950 | 2019-05-06T21:44:44 | 2019-05-06T21:44:44 | 185,273,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | # Adapte o código do desafio 107, criando uma função adicional chamada moeda() que consiga mostrar os valores como um valor monetário formatado.
from uteis import formata, moedab, validacao
#Versão necessária para o ex108
formata.cabecalho('EX108 - Formatando Moeda()')
tmp = validacao.leiaInt('Digite o preço: ')
print(f'A metade de {moedab.moeda(tmp)} é {moedab.moeda(moedab.metade(tmp))}')
print(f'O dobro de {moedab.moeda(tmp)} é {moedab.moeda(moedab.dobro(tmp))}')
print(f'Aumentando 10%, temos {moedab.moeda(moedab.aumentar(tmp))}')
print(f'Diminuindo 15%, temos {moedab.moeda(moedab.diminuir(tmp))}')
| [
"noreply@github.com"
] | talesritz.noreply@github.com |
02185c94d3eb3432880096e3dc2c60d9712cb52f | b78849c6afe4e2a13e464ee21c3e31758d5d17de | /imagedownloader Web_scraping with gui.py | 8065b39c29080438704e3582843739cb9ff955a5 | [] | no_license | kevalmahajan/Python-Projects | 7c9184d91f1506f87ceb9157d88214b3547f5c17 | 91379c4c159ee30019c6e46164374994855d30b1 | refs/heads/master | 2023-02-14T15:46:52.142843 | 2020-12-23T05:53:12 | 2020-12-23T05:53:12 | 280,148,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,122 | py | import os
import requests # to sent GET requests
from bs4 import BeautifulSoup # to parse HTML
from tkinter import *
import tkinter as tk
import traceback
from tkinter import messagebox as m_box
# Yahoo image-search results URL; the search term is appended as "p=<term>".
yahoo_img = \
    'https://in.images.search.yahoo.com/search/images;_ylt=AwrwJSJD2Q1fTlkATCK8HAx.;_ylc=X1MDMjExNDcyMzAwNARfcgMyBGZyAwRncHJpZAN6VDFjeUl0WlFfLnRqMGU1YlNTTGVBBG5fc3VnZwMxMARvcmlnaW4DaW4uaW1hZ2VzLnNlYXJjaC55YWhvby5jb20EcG9zAzAEcHFzdHIDBHBxc3RybAMEcXN0cmwDNARxdWVyeQNkb2dzBHRfc3RtcAMxNTk0NzQzMTEw?fr2=sb-top-in.images.search&'
# Browser-like request headers; without a User-Agent the site denies access.
user_agent = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
    'Accept-Encoding': 'none',
    'Accept-Language': 'en-US,en;q=0.8',
    'Connection': 'keep-alive',
}
# all downloaded images are written here (created by main() if missing)
save_folder = 'images'
#---------------------------------------------------------------------------------
def download_n():
    """Build the "download N images" window.

    Collects a search term (e1) and an image count (e2); 'Download'
    hands both Entry widgets to download_images(), 'Back' returns to
    the main menu.
    """
    root1 = Tk()
    z = Canvas(root1, width=400,height=250)
    root1.title("Download n Images")
    Label(root1, text="What are you looking for?", fg = "Black",
          font = "Verdana 10").place(x=90,y=20)
    e1=Entry(root1)
    e1.place(x=90,y=50)
    Label(root1, text="How many images do you want? ", fg = "Black",
          font = "Verdana 10").place(x=90,y=90)
    e2=Entry(root1)
    e2.place(x=90,y=120)
    # lambdas defer the calls until the buttons are actually pressed
    button4 = tk.Button(root1, text='Download', width=17,
                        bg="#D3D3D3",fg='black',
                        command=lambda:download_images(e1,e2))
    button4.place(x=90,y=160)
    button5 = tk.Button(root1, text='Back', width=10,
                        bg="#D3D3D3",fg='black',
                        command=lambda:[root1.destroy(),main()]).place(x=225,y=160)
    z.pack()
def download_images(e1,e2):
    """Scrape Yahoo image search for the term in e1 and save the first
    n thumbnails (count from e2) into save_folder as <term><i>.jpg.

    e1 -- Entry widget holding the search term
    e2 -- Entry widget holding the image count (validated as int below)
    """
    try:
        root1 = Tk()
        root1.title("Done")
        data=e1.get()
        n_images=e2.get()
        if data=='' or n_images=='':
            root1.withdraw()
            m_box.showerror('Error','Please fill both entries ')
        else:
            data=str(data)
            n_images=int(n_images)  # may raise ValueError -> handled below
            # print(data,n_images)
            z = Canvas(root1, width=260,height=110)
            print('Start searching...')
            # get url query string
            searchurl = yahoo_img + 'p=' + data
            #print(searchurl)
            # request url, without user_agent the permission gets denied
            response = requests.get(searchurl, headers=user_agent)
            html = response.text
            soup = BeautifulSoup(html, 'html.parser')
            # thumbnails carry class "process"; limit caps the result count
            results = soup.find_all('img',class_= 'process',limit=n_images)
            # extract the link from the img tag
            imagelinks= []
            for re in results:
                url1=re.attrs.get('data-src')
                imagelinks.append(url1)
            print(f'found {len(imagelinks)} images')
            Label(root1, text=f'found {len(imagelinks)} images', fg = "Black",
                  font = "Verdana 10").place(x=70,y=20)
            print('Start downloading...')
            # Label(root1, text="Start downloading...", fg = "Black",
            # font = "Verdana 10").pack()
            for i, imagelink in enumerate(imagelinks):
                # open image link and save as file
                response = requests.get(imagelink)
                imagename = save_folder + '/' + data + str(i+1) + '.jpg'
                with open(imagename, 'wb') as file:
                    file.write(response.content)
            print('Done')
            Label(root1, text="DOWNLOADING COMPLETE", fg = "Black",
                  font = "Verdana 10").place(x=40,y=40)
            button5 = tk.Button(root1, text='OK', width=10,
                bg="#D3D3D3",fg='black',
                command=root1.destroy).place(x=90,y=70)
            z.pack()
    except ValueError:
        # non-numeric image count typed by the user
        root1.withdraw()
        m_box.showwarning('Error','Enter a Valid Number')
        # print("enter valid number")
        # root2 = Tk()
        # z = Canvas(root2, width=260,height=110)
        # Label(root2, text="Enter a valid Number", fg = "Black",
        # font = "Verdana 10").place(x=60,y=30)
        # button5 = tk.Button(root2, text='OK', width=10,
        # bg="#D3D3D3",fg='black',
        # command=root2.destroy).place(x=90,y=70)
        #
        # z.pack()
#------------------------------------------------------------------------------------
def url_n():
    """Build the "download via direct URL" window.

    Collects an image URL (e1) and a target file name (e2); 'Download'
    hands both Entry widgets to url_images(), 'Back' returns to the
    main menu.
    """
    root1 = Tk()
    root1.title("Download Image using url")
    z = Canvas(root1, width=400,height=250)
    Label(root1, text="Enter Url : ", fg = "Black",
          font = "Verdana 10").place(x=90,y=20)
    e1=Entry(root1,width=35)
    e1.place(x=90,y=50)
    Label(root1, text="Name of the image to be saved :", fg = "Black",
          font = "Verdana 10").place(x=90,y=90)
    e2=Entry(root1)
    e2.place(x=90,y=120)
    # lambdas defer the calls until the buttons are pressed
    button4 = tk.Button(root1, text='Download', width=17,
                        bg="#D3D3D3",fg='black',
                        command=lambda:url_images(e1,e2)).place(x=90,y=160)
    button5 = tk.Button(root1, text='Back', width=10,
                        bg="#D3D3D3",fg='black',
                        command=lambda:[root1.destroy(),main()]).place(x=225,y=160)
    z.pack()
def url_images(e1,e2):
    """Download one image from the direct URL in e1 and save it as
    <name from e2>.jpg inside save_folder.

    e1 -- Entry widget holding the image URL
    e2 -- Entry widget holding the file name (without extension)
    """
    try:
        root1 = Tk()
        root1.title("Done")
        z = Canvas(root1, width=260,height=110)
        imagelink=e1.get()
        data=e2.get()
        if imagelink=='' or data=='':
            root1.withdraw()
            m_box.showerror('Error','Please fill both entries ')
        else:
            response = requests.get(imagelink)
            imagename = save_folder + '/' + data + '.jpg'
            with open(imagename, 'wb') as file:
                file.write(response.content)
            print('Done')
            Label(root1, text="IMAGE DOWNLOADED", fg = "Black",
                  font = "Verdana 10").place(x=60,y=30)
            button5 = tk.Button(root1, text='OK', width=10,
                bg="#D3D3D3",fg='black',
                command=root1.destroy).place(x=90,y=70)
            z.pack()
    except :
        # NOTE(review): bare except hides the real cause (bad URL, network
        # error, file I/O) -- narrowing it to Exception would aid debugging.
        root1.withdraw()
        m_box.showwarning('Invalid Url','Enter a Valid URL')
#------------------------------------------------------------------------------------------
def insta_n():
    """Build the "download from Instagram" window.

    Collects a post link (e1) and a target file name (e2); 'Download'
    hands both Entry widgets to insta_images(), 'Back' returns to the
    main menu.
    """
    root1 = Tk()
    root1.title("Download Instagram Image ")
    z = Canvas(root1, width=400,height=250)
    Label(root1, text="Enter Instagram Image link : ", fg = "Black",
          font = "Verdana 10").place(x=90,y=20)
    e1=Entry(root1,width=35)
    e1.place(x=90,y=50)
    Label(root1, text="Name of the image to be saved :", fg = "Black",
          font = "Verdana 10").place(x=90,y=90)
    e2=Entry(root1)
    e2.place(x=90,y=120)
    # lambdas defer the calls until the buttons are pressed
    button4 = tk.Button(root1, text='Download', width=17,
                        bg="#D3D3D3",fg='black',
                        command=lambda:insta_images(e1,e2)).place(x=90,y=160)
    button5 = tk.Button(root1, text='Back', width=10,
                        bg="#D3D3D3",fg='black',
                        command=lambda:[root1.destroy(),main()]).place(x=225,y=160)
    z.pack()
def insta_images(e1,e2):
    """Download the photo from the Instagram post URL in e1 and save it
    as <name from e2>.jpg inside save_folder.

    e1 -- Entry widget holding the Instagram post URL
    e2 -- Entry widget holding the file name (without extension)

    Fixes: the input check referenced the undefined name ``n_images``
    (copy-paste from download_images), raising a NameError that the old
    bare ``except:`` silently reported as "Invalid Instagram Link"; the
    except clause is also narrowed to ``Exception`` so SystemExit and
    KeyboardInterrupt are no longer swallowed.
    """
    try:
        root1 = Tk()
        root1.title("Done")
        z = Canvas(root1, width=260,height=110)
        url=e1.get()
        data=e2.get()
        # validate the two fields this window actually owns
        if url=='' or data=='':
            root1.withdraw()
            m_box.showerror('Error','Please fill both entries ')
        else:
            # browser-like headers: Instagram rejects bare requests
            usr_agent = {
                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
                'Accept-Encoding': 'none',
                'Accept-Language': 'en-US,en;q=0.8',
                'Connection': 'keep-alive',
            }
            response = requests.get(url, headers=usr_agent)
            html = response.text
            soup = BeautifulSoup(html,'html.parser')
            # Instagram publishes the photo URL in its Open Graph meta tag
            metaTag = soup.find_all('meta', {'property':'og:image'})
            imagelink = metaTag[0]['content']
            response = requests.get(imagelink)
            imagename = save_folder + '/' + data + '.jpg'
            with open(imagename, 'wb') as file:
                file.write(response.content)
            print('Done')
            Label(root1, text="IMAGE DOWNLOADED", fg = "Black",
                  font = "Verdana 10").place(x=60,y=30)
            button5 = tk.Button(root1, text='OK', width=10,
                bg="#D3D3D3",fg='black',
                command=root1.destroy).place(x=90,y=70)
            z.pack()
    except Exception:
        # bad URL, network failure, or unexpected page structure
        root1.withdraw()
        m_box.showwarning('Invalid Instagram Link','Enter a Valid URL')
def main():
    """Ensure the download folder exists, build the top-level chooser
    window and start the Tk event loop."""
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    root = Tk()
    root.title("Image Downloader")
    w = Canvas(root, width=400,height=250)
    Label(root, text="Image Downloader", fg = "Black",
          font = "Verdana 14",pady=10,padx=10,bg = "LightGrey").place(x=100,y=20)
    # each button closes this window and opens the matching sub-window
    button1 = tk.Button(root, text='Download n required images', width=35,
                        command=lambda: [download_n(),root.destroy()]).place(x=75,y=100)
    button2 = tk.Button(root, text='Download via url', width=35,
                        command=lambda: [url_n(),root.destroy()]).place(x=75,y=140)
    button3 = tk.Button(root, text='Download instagram images', width=35,
                        command=lambda: [insta_n(),root.destroy()]).place(x=75,y=180)
    w.pack()
    mainloop()
if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | kevalmahajan.noreply@github.com |
88850f9c8b1aef4142ac6d51fb5ce192a8482057 | be1e8444482e40df5d02d57964f61cfbd9249f13 | /Django-0.90/django/core/db/backends/postgresql.py | b1b2d9cb52d964e5d1fd6012266dabed23eedd4c | [
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | tungvx/deploy | 9946d4350f5fbc5da25d45505b75384fd40e6088 | 9e1917c6c645b4ce0efe115b0da76027d4bc634c | refs/heads/master | 2021-01-02T09:08:45.691746 | 2011-11-12T19:44:48 | 2011-11-12T19:44:48 | 2,763,145 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,276 | py | """
PostgreSQL database backend for Django.
Requires psycopg 1: http://initd.org/projects/psycopg1
"""
from django.core.db import base, typecasts
import psycopg as Database
DatabaseError = Database.DatabaseError
class DatabaseWrapper:
    """Lazily-opened wrapper around a single psycopg 1 connection.

    The connection is created on the first cursor() call from the
    DATABASE_* settings; commit/rollback/close delegate to it.
    (Python 2 / psycopg 1 era code.)
    """
    def __init__(self):
        self.connection = None  # psycopg connection; opened lazily by cursor()
        self.queries = []  # filled by CursorDebugWrapper when DEBUG is on
    def cursor(self):
        from django.conf.settings import DATABASE_USER, DATABASE_NAME, DATABASE_HOST, DATABASE_PORT, DATABASE_PASSWORD, DEBUG, TIME_ZONE
        if self.connection is None:
            if DATABASE_NAME == '':
                from django.core.exceptions import ImproperlyConfigured
                raise ImproperlyConfigured, "You need to specify DATABASE_NAME in your Django settings file."
            # Build a libpq-style "key=value" connection string from settings.
            conn_string = "dbname=%s" % DATABASE_NAME
            if DATABASE_USER:
                conn_string = "user=%s %s" % (DATABASE_USER, conn_string)
            if DATABASE_PASSWORD:
                conn_string += " password='%s'" % DATABASE_PASSWORD
            if DATABASE_HOST:
                conn_string += " host=%s" % DATABASE_HOST
            if DATABASE_PORT:
                conn_string += " port=%s" % DATABASE_PORT
            self.connection = Database.connect(conn_string)
            self.connection.set_isolation_level(1) # make transactions transparent to all cursors
        cursor = self.connection.cursor()
        # every new cursor adopts the configured time zone
        cursor.execute("SET TIME ZONE %s", [TIME_ZONE])
        if DEBUG:
            return base.CursorDebugWrapper(cursor, self)
        return cursor
    def commit(self):
        # commit the current transaction on the shared connection
        return self.connection.commit()
    def rollback(self):
        # roll back only if a connection was ever opened
        if self.connection:
            return self.connection.rollback()
    def close(self):
        if self.connection is not None:
            self.connection.close()
            self.connection = None
    def quote_name(self, name):
        # double-quote a SQL identifier unless it is already quoted
        if name.startswith('"') and name.endswith('"'):
            return name # Quoting once is enough.
        return '"%s"' % name
def dictfetchone(cursor):
    "Returns a row from the cursor as a dict (delegates to psycopg 1's cursor)."
    return cursor.dictfetchone()
def dictfetchmany(cursor, number):
    "Returns a certain number of rows from a cursor as a dict (psycopg 1 API)."
    return cursor.dictfetchmany(number)
def dictfetchall(cursor):
    "Returns all rows from a cursor as a dict (psycopg 1 API)."
    return cursor.dictfetchall()
def get_last_insert_id(cursor, table_name, pk_name):
    """Return the current value of the table's primary-key sequence,
    i.e. the id produced by the most recent INSERT on this session."""
    sequence_name = '%s_%s_seq' % (table_name, pk_name)
    cursor.execute("SELECT CURRVAL('%s')" % sequence_name)
    return cursor.fetchone()[0]
def get_date_extract_sql(lookup_type, table_name):
    """Return SQL extracting one date part ('year', 'month' or 'day')
    from the given column expression.

    http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
    """
    template = "EXTRACT('%s' FROM %s)"
    return template % (lookup_type, table_name)
def get_date_trunc_sql(lookup_type, field_name):
    """Return SQL truncating field_name to the given precision
    ('year', 'month' or 'day').

    http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
    """
    template = "DATE_TRUNC('%s', %s)"
    return template % (lookup_type, field_name)
def get_limit_offset_sql(limit, offset=None):
    """Return a "LIMIT n" clause, with " OFFSET m" appended when a
    non-zero offset is given."""
    clause = "LIMIT %s" % limit
    if offset and offset != 0:
        clause = "%s OFFSET %s" % (clause, offset)
    return clause
def get_random_function_sql():
    # PostgreSQL's random-value expression, used for ORDER BY random().
    return "RANDOM()"
def get_table_list(cursor):
    "Returns a list of table names in the current database."
    # Queries the system catalog for ordinary tables and views ('r', 'v')
    # visible on the search path, excluding PostgreSQL's own schemas.
    cursor.execute("""
        SELECT c.relname
        FROM pg_catalog.pg_class c
        LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
        WHERE c.relkind IN ('r', 'v', '')
            AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
            AND pg_catalog.pg_table_is_visible(c.oid)""")
    return [row[0] for row in cursor.fetchall()]
def get_relations(cursor, table_name):
    """
    Returns a dictionary of {field_index: (field_index_other_table, other_table)}
    representing all relationships to the given table. Indexes are 0-based.
    """
    cursor.execute("""
        SELECT con.conkey, con.confkey, c2.relname
        FROM pg_constraint con, pg_class c1, pg_class c2
        WHERE c1.oid = con.conrelid
            AND c2.oid = con.confrelid
            AND c1.relname = %s
            AND con.contype = 'f'""", [table_name])
    relations = {}
    for conkey, confkey, other_table in cursor.fetchall():
        # conkey/confkey come back like "{2}": strip the braces and make
        # the 1-based column numbers 0-based.  Rows that do not parse as
        # single integers are skipped.
        try:
            local = int(conkey[1:-1]) - 1
            remote = int(confkey[1:-1]) - 1
        except ValueError:
            continue
        relations[local] = (remote, other_table)
    return relations
# Register these custom typecasts, because Django expects dates/times to be
# in Python's native (standard-library) datetime/time format, whereas psycopg
# use mx.DateTime by default.
# The leading tuples are PostgreSQL type OIDs: 1082=date, 1083/1266=time,
# 1114/1184=timestamp, 16=boolean.
try:
    Database.register_type(Database.new_type((1082,), "DATE", typecasts.typecast_date))
except AttributeError:
    # presumably psycopg 2 exposes a different typecasting API here, so the
    # AttributeError doubles as a version probe -- refuse to run with it.
    raise Exception, "You appear to be using psycopg version 2, which isn't supported yet, because it's still in beta. Use psycopg version 1 instead: http://initd.org/projects/psycopg1"
Database.register_type(Database.new_type((1083,1266), "TIME", typecasts.typecast_time))
Database.register_type(Database.new_type((1114,1184), "TIMESTAMP", typecasts.typecast_timestamp))
Database.register_type(Database.new_type((16,), "BOOLEAN", typecasts.typecast_boolean))
# Maps Django field-lookup names (e.g. "icontains") to the PostgreSQL
# operator that implements them; the i* variants use ILIKE for
# case-insensitive matching.
OPERATOR_MAPPING = {
    'exact': '=',
    'iexact': 'ILIKE',
    'contains': 'LIKE',
    'icontains': 'ILIKE',
    'ne': '!=',
    'gt': '>',
    'gte': '>=',
    'lt': '<',
    'lte': '<=',
    'startswith': 'LIKE',
    'endswith': 'LIKE',
    'istartswith': 'ILIKE',
    'iendswith': 'ILIKE',
}
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
DATA_TYPES = {
    'AutoField': 'serial',
    'BooleanField': 'boolean',
    'CharField': 'varchar(%(maxlength)s)',
    'CommaSeparatedIntegerField': 'varchar(%(maxlength)s)',
    'DateField': 'date',
    'DateTimeField': 'timestamp with time zone',
    'EmailField': 'varchar(75)',
    'FileField': 'varchar(100)',
    'FilePathField': 'varchar(100)',
    'FloatField': 'numeric(%(max_digits)s, %(decimal_places)s)',
    'ImageField': 'varchar(100)',
    'IntegerField': 'integer',
    'IPAddressField': 'inet',
    'ManyToManyField': None,
    'NullBooleanField': 'boolean',
    'OneToOneField': 'integer',
    'PhoneNumberField': 'varchar(20)',
    'PositiveIntegerField': 'integer CHECK (%(column)s >= 0)',
    'PositiveSmallIntegerField': 'smallint CHECK (%(column)s >= 0)',
    'SlugField': 'varchar(50)',
    'SmallIntegerField': 'smallint',
    'TextField': 'text',
    'TimeField': 'time',
    'URLField': 'varchar(200)',
    'USStateField': 'varchar(2)',
}
# Maps type codes to Django Field types.
# (Keys are PostgreSQL type OIDs, as reported in cursor.description.)
DATA_TYPES_REVERSE = {
    16: 'BooleanField',
    21: 'SmallIntegerField',
    23: 'IntegerField',
    25: 'TextField',
    869: 'IPAddressField',
    1043: 'CharField',
    1082: 'DateField',
    1083: 'TimeField',
    1114: 'DateTimeField',
    1184: 'DateTimeField',
    1266: 'TimeField',
    1700: 'FloatField',
}
| [
"toilatung90@gmail.com"
] | toilatung90@gmail.com |
2cd97cb8b0b9c6e273657a730b6c9cceac772bfc | 694760b87cbf4b72eb2cfc554fe3818f666c81a0 | /source code/hello-world.py | 327a22d8d96d42ef4d2e35959436a7d662703bee | [] | no_license | vipermax/dummy-project | a5aac2860849abf46efdbcddd9a35c361aeb9481 | dd0047fac7ad53e428b1ff7b208d319d79efa853 | refs/heads/master | 2021-01-10T01:30:36.137587 | 2016-03-01T08:58:58 | 2016-03-01T08:58:58 | 52,859,628 | 0 | 0 | null | 2016-03-01T08:57:32 | 2016-03-01T08:09:18 | Python | UTF-8 | Python | false | false | 280 | py | print "Hello World!"
print "Hello Again and again"
print "I like typing this."
print "This is a lot of fun."
print 'Yay! Printing.'
print "I'd much rather you 'not'."
print 'I "said" do not touch this.'
Print "Hey, here is something new!"
print "some udpdate from master branch"
| [
"vipermax@gmail.com"
] | vipermax@gmail.com |
e47f21b19c216ae807692a673b8184880a5aa25d | 51761bbf3e42543687664291dd3a7d3ae9a90fd2 | /utils.py | 68c6c6eba79327b7c718fd4655159cd4dda8850b | [] | no_license | MarcelaBarella/luizalabs_challenge | 03612291e8f89875c1572eb301235bc5b6f7948d | 12b977b6836222bcd7a8d8464a7b840425d2afe2 | refs/heads/master | 2020-03-20T11:43:29.243943 | 2019-11-22T14:59:50 | 2019-11-22T14:59:50 | 137,410,494 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from datetime import datetime
def str_to_datetime(date):
    """Parse a 'dd/mm/YYYY HH:MM' string into a datetime.

    Non-string values (e.g. already-parsed datetimes) are returned
    unchanged, so the function is safe to apply twice.

    Fix: use isinstance() instead of the exact-type comparison
    ``type(date) != str`` so str subclasses are parsed too.

    :raises ValueError: if the string does not match the expected format
    """
    if not isinstance(date, str):
        return date
    return datetime.strptime(date, '%d/%m/%Y %H:%M')
| [
"marcela.barella@hotmail.com"
] | marcela.barella@hotmail.com |
fb535040a409105773f5e30f68bd636c8b3931a2 | 307d6435a8da159eede9c233dc14bce29d8af11f | /src/fewer_than_15_siblings.py | 0450dcc4a5c0f7321f333cc894452d9854905de2 | [] | no_license | Ahsan45/SSW-CS555 | 7d82d0f039bfb31cbb775718debfde02615a8ce1 | fc4808884a99d48ff29f122d67c197061102c57c | refs/heads/master | 2021-01-25T06:30:25.675681 | 2017-08-03T00:56:58 | 2017-08-03T00:56:58 | 93,584,540 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | #Erik Lim
#SSW 555 Agile Methods for Software Development
'''Module for checking if mthere are fewer than 15 siblings in a family'''
from utils import date_first
def fewer_than_15_siblings(fam):
    '''Return True when the family record lists fewer than 15 children.

    Families with no 'CHIL' entry trivially satisfy the rule.'''
    if 'CHIL' not in fam:
        return True
    return len(fam['CHIL']) < 15
"noreply@github.com"
] | Ahsan45.noreply@github.com |
59429ee5ee8ca7e0edd5fe634b9e3e46f97d9c73 | 9bf9ba2ff40e63e87efc67d0d90416b3e839ca3f | /TwoPlayerTicTacToe2.py | 1976094f651cf9f5f5bca70443c4c2911928cc3e | [] | no_license | Muhammad-Ahmed-Mansoor/Python-tic-tac-toe | 243353dda52cad2256633cd979fe989c8fdc5f13 | 79d3619aea057bafab015498de3ae85418cf7889 | refs/heads/master | 2022-11-13T21:57:52.504479 | 2020-07-07T15:48:47 | 2020-07-07T15:48:47 | 277,858,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,952 | py | import os;
def boardPrint():
    """Print the board as a 3x3 grid laid out like a numeric keypad.

    Fix: the '---------' separator used to be printed after *every*
    cell (the ``if i != 3`` test sat outside the end-of-row branch),
    which scrambled the grid.  It now appears only between rows.
    """
    global board
    print('\n')  # two blank lines for clarity's sake
    for i in [7, 8, 9, 4, 5, 6, 1, 2, 3]:  # order matches the keyboard numpad
        if i % 3 != 0:  # not the last cell of a row: stay on this line
            print(board[i], '|', end=' ')
        else:  # last cell of a row: finish the line
            print(board[i])
            if i != 3:  # separator between rows, none after the bottom row
                print('---------')
    print('\n')  # two blank lines for clarity's sake
    return
# note: the name ``move`` is used only as a local parameter here and in
# moveInput(), never as a global
def boardManager(move): #makes changes to the board according to given move
    """Record the current player's mark ('X' or 'O') in square ``move`` (1-9)."""
    global board,currentPlayer
    board[move]=currentPlayer
    return
def moveInput():
    """Prompt the current player until a *free* square number (1-9) is
    entered, then return it as an int.

    Fixes over the old membership test (``move in board``):
    - '0' is no longer accepted (board[0] is a dummy cell, so playing it
      silently wasted a turn);
    - typing 'X' or 'O' once that mark is on the board no longer crashes
      in ``int(move)``.
    A digit is playable exactly while the board still displays it.
    """
    global currentPlayer
    while True:
        move = input(currentPlayer + ' moves:')
        if move in ('1', '2', '3', '4', '5', '6', '7', '8', '9') and move in board:
            return int(move)
def judge():
    """Return the match state: 'win' if three equal marks line up,
    'draw' when all nine moves are spent, else 'continue'."""
    global board, moveCount
    winning_lines = (
        (1, 2, 3), (4, 5, 6), (7, 8, 9),   # rows
        (1, 4, 7), (2, 5, 8), (3, 6, 9),   # columns
        (1, 5, 9), (3, 5, 7),              # diagonals
    )
    for a, b, c in winning_lines:
        if board[a] == board[b] == board[c]:
            return 'win'
    if moveCount == 9:
        return 'draw'
    return 'continue'
# main program starts here
while True:#outer loop: one iteration per game, until the user types exit
    board=[str(i) for i in range(10)]#board[0] to be ignored for simplicity & readability. board[1:9] to represent
    #the 9 squares of a tic tac toe board; a free square displays its own number.
    moveCount=0
    #starting the game loop:
    while judge()=='continue':
        if moveCount %2==0:
            currentPlayer='X' #as X goes first so gets even numbered moves
        else :
            currentPlayer='O'
        boardPrint()
        boardManager(moveInput())
        os.system("cls")  # clear the console (Windows-only command)
        moveCount+=1
        continue
    # game over: show the final board and announce the result
    boardPrint()
    if judge()=='win':
        print(currentPlayer+' wins.')
    elif judge()=='draw':
        print ('Match Drawn')
    print()
    restart=input('Press enter to restart or type exit to exit.')
    if restart=='exit':
        break;
    os.system("cls")
    continue
continue
| [
"noreply@github.com"
] | Muhammad-Ahmed-Mansoor.noreply@github.com |
a13fe0f96eed0b4b55663eed124b9bba9ead6cec | 51ea0825d013e4205a74e288a95cec86b379e6ef | /augmentations.py | 01b80e72b84a38d252970a0cf0355501a1d6c22c | [] | no_license | lucaslu1987/faceboxes | 79d6e1f4d34087825cf81d76a4401e0bc40e77e1 | 7d3a459ad7e98c791ce9ad7b9058329f0663f4e4 | refs/heads/master | 2020-03-29T22:06:24.612806 | 2018-08-24T01:25:31 | 2018-08-24T01:25:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,384 | py | import torch
from torchvision import transforms
import cv2
import numpy as np
import types
from numpy import random
class Compose(object):
    """Chain several augmentation callables into one.

    Every transform must accept and return the triple
    ``(img, boxes, labels)``.

    Example:
        >>> augmentations.Compose([
        >>>     transforms.CenterCrop(10),
        >>>     transforms.ToTensor(),
        >>> ])
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img, boxes=None, labels=None):
        for transform in self.transforms:
            img, boxes, labels = transform(img, boxes, labels)
        return img, boxes, labels
class ConvertFromInts(object):
    """Cast the image to float32 so later photometric ops work on floats."""

    def __call__(self, image, boxes=None, labels=None):
        as_float = image.astype(np.float32)
        return as_float, boxes, labels
class RandomSaturation(object):
    """With probability 0.5, scale the saturation channel (index 1 of an
    HSV image) by a factor drawn uniformly from [lower, upper]."""

    def __init__(self, lower=0.5, upper=1.5):
        assert upper >= lower, "contrast upper must be >= lower."
        assert lower >= 0, "contrast lower must be non-negative."
        self.lower = lower
        self.upper = upper

    def __call__(self, image, boxes=None, labels=None):
        apply_jitter = random.randint(2)
        if apply_jitter:
            factor = random.uniform(self.lower, self.upper)
            image[:, :, 1] *= factor
        return image, boxes, labels
class RandomHue(object):
    """With probability 0.5, shift the hue channel (index 0) by an offset
    drawn uniformly from [-delta, delta], wrapping into [0, 360)."""

    def __init__(self, delta=18.0):
        assert 0.0 <= delta <= 360.0
        self.delta = delta

    def __call__(self, image, boxes=None, labels=None):
        if random.randint(2):
            shift = random.uniform(-self.delta, self.delta)
            hue = image[:, :, 0]     # view into the hue plane (in-place edits)
            hue += shift
            hue[hue > 360.0] -= 360.0
            hue[hue < 0.0] += 360.0
        return image, boxes, labels
class RandomLightingNoise(object):
    """With probability 0.5, permute the image's colour channels with a
    randomly chosen one of the six possible orderings."""

    def __init__(self):
        self.perms = ((0, 1, 2), (0, 2, 1),
                      (1, 0, 2), (1, 2, 0),
                      (2, 0, 1), (2, 1, 0))

    def __call__(self, image, boxes=None, labels=None):
        if random.randint(2):
            chosen = self.perms[random.randint(len(self.perms))]
            image = SwapChannels(chosen)(image)  # shuffle channels
        return image, boxes, labels
class ConvertColor(object):
    """Convert an image between BGR and HSV colour spaces via OpenCV."""

    def __init__(self, current='BGR', transform='HSV'):
        self.current = current
        self.transform = transform

    def __call__(self, image, boxes=None, labels=None):
        conversion = (self.current, self.transform)
        if conversion == ('BGR', 'HSV'):
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        elif conversion == ('HSV', 'BGR'):
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        else:
            raise NotImplementedError
        return image, boxes, labels
class RandomContrast(object):
    """With probability 0.5, multiply the whole image by a contrast
    factor drawn uniformly from [lower, upper]."""

    def __init__(self, lower=0.5, upper=1.5):
        assert upper >= lower, "contrast upper must be >= lower."
        assert lower >= 0, "contrast lower must be non-negative."
        self.lower = lower
        self.upper = upper

    # expects a float image
    def __call__(self, image, boxes=None, labels=None):
        if random.randint(2):
            factor = random.uniform(self.lower, self.upper)
            image *= factor
        return image, boxes, labels
class RandomBrightness(object):
    """With probability 0.5, add a brightness offset drawn uniformly
    from [-delta, delta] to the whole image."""

    def __init__(self, delta=32):
        assert 0.0 <= delta <= 255.0
        self.delta = delta

    def __call__(self, image, boxes=None, labels=None):
        if random.randint(2):
            offset = random.uniform(-self.delta, self.delta)
            image += offset
        return image, boxes, labels
class SwapChannels(object):
    """Reorder an image's colour channels.

    Args:
        swaps (int triple): final order of channels, e.g. (2, 1, 0).
    """

    def __init__(self, swaps):
        self.swaps = swaps

    def __call__(self, image):
        """
        Args:
            image: array whose last axis holds the channels
        Return:
            the image with its channels reordered according to ``swaps``
        """
        reordered = image[:, :, self.swaps]
        return reordered
class PhotometricDistort(object):
    """Random colour jitter (brightness, contrast, saturation, hue and
    channel shuffling) following the SSD data-augmentation recipe."""
    def __init__(self):
        # HSV-space ops bracketed by contrast; only one of the two
        # RandomContrast entries is used per call (see __call__).
        self.pd = [
            RandomContrast(),
            ConvertColor(transform='HSV'),
            RandomSaturation(),
            RandomHue(),
            ConvertColor(current='HSV', transform='BGR'),
            RandomContrast()
        ]
        self.rand_brightness = RandomBrightness()
        self.rand_light_noise = RandomLightingNoise()
    def __call__(self, image, boxes, labels):
        im = image.copy()
        im, boxes, labels = self.rand_brightness(im, boxes, labels)
        # apply contrast either before (pd[:-1]) or after (pd[1:]) the
        # HSV-space jitter, never both
        if random.randint(2):
            distort = Compose(self.pd[:-1])
        else:
            distort = Compose(self.pd[1:])
        im, boxes, labels = distort(im, boxes, labels)
        return self.rand_light_noise(im, boxes, labels)
class SSDAugmentation(object):
    """Top-level augmentation pipeline applied to (img, boxes, labels):
    float conversion followed by random photometric distortion."""
    def __init__(self):
        self.augment = Compose([
            ConvertFromInts(),
            PhotometricDistort(),
        ])
    def __call__(self, img, boxes, labels):
        return self.augment(img, boxes, labels)
"609632889@qq.com"
] | 609632889@qq.com |
2317b9612a821152993d2c8d3d77909c6a5d504f | 69266a7696f5f8be7c78fd29ef68a7619e41d28d | /Tools/ComputeTool.py | 9353c424e8d6deac1c49914c31c6768d29dd1ec4 | [] | no_license | microelly2/PyFlowWWW | 52deb54deb2db668cd21e9ce251894baaa663823 | 0b3d0009494327b2ec34af9fbca2a5fee1fef4a4 | refs/heads/master | 2022-04-14T02:35:08.999370 | 2020-04-11T19:48:54 | 2020-04-11T19:48:54 | 254,876,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,464 | py | ## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera, microelly
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from nine import str
from PyFlow.UI.Tool.Tool import ShelfTool
from PyFlow.Core.Common import Direction
import FreeCADGui
from Qt import QtGui
from Qt.QtWidgets import QFileDialog
from nodeeditor.say import *
import sys
if sys.version_info[0] !=2:
from importlib import reload
import os
RESOURCES_DIR = os.path.dirname(os.path.realpath(__file__)) + "/res/"
class ComputeTool(ShelfTool):
    """Shelf tool that recomputes every node currently selected in the graph."""

    def __init__(self):
        super(ComputeTool, self).__init__()

    @staticmethod
    def toolTip():
        return "call compute method for selected nodes"

    @staticmethod
    def getIcon():
        return QtGui.QIcon(RESOURCES_DIR + "compute.png")

    @staticmethod
    def name():
        return str("ComputeTool")

    def do(self):
        all_nodes = FreeCAD.PF.graphManager.get().getAllNodes()
        say("selected Nodes ...")
        # walk the nodes left-to-right (sorted by x) and recompute the
        # selected ones
        for node in sorted(all_nodes, key=lambda node: node.x):
            if node.getWrapper().isSelected():
                say(node, node.x)
                node.compute()
class DeleteTool(ShelfTool):
    """Shelf tool that removes every node currently selected in the graph."""

    def __init__(self):
        super(DeleteTool, self).__init__()

    @staticmethod
    def toolTip():
        return "Delete the selected nodes"

    @staticmethod
    def getIcon():
        return QtGui.QIcon(RESOURCES_DIR + "delete.png")

    @staticmethod
    def name():
        return str("DeleteTool")

    def do(self):
        all_nodes = FreeCAD.PF.graphManager.get().getAllNodes()
        say("selected Nodes ...")
        # walk the nodes left-to-right (sorted by x) and kill the selected ones
        for node in sorted(all_nodes, key=lambda node: node.x):
            if node.getWrapper().isSelected():
                say(node, node.x)
                node.kill()
class ToyTool(ShelfTool):
    """Developer scratch-pad tool: reloads nodeeditor.dev and runs its
    run_shelfToy hook, so dev code can be changed without restarting."""
    def __init__(self):
        super( ToyTool, self).__init__()
    @staticmethod
    def toolTip():
        return "Toy for Developer"
    @staticmethod
    def getIcon():
        return QtGui.QIcon(RESOURCES_DIR + "toy.png")
    @staticmethod
    def name():
        return str("ToyTool")
    def do(self):
        import nodeeditor.dev
        # re-import so edits to nodeeditor.dev are picked up on every click
        reload (nodeeditor.dev)
        nodeeditor.dev.run_shelfToy(self)
class FreeCADTool(ShelfTool):
    """Shelf tool that brings the FreeCAD main window to the front."""
    def __init__(self):
        super( FreeCADTool, self).__init__()
    @staticmethod
    def toolTip():
        return "FreeCAD mainWindow"
    @staticmethod
    def getIcon():
        return QtGui.QIcon(RESOURCES_DIR + "freecad.png")
    @staticmethod
    def name():
        return str("FreeCADTool")
    def do(self):
        mw=FreeCADGui.getMainWindow()
        # hide/show cycle forces the window to be raised and redrawn
        mw.hide()
        mw.show()
def toollist():
    """Return the shelf-tool classes contributed by this module."""
    tools = [
        ComputeTool,
        DeleteTool,
        FreeCADTool,
        ToyTool,
    ]
    return tools
| [
"thomas@freecadbuch.de"
] | thomas@freecadbuch.de |
8eda4c8d2fd5781128748cfa3f14c23c06229fc3 | 10e19b5cfd59208c1b754fea38c34cc1fb14fdbe | /desktop/core/ext-py/Babel-0.9.6/babel/messages/tests/data/project/ignored/this_wont_normally_be_here.py | f26ddee1f7972ffe1050d7bb17ab8f960c38096a | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | sarvex/hue | 780d28d032edd810d04e83f588617d1630ec2bef | 6e75f0c4da2f3231e19c57bdedd57fb5a935670d | refs/heads/master | 2023-08-15T21:39:16.171556 | 2023-05-01T08:37:43 | 2023-05-01T08:37:43 | 32,574,366 | 0 | 0 | Apache-2.0 | 2023-09-14T16:55:28 | 2015-03-20T09:18:18 | Python | UTF-8 | Python | false | false | 295 | py | # -*- coding: utf-8 -*-
# This file won't normally be in this directory.
# It IS only for tests
from gettext import ngettext
def foo():
    """Extraction-test fixture: exercises ngettext so the message
    scanner has something to find; not meant to be called."""
    # Note: This will have the TRANSLATOR: tag but shouldn't
    # be included on the extracted stuff
    print ngettext('FooBar', 'FooBars', 1)
| [
"bcwalrus@cloudera.com"
] | bcwalrus@cloudera.com |
c1a174860f449f624c4ea77b9f9327c3ae268a44 | 3f90cf7ddbc7afb9c7b8cf26ffee7f26f75d995d | /setup.py | e1037f1deeb0d503439c3bbc94f48aca0a855761 | [
"MIT"
] | permissive | Znigneering/CSCI-3154 | 0c0f9383dc9f0a42c6f653c3fb450410a4b1a642 | bc318efc73d2a80025b98f5b3e4f7e4819e952e4 | refs/heads/master | 2022-12-24T17:49:17.711622 | 2018-11-27T18:18:28 | 2018-11-27T18:18:28 | 158,028,171 | 0 | 0 | MIT | 2022-12-10T08:41:36 | 2018-11-17T21:54:45 | C | UTF-8 | Python | false | false | 345 | py | from setuptools import setup
# Package metadata for PyTPG (Tangled Program Graphs).
setup(
    name='PyTPG',
    version='0.8',
    packages=['tpg'],
    license='MIT',
    # NOTE(review): the README file handle opened here is never closed
    description='Python implementation of Tangled Program Graphs.',
    long_description=open('README.md').read(),
    author='Ryan Amaral',
    author_email='ryan_amaral@live.com',
    url='https://github.com/Ryan-Amaral/PyTPG')
| [
"zh676054@dal.ca"
] | zh676054@dal.ca |
2a6df882b37651ba09c0543b3c1661bad7bf365e | fe9f4a9c75ec60cd4245b15164e27161567b43ff | /week3/2-Resolve-with-Functions/prime_digit.py | 8506bc8d70591dc4442261e9465aecd2d51d7144 | [] | no_license | manuelavelinova/Programming0 | 5dc8273edc5c4302de37d48226e1ee7b9a062959 | 56132232ea4321f517af3dd6f0139ee35f00ef15 | refs/heads/master | 2016-09-05T13:40:11.416849 | 2015-03-22T10:02:11 | 2015-03-22T10:02:11 | 31,215,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | def is_prime(n):
start = 2
isPrime = True
while start < n:
if n % start == 0:
isPrime = False
if n == 1:
isPrime = False
start += 1
if isPrime:
return True
else:
return False
def to_digits(n):
    """Return the decimal digits of n, least-significant first.

    Zero yields an empty list, matching the original behaviour."""
    digits = []
    while n:
        n, digit = divmod(n, 10)
        digits.append(digit)
    return digits
# Read a number and report whether any of its decimal digits is prime.
n = input("Enter n: ")
n = int(n)
result = to_digits(n)
primeDigit = False  # flips to True as soon as one prime digit is found
for res in result:
    if is_prime(res):
        primeDigit = True
        break
if primeDigit:
    print("There are prime digits")
else:
    print("There are no prime digits")
| [
"manuelavelinova@gmail.com"
] | manuelavelinova@gmail.com |
a29c354c212eb6398363a27ee49432b9ad922189 | 2d93f948ba86742bb493403cf038e76444e58842 | /corona/__main__.py | d6e3d059734327c55aed8eef2b771f3978d857c3 | [] | no_license | antista/sir | debe6f31d0050e77ea6c3836c548d25cba2291fa | 2b9d7c5e6c70d5486e55e209352400bc85b589d8 | refs/heads/master | 2022-12-28T01:40:02.843425 | 2020-10-10T11:41:42 | 2020-10-10T11:41:42 | 292,320,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | from matplotlib import pylab
from corona.corona import main
# Script entry point: run the simulation, then hand control to the
# matplotlib event loop so the figures created by main() stay visible.
if __name__ == '__main__':
    main()
    pylab.show()
| [
"anti2100@yandex.ru"
] | anti2100@yandex.ru |
ea81a3f2769fe2186891c4edce86d5f3c483d4e5 | 940622a48cc8711a39dd7f36122bae1e25ee2fcc | /QuestionTime/QuestionTime/urls.py | a68ebcf6bfba297eff05f5c23e941b75964ca7f5 | [] | no_license | merveealpay/django-vue-question-app | 144d1f9b49cd1f0cbd91820c2c11cc42ff95a09d | f12c88bdbfcac685b7098145370e13be935c8d8f | refs/heads/main | 2023-02-05T12:58:28.651036 | 2020-12-27T18:05:35 | 2020-12-27T18:05:35 | 319,586,207 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | """QuestionTime URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path, re_path
from django_registration.backends.one_step.views import RegistrationView
#look at django-registration documentation!!!
from core.views import IndexTemplateView
from users.forms import CustomUserForm
# URL routing table. Order matters: the catch-all re_path at the end
# hands every unmatched URL to the single-page-app entry point, so all
# API/auth routes must be declared before it.
urlpatterns = [
    path('admin/', admin.site.urls),
    # One-step registration using the project's custom user form.
    path("accounts/register/",
        RegistrationView.as_view(
            form_class=CustomUserForm,
            success_url="/",
        ), name="django_registration_register"),
    path("accounts/",
        include("django_registration.backends.one_step.urls")),
    path("accounts/",
        include("django.contrib.auth.urls")),
    # REST API endpoints for users and questions.
    path("api/",
        include("users.api.urls")),
    path("api/",
        include("questions.api.urls")),
    path("api-auth/",
        include("rest_framework.urls")),
    # Token-based auth endpoints provided by django-rest-auth.
    path("api/rest-auth/",
        include("rest_auth.urls")),
    path("api/rest-auth/registration/",
        include("rest_auth.registration.urls")),
    # Catch-all: serve the front-end SPA for any other URL.
    re_path(r"^.*$", IndexTemplateView.as_view(), name="entry-point")
]
| [
"merveealpay@gmail.com"
] | merveealpay@gmail.com |
be45bcb1e674793f5bb4889a3cdcada07a013a45 | 5b71e2952f34dd3bb20148874d952fee06d31857 | /app/mf/crud/migrations/0100_auto_20210206_1820.py | 41f0df779972b165576ab9f2962e9261c1ec7a13 | [] | no_license | isela1998/facebook | a937917cddb9ef043dd6014efc44d59d034102b1 | a0f2f146eb602b45c951995a5cb44409426250c5 | refs/heads/master | 2023-07-18T02:14:50.293774 | 2021-08-28T03:26:06 | 2021-08-28T03:26:06 | 400,613,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # Generated by Django 3.1.1 on 2021-02-06 22:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen/redeclare Debts.rate as a
    DecimalField (30 digits, 2 decimal places, default 0.0)."""
    # Must be applied after the migration that introduced Debts.rate.
    dependencies = [
        ('crud', '0099_debts_rate'),
    ]
    operations = [
        migrations.AlterField(
            model_name='debts',
            name='rate',
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=30, verbose_name='Tasa(Bs.)'),
        ),
    ]
| [
"infantefernandezisela@gmail.com"
] | infantefernandezisela@gmail.com |
bcd00e175fe8619264aa97bc0a61bbf04c8d0fc0 | 61a887eaf972bda8839728292147bf923103e8a1 | /representations/explicit.py | 4cf27c5cfa3ff12a6320ead1c90aa30c691999e8 | [] | no_license | soltustik/RHG | 45f05fb215f0e2fbcd1b51b8a44b78ae09454b5b | c94de165285cf06f3d101c316173175328874848 | refs/heads/master | 2023-01-01T02:17:36.041309 | 2020-10-07T10:00:24 | 2020-10-07T10:00:24 | 300,230,313 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,063 | py | import heapq
from scipy.sparse import dok_matrix, csr_matrix
from scipy.stats import logistic
import numpy as np
from representations.matrix_serializer import load_vocabulary, load_matrix
class Explicit:
    """
    Base class for explicit (sparse count-based) word representations.

    Assumes the serialized matrix on disk stores e^PMI; __init__ takes the
    log to recover PMI values. Rows are words, columns are contexts.
    """
    def __init__(self, path, normalize=True):
        # wi/ci: presumably string -> row/column index; iw/ic the inverse
        # lists (index -> string), per load_vocabulary's naming — TODO confirm.
        self.wi, self.iw = load_vocabulary(path + '.words.vocab')
        self.ci, self.ic = load_vocabulary(path + '.contexts.vocab')
        self.m = load_matrix(path)
        # Serialized values are e^PMI; recover PMI.
        self.m.data = np.log(self.m.data)
        self.normal = normalize
        if normalize:
            self.normalize()
    def normalize(self):
        """L2-normalize each row of the sparse matrix in place.

        NOTE(review): an all-zero row has norm 0, so np.reciprocal yields
        inf for it and the row becomes NaN/inf after scaling — confirm
        zero rows cannot occur in practice.
        """
        m2 = self.m.copy()
        m2.data **= 2
        # Row-wise 1/||row||, applied via a diagonal scaling matrix.
        norm = np.reciprocal(np.sqrt(np.array(m2.sum(axis=1))[:, 0]))
        normalizer = dok_matrix((len(norm), len(norm)))
        normalizer.setdiag(norm)
        self.m = normalizer.tocsr().dot(self.m)
    def represent(self, w):
        """Return the sparse row vector for word w (all-zero row if OOV)."""
        if w in self.wi:
            return self.m[self.wi[w], :]
        else:
            return csr_matrix((1, len(self.ic)))
    def similarity_first_order(self, w, c):
        # Raises KeyError if w or c is out of vocabulary (unlike represent).
        return self.m[self.wi[w], self.ci[c]]
    def similarity(self, w1, w2):
        """
        Cosine similarity of w1 and w2.
        Assumes the vectors have been normalized.
        """
        return self.represent(w1).dot(self.represent(w2).T)[0, 0]
    def closest_contexts(self, w, n=10):
        """
        Top-n (score, context) pairs for word w, highest PMI first.
        Assumes the vectors have been normalized.
        """
        scores = self.represent(w)
        return heapq.nlargest(n, zip(scores.data, [self.ic[i] for i in scores.indices]))
    def closest(self, w, n=10):
        """
        Top-n (similarity, word) pairs most similar to w.
        Assumes the vectors have been normalized.
        """
        scores = self.m.dot(self.represent(w).T).T.tocsr()
        return heapq.nlargest(n, zip(scores.data, [self.iw[i] for i in scores.indices]))
class PositiveExplicit(Explicit):
    """Positive PMI (PPMI) representation with negative sampling.

    Shifting every PMI value down by log(neg) before clipping at zero
    mimics the effect of `neg` negative samples; all remaining negative
    entries are truncated to zero and dropped from the sparse matrix.
    """

    def __init__(self, path, normalize=True, neg=1):
        Explicit.__init__(self, path, normalize)
        shift = np.log(neg)
        # Subtract the shift and clip at zero in one vectorized step
        # (equivalent to shifting, then zeroing negative entries).
        self.m.data = np.maximum(self.m.data - shift, 0)
        self.m.eliminate_zeros()
        if normalize:
            self.normalize()
class BPMI(Explicit):
    """Binarized PMI: after shifting by log(neg), every strictly positive
    entry becomes 1 and everything else becomes 0 (and is dropped)."""

    def __init__(self, path, normalize=True, neg=1):
        Explicit.__init__(self, path, normalize)
        shifted = self.m.data - np.log(neg)
        # Boolean mask of strictly-positive entries, cast back to the
        # matrix dtype: positives -> 1, the rest -> 0.
        self.m.data = (shifted > 0).astype(shifted.dtype)
        self.m.eliminate_zeros()
        if normalize:
            self.normalize()
class Squashed(Explicit):
"""
Squashed SPMI
"""
def __init__(self, path, normalize=True, neg=1):
Explicit.__init__(self, path, normalize)
self.m.data -= np.log(neg)
self.m.data = logistic.cdf(self.m.data)
self.m.eliminate_zeros()
if normalize:
self.normalize() | [
"noreply@github.com"
] | soltustik.noreply@github.com |
f22af6b6113dc3091f9553766e30977fce309d38 | db5264994305e8c926f89cb456f33bd3a4d64f76 | /Sklep zielarski/account/urls.py | 8f5e4dae0dd33bbbd640d540c02340a153233e68 | [] | no_license | marcinpelszyk/Django | 7842e20d5e8b213c4cd42c421c1db9ab7d5f01d5 | aff2b9bd20e978a22a4a98994bf8424892d3c82f | refs/heads/main | 2023-05-01T19:20:37.267010 | 2021-05-18T17:51:53 | 2021-05-18T17:51:53 | 356,532,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | from django.contrib.auth import views as auth_views
from django.urls import path
from django.views.generic import TemplateView
from . import views
from .forms import PwdResetConfirmForm, PwdResetForm, UserLoginForm
app_name = 'account'
urlpatterns = [
path('login/', auth_views.LoginView.as_view(template_name='account/login.html',
form_class=UserLoginForm), name='login'),
path('logout/', auth_views.LogoutView.as_view(next_page='/account/login/'), name='logout'),
path('register/', views.account_register, name='register'),
path('activate/<slug:uidb64>/<slug:token>)/', views.account_activate, name='activate'),
# Reset password
path('password_reset/', auth_views.PasswordResetView.as_view(template_name="account/password_reset/password_reset_form.html",
success_url='password_reset_email_confirm',
email_template_name='account/password_reset/password_reset_email.html',
form_class=PwdResetForm), name='pwdreset'),
path('password_reset_confirm/<uidb64>/<token>', auth_views.PasswordResetConfirmView.as_view(template_name='account/password_reset/password_reset_confirm.html',
success_url='password_reset_complete/',
form_class=PwdResetConfirmForm), name="password_reset_confirm"),
path('password_reset/password_reset_email_confirm/',
TemplateView.as_view(template_name="account/password_reset/reset_status.html"), name='password_reset_done'),
path('password_reset_confirm/MTU/password_reset_complete/',
TemplateView.as_view(template_name="account/password_reset/reset_status.html"), name='password_reset_complete'),
# User dashboard
path('dashboard/', views.dashboard, name='dashboard'),
path('profile/edit/', views.edit_details, name='edit_details'),
path('profile/delete_user/', views.delete_user, name='delete_user'),
path('profile/delete_confirm/', TemplateView.as_view(template_name="account/dashboard/delete_confirm.html"), name='delete_confirmation'),
# Addresses
path('addresses/', views.view_address, name='addresses'),
path("add_address/", views.add_address, name="add_address"),
path("addresses/edit/<slug:id>/", views.edit_address, name="edit_address"),
path("addresses/delete/<slug:id>/", views.delete_address, name="delete_address"),
path("addresses/set_default/<slug:id>/", views.set_default, name="set_default"),
path("user_orders/", views.user_orders, name="user_orders"),
#Favorite list
path('favoritelist/', views.favoritelist, name='favoritelist'),
path('favoritelist/add_to_favoritelist/<int:id>', views.add_to_favoritelist, name='user_favorite'),
]
| [
"marcin.pelszyk90@gmail.com"
] | marcin.pelszyk90@gmail.com |
d107a85fb3ea25bf12a822113f007101ca0d82e5 | be82971799d625de703ad2c58d49d8bbb5b06fab | /TI-DSS-Python-ClientExample.py | bf9598bc77a30357a6e2880f0f13c8aa7ac66f07 | [
"Unlicense"
] | permissive | PML-Trey/TI-DSS-Python | 2f4c9fe9c01979c2d11b371b907d180a0aa3c422 | 1862ff434d2eb0d9ad04f3df03ffe5109218a300 | refs/heads/main | 2023-03-07T12:07:27.787467 | 2021-02-23T13:37:45 | 2021-02-23T13:37:45 | 341,315,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | import Pyro4
dss = Pyro4.Proxy("PYRONAME:dss") # use name server object lookup uri shortcut
# Setup target configuration
# Populated with a path/file of a target configuration made using CCS
dss.setConfig("./targetConfig.ccxml")
# Connect to debugger and configure this debug session
# Populate with the name of the connection and the target cpu
# For instance "Texas Instruments XDS110 USB Debug Probe_0/C28xx_CPU1"
# This returns the session name that is used in all subsequent calls
# that interact with the target device
sessionName = dss.startDebugSession("Connection Name")
print('Connected to debugger')
# Connect to the target CPU using the debugger
dss.connectTarget(sessionName)
print('Connected to targets')
# Program the target device
# Change application.out to be the path to your executable
# This can take a while depending on the device
print('Programming target')
dss.loadProgram(sessionName, "./application.out")
print("Done programming")
# End the debug session and stop the debug server (leaves the target running)
dss.endDebugSession(sessionName)
dss.stopDebugServer()
| [
"trey@polymorphiclabs.com"
] | trey@polymorphiclabs.com |
3073d91c6b25644a57b79bd4617df05083ecfa66 | 9d1b52e3fa86f0873d9f03b79054273a43896e15 | /source code for BOSSdataset/main.py | ce3a8ec23e1d5a1ecd7a39728b0a36178b820592 | [] | no_license | zhenglisec/DeepKeyStego | 5f4b18e3b190f56aa3faa7833a114290fb09e263 | d40a4661414f21b69f2e2023fda094db668df242 | refs/heads/master | 2021-06-17T08:46:04.034153 | 2021-03-19T15:05:55 | 2021-03-19T15:05:55 | 182,971,396 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,304 | py | from __future__ import print_function
import argparse
import os
import random
import warnings
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from model_boss import SSIM
from model_boss.encoder import Encoder
from model_boss.decoder import syDecoder, asyDecoder
from model_boss.discriminator import Discriminator
from model_boss.pkgenerator import pkgenerator
GPU = '0,1,2,3,4,5,6,7'
os.environ['CUDA_VISIBLE_DEVICES'] = GPU
parser = argparse.ArgumentParser(
description='Pytorch Implement with ImageNet')
parser.add_argument('--type', default='symmeric', help='symmeric or asymmeric')
parser.add_argument('--dataroot', default='/data/lizheng/DATASET/BOSSbase_1.01_20/') #'/data/lizheng/DATASET/BOSSbase_1.01/'
parser.add_argument('--train', type=bool, default=True)
parser.add_argument('--num_epochs', type=int, default=100)
parser.add_argument('--batchsize', type=int, default=32)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--beta', '--list', nargs='+',
default=[0.5, 0.5, 0.03, 0.1])
parser.add_argument('--seed', default=22, type=int,
help='seed for initializing training. ')
parser.add_argument('--secret_len', type=int, default=512*256)
parser.add_argument('--key_len', type=int, default=1024)
args = parser.parse_args()
if torch.cuda.is_available():
cudnn.benchmark = True
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
cudnn.deterministic = True
warnings.warn('You have cho5sen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
print('==> Preparing data..')
transform_train = transforms.Compose([
#transforms.Resize((128, 128)),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
# transforms.Resize((128, 128)),
transforms.ToTensor(),])
trainset = torchvision.datasets.ImageFolder(
root=args.dataroot+'train', transform=transform_train)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=args.batchsize, shuffle=True, num_workers=2, drop_last=True)
testset = torchvision.datasets.ImageFolder(
root=args.dataroot+'test', transform=transform_test)
testloader = torch.utils.data.DataLoader(
testset, batch_size=args.batchsize, shuffle=False, num_workers=2, drop_last=True)
# Adversarial ground truths
valid = torch.cuda.FloatTensor(args.batchsize, 1).fill_(1.0)
fake = torch.cuda.FloatTensor(args.batchsize, 1).fill_(0.0)
best_real_acc, best_wm_acc, best_wm_input_acc = 0, 0, 0
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
train_loss, test_loss = [[], []], [[], []]
train_acc, test_acc = [[], []], [[], []]
# Model
print('==> Building model..')
if args.type == 'symmeric':
Decoder = syDecoder(sec_len = args.secret_len, output_function=nn.Sigmoid)
elif args.type == 'asymmeric':
Decoder = asyDecoder(sec_len = args.secret_len, output_function=nn.Sigmoid)
Pkgenerator = pkgenerator()
Encoder = Encoder(sec_len = args.secret_len)
Discriminator = Discriminator()
Encoder = nn.DataParallel(Encoder.cuda())
Decoder = nn.DataParallel(Decoder.cuda())
Discriminator = nn.DataParallel(Discriminator.cuda())
if args.type == 'asymmeric':
Pkgenerator = nn.DataParallel(Pkgenerator.cuda())
# loss function
criterionE_mse = nn.MSELoss().cuda()
criterionE_ssim = SSIM().cuda()
criterionD = nn.L1Loss().cuda()
optimizerE = optim.Adam(Encoder.parameters(), lr=args.lr, betas=(0.5, 0.999))
optimizerD = optim.Adam(Decoder.parameters(), lr=args.lr, betas=(0.5, 0.999))
criterionDis = nn.BCEWithLogitsLoss().cuda()
optimizerDis = optim.Adam(Discriminator.parameters(), lr=args.lr, betas=(0.5, 0.999))
if args.type == 'asymmeric':
optimizerPkGen = optim.Adam(Pkgenerator.parameters(), lr=args.lr, betas=(0.5, 0.999))
print(Encoder)
print(Decoder)
print(Discriminator)
def train(epoch):
    """Run one training epoch of the steganographic GAN.

    Uses the module-level networks (Encoder/Decoder/Discriminator and,
    in the asymmetric mode, Pkgenerator), their optimizers, losses and
    data loader. Each batch: (1) update the Discriminator on real vs.
    stego images, (2) update Encoder/Decoder (and Pkgenerator) on the
    combined hiding loss.  NOTE: 'symmeric'/'asymmeric' are the literal
    (misspelled) mode strings used throughout this script — do not fix
    one occurrence without fixing all.
    """
    print('\nEpoch: %d' % epoch)
    Encoder.train()
    Decoder.train()
    Discriminator.train()
    if args.type == 'asymmeric':
        Pkgenerator.train()
    for batch_idx, (input, _) in enumerate(trainloader):
        # Keep only the first channel (grayscale BOSS images).
        input = input.cuda()[:, 0:1, :, :]
        # Fresh random payload bits and key bits for every batch.
        messages = torch.from_numpy(np.random.randint(2, size=(args.batchsize, args.secret_len))).float().cuda()
        skey = torch.from_numpy(np.random.randint(2, size=(args.batchsize, args.key_len))).float().cuda() # secret key
        if args.type == 'asymmeric':
            pkey = Pkgenerator(skey)
        #############optimize Discriminator##############
        optimizerDis.zero_grad()
        if args.type == 'symmeric':
            stego = Encoder(input, messages, skey)
        elif args.type == 'asymmeric':
            stego = Encoder(input, messages, pkey)
        # detach(): discriminator gradients must not flow into the Encoder.
        stego_dis_output = Discriminator(stego.detach())
        real_dis_output = Discriminator(input)
        loss_D_stego = criterionDis(stego_dis_output, fake)
        loss_D_real = criterionDis(real_dis_output, valid)
        loss_D = loss_D_stego + loss_D_real
        loss_D.backward()
        optimizerDis.step()
        ################optimize Encoder Decoder or Pkgenerator#############
        optimizerE.zero_grad()
        optimizerD.zero_grad()
        if args.type == 'symmeric':
            decoded_messages = Decoder(stego, skey)
        elif args.type == 'asymmeric':
            optimizerPkGen.zero_grad()
            decoded_messages = Decoder(stego, pkey, skey)
        stego_dis_output = Discriminator(stego)
        # Hiding loss: image fidelity (MSE + SSIM), realism (adversarial)
        # and message recovery (L1), weighted by args.beta.
        loss_mse = criterionE_mse(input, stego)
        loss_ssim = criterionE_ssim(input, stego)
        loss_adv = criterionDis(stego_dis_output, valid)
        loss_message = criterionD(decoded_messages, messages)
        loss_H = args.beta[0] * loss_mse + args.beta[1] * \
            (1 - loss_ssim) + args.beta[2] * loss_adv + args.beta[3] * loss_message
        loss_H.backward()
        optimizerE.step()
        optimizerD.step()
        if args.type == 'asymmeric':
            optimizerPkGen.step()
        # NOTE(review): this divides the total count of correct bits by
        # batchsize, i.e. correct bits *per sample*, not a 0-1 accuracy.
        decoded_rounded = torch.round(decoded_messages.detach())
        bitwise_avg_correct = torch.sum(torch.eq(messages, decoded_rounded))/args.batchsize
        print('[%d/%d][%d/%d] Loss D: %.4f () Loss_H: %.4f (mse: %.4f ssim: %.4f adv: %.4f) bitcorrect: %.4f' % (
            epoch, args.num_epochs, batch_idx, len(trainloader),
            loss_D.item(), loss_H.item(), loss_mse.item(
            ), loss_ssim.item(), loss_adv.item(), bitwise_avg_correct))
def test(epoch):
    """Evaluate encode/decode on the test set and save sample stego images.

    Runs with gradients disabled; for each batch prints the decoded-bit
    statistic and writes a 10-cover/10-stego comparison strip to
    results/<mode>.pgm (overwritten every batch).
    """
    Encoder.eval()
    Decoder.eval()
    if args.type == 'asymmeric':
        Pkgenerator.eval()
    with torch.no_grad():
        for batch_idx, (input, _) in enumerate(testloader):
            input = input.cuda()[:, 0:1, :, :]
            # Random payload and key, same construction as in training.
            messages = torch.from_numpy(np.random.randint(2, size=(args.batchsize, args.secret_len))).float().cuda()
            skey = torch.from_numpy(np.random.randint(2, size=(args.batchsize, args.key_len))).float().cuda()
            if args.type == 'symmeric':
                stego = Encoder(input, messages, skey)
                decoded_messages = Decoder(stego, skey)
                save_img = 'results/symmeric.pgm'
            if args.type == 'asymmeric':
                pkey = Pkgenerator(skey)
                stego = Encoder(input, messages, pkey)
                decoded_messages = Decoder(stego, pkey, skey)
                save_img = 'results/asymmeric.pgm'
            decoded_rounded = torch.round(decoded_messages.detach())#.cpu().numpy().round().clip(0, 1)
            # NOTE(review): correct bits per sample, not a 0-1 accuracy.
            bitwise_avg_correct = torch.sum(torch.eq(messages, decoded_rounded))/args.batchsize
            # Top row: first 10 covers; bottom row: their stego versions.
            concat_img = torch.cat([input[0:10], stego[0:10]], dim=0)
            torchvision.utils.save_image(concat_img, save_img, nrow=10, padding=0)
            print('BitCorrect: %.4f' % (bitwise_avg_correct))
# Main loop: evaluation runs before each training epoch, so the first
# test() call reports performance of the untrained networks.
for epoch in range(args.num_epochs):
    test(epoch)
    train(epoch)
| [
"zhenglisec@gmail.com"
] | zhenglisec@gmail.com |
e136760c66ba06b8f27043bc427a323157a0c0a0 | a0e4e123e5eb5f91eb5edd7d6d6bac268ca43c22 | /holistic.py | 0c4512029023a4b59d23865638c1ddf4746531f3 | [] | no_license | borodedamie/pose-python | eb50b322d1a327a88b3c851b7c1f650eb1a4d67f | 728135c4de033aeec5d2fcf4c3fc98e1dc4de56f | refs/heads/main | 2023-08-21T13:06:51.120094 | 2021-10-05T06:06:00 | 2021-10-05T06:06:00 | 412,929,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_holistic = mp.solutions.holistic
cap = cv2.VideoCapture(0)
with mp_holistic.Holistic(
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as holistic:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
image.flags.writeable = False
results = holistic.process(image)
# Draw landmark annotation on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
mp_drawing.draw_landmarks(
image,
results.face_landmarks,
mp_holistic.FACEMESH_CONTOURS,
landmark_drawing_spec=None,
connection_drawing_spec=mp_drawing_styles
.get_default_face_mesh_contours_style())
mp_drawing.draw_landmarks(
image,
results.pose_landmarks,
mp_holistic.POSE_CONNECTIONS,
landmark_drawing_spec=mp_drawing_styles
.get_default_pose_landmarks_style())
cv2.imshow('MediaPipe Holistic', image)
if cv2.waitKey(5) & 0xFF == 27:
break
cap.release() | [
"opeoluborode@yahoo.com"
] | opeoluborode@yahoo.com |
540a36640e92a49cebfdc1d2114b07b6d1012de4 | 90909fe5a9f9fdf65bd5b1e7374f5eee0afad325 | /python-data-science-toolbox-part-2/generator-objects-1.py | 37ae50931b731cffcfbe0e6f9149a0110aec0ef7 | [] | no_license | fejesgergorichard/DataCamp | c4a75ecb2f97347c87b55357ac915fd3c1cd7a7f | 5307016e4b7da9569e08d5923a9f6e1283da6c65 | refs/heads/master | 2022-09-05T12:37:37.082755 | 2020-05-18T17:22:44 | 2020-05-18T17:22:44 | 250,319,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | # A generator object is similar to a list comprehension, but it is defined by ( ) , not by [ ]
# This object, unlike the list comprehensions, does not create the list. It produces a generator
# The generator can be called as an iterable, and generates the required values on the fly, thus saving memory
# Create generator object: result (note the parentheses, not brackets)
result = (num for num in range(31))
# Print the first 5 values by advancing the generator manually
print(next(result))
print(next(result))
print(next(result))
print(next(result))
print(next(result))
# Print the rest of the values: the generator remembers its position,
# so this loop yields 5 through 30 (the first five are already consumed)
for value in result:
    print(value)
| [
"fejesgergorichard@gmail.com"
] | fejesgergorichard@gmail.com |
a55f59281307acfcc9afc41d05c3550c1e1f0745 | f77e219f6ab6794c8c52bcb06a936da02b381398 | /libs/rl_salk/agents/sarsa_learner.py | 25cd9e3adbd157433c7fa1fd7d9d318a67fa587f | [
"MIT"
] | permissive | rl-salk/rl-salk | 96a5608e66134f8d5d305d30769d15f0ea372aad | 2e63020fc76c81f863052ccce749353644e2fc9e | refs/heads/master | 2020-06-24T05:21:02.937984 | 2019-08-08T02:14:59 | 2019-08-08T02:14:59 | 198,861,132 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | from rl_salk.agents.td_learner import TDLearner
class SarsaLearner(TDLearner):
    """On-policy TD learner: SARSA is TD learning whose update target
    follows the behavior policy rather than a greedy target policy."""
    def learn(self, prev_state, action, state, reward, done):
        # Delegate to the generic TD update, pinning the target policy.
        super().learn(prev_state, action, state, reward, done,
                      target_policy='behavior')
| [
"daniel.jaffe.butler@gmail.com"
] | daniel.jaffe.butler@gmail.com |
1621790e8faa136dc64077fdd7cd47ca87f200ae | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/85/usersdata/228/54502/submittedfiles/funcoes1.py | 881c334b3cea4bdaa4890004b8352ae9eab83fdf | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | # -*- coding: utf-8 -*-
n=int(input('digite o número de elementos:'))
lista1=[]
lista2=[]
lista3=[]
for i in range(0,n,1):
elemento1=int(input('digite o elemento:'))
lista1.append(elemento1)
elemento2=int(input('digite o elemento:'))
lista2.append(elemento2)
elemento3=int(input('digite o elemento:'))
lista3.append(elemento3)
def crescente(a):
    """Return True if list a is in strictly increasing order.

    The original returned after inspecting only the first pair and
    indexed a[i+1] one past the end of the list; this version compares
    every adjacent pair. Empty and single-element lists are vacuously
    increasing.
    """
    return all(a[i] < a[i + 1] for i in range(len(a) - 1))
def decrescente(a):
    """Return True if list a is in strictly decreasing order.

    Fixes the original, which decided from the first pair alone and
    read a[i+1] past the end of the list. Empty and single-element
    lists are vacuously decreasing.
    """
    return all(a[i] > a[i + 1] for i in range(len(a) - 1))
def elementosiguais(a):
    """Return True if every element of list a is equal.

    Fixes the original, which compared only the first pair and read
    a[i+1] past the end of the list. Empty and single-element lists
    count as all-equal.
    """
    return all(a[i] == a[i + 1] for i in range(len(a) - 1))
# For each list print, in order: strictly increasing, strictly
# decreasing, all elements equal ('S' = yes, 'N' = no) — same output
# sequence as the original nine if/if pairs.
# Fix: the original called the misspelled name 'crescent', which raised
# NameError at runtime; the defined function is 'crescente'.
for lista in (lista1, lista2, lista3):
    print('S' if crescente(lista) else 'N')
    print('S' if decrescente(lista) else 'N')
    print('S' if elementosiguais(lista) else 'N')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
fb1e1b39572b14702c97b0a9db81638e716cea2e | 4be8f1143abc8e585cc5c751984b9d861a9254dc | /map/migrations/0005_auto_20180212_2334.py | 14a88da48d451e0ddb22e63348d9aae08ffee250 | [] | no_license | jacksty/West-Marches-Website | b6ec14625a7c534e83008d47a22082a50050ec07 | f00223dace7f1eb2c3013265856a5c62decf7be1 | refs/heads/master | 2021-04-27T10:44:56.511037 | 2018-02-26T06:38:04 | 2018-02-26T06:38:04 | 122,544,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # Generated by Django 2.0.2 on 2018-02-13 04:34
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename Edge.node_from/node_to to the
    graph-conventional names source/target (data is preserved)."""
    # Must follow the migration that added Edge.map.
    dependencies = [
        ('map', '0004_edge_map'),
    ]
    operations = [
        migrations.RenameField(
            model_name='edge',
            old_name='node_from',
            new_name='source',
        ),
        migrations.RenameField(
            model_name='edge',
            old_name='node_to',
            new_name='target',
        ),
    ]
| [
"jacksty@gmx.com"
] | jacksty@gmx.com |
6ef9ff461f84c97d055693cb74ea084ecc008185 | 7173b2d4b647263449174a1c1acd326ee0d85467 | /certbot-dns-ovh/docs/conf.py | 57194666ec5ed0e6633a0bb7e95e43796aefd3f9 | [
"MIT",
"Apache-2.0"
] | permissive | robstradling/certbot | 0ee92d2f362d69342900a6be5e19175666bbab58 | d0f1a3e205902f15b9608ef514cc1f0685da25ea | refs/heads/master | 2020-06-01T04:44:20.728356 | 2019-06-05T21:51:17 | 2019-06-05T21:51:17 | 190,642,181 | 4 | 0 | NOASSERTION | 2019-06-06T20:02:01 | 2019-06-06T20:02:01 | null | UTF-8 | Python | false | false | 5,779 | py | # -*- coding: utf-8 -*-
#
# certbot-dns-ovh documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 12 10:14:31 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode']
autodoc_member_order = 'bysource'
autodoc_default_flags = ['show-inheritance', 'private-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'certbot-dns-ovh'
copyright = u'2018, Certbot Project'
author = u'Certbot Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0'
# The full version, including alpha/beta/rc tags.
release = u'0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
default_role = 'py:obj'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# http://docs.readthedocs.org/en/latest/theme.html#how-do-i-use-this-locally-and-on-read-the-docs
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'certbot-dns-ovhdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'certbot-dns-ovh.tex', u'certbot-dns-ovh Documentation',
u'Certbot Project', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'certbot-dns-ovh', u'certbot-dns-ovh Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'certbot-dns-ovh', u'certbot-dns-ovh Documentation',
author, 'certbot-dns-ovh', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'acme': ('https://acme-python.readthedocs.org/en/latest/', None),
'certbot': ('https://certbot.eff.org/docs/', None),
}
| [
"bmw@users.noreply.github.com"
] | bmw@users.noreply.github.com |
36ff8d5e5b50b26b9b5c7383e666b8d5b76ec9ec | 90bd88fbc3676551432d4e4f1ad64260e1d62573 | /Data_processing/feature_extraction.py | 8bb08b7e2599bb30089fa5f215fbb527ea21caca | [] | no_license | LeanneNortje/MultimodalSpeech-to-ImageMatching | 648d009dbbb7a2f6c127e31a15193ab989a58998 | a0ce92dc95d5052fbcd53a9a41cd3b6020345f9d | refs/heads/master | 2023-04-28T10:11:45.132063 | 2021-04-26T13:36:32 | 2021-04-26T13:36:32 | 216,018,780 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,895 | py | #!/usr/bin/env python
#_________________________________________________________________________________________________
#
# Author: Leanne Nortje
# Year: 2020
# Email: nortjeleanne@gmail.com
#_________________________________________________________________________________________________
#
# This sets up the feature extraction of a spesific dataset and all the information required and
# associated with this dataset.
#
from datetime import datetime
from os import path
import argparse
import glob
import numpy as np
import os
from os import path
from scipy.io import wavfile
import matplotlib.pyplot as plt
import logging
import tensorflow as tf
import subprocess
import sys
from tqdm import tqdm
sys.path.append("..")
from paths import data_path
data_path = path.join("..", data_path)
import speech_library
#_____________________________________________________________________________________________________________________________________
#
# Argument function
#
#_____________________________________________________________________________________________________________________________________
def arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset", type=str, choices=["buckeye", "TIDigits"],
default="buckeye"
)
parser.add_argument(
"--feat_type", type=str, choices=["fbank", "mfcc"],
default="fbank"
)
return parser.parse_args()
#_____________________________________________________________________________________________________________________________________
#
# Main
#
#_____________________________________________________________________________________________________________________________________
def dataset_library(args):
dataset_lib = {}
if args.dataset == "buckeye":
dataset_lib["feats_type"] = args.feat_type
dataset_lib["dataset"] = args.dataset
dataset_lib["out_dir"] = args.dataset
dataset_lib["wavs"] = path.join(data_path, args.dataset, "*", "*.wav")
dataset_lib["vads"] = path.join(data_path, dataset_lib["dataset"], "english.wrd")
dataset_lib["training_speakers_path"] = path.join(data_path, dataset_lib["dataset"], "devpart1_speakers.list")
dataset_lib["validation_speakers_path"] = path.join(data_path, dataset_lib["dataset"], "devpart2_speakers.list")
dataset_lib["testing_speakers_path"] = path.join(data_path, dataset_lib["dataset"], "zs_speakers.list")
dataset_lib["labels_to_exclude"] = ["SIL", "SPN"]
dataset_lib["include_labels"] = True
dataset_lib["labels_given"] = True
dataset_lib["extract_words_or_not"] = True
elif args.dataset == "TIDigits":
dataset_lib["feats_type"] = args.feat_type
dataset_lib["dataset"] = args.dataset
dataset_lib["out_dir"] = args.dataset
dataset_lib["wavs"] = path.join(data_path, args.dataset, "tidigits_wavs", "*", "*", "*","*.wav")
dataset_lib["vads"] = path.join(data_path, dataset_lib["dataset"], "tidigits_fa", "words.wrd")
dataset_lib["training_speakers_path"] = path.join(data_path, dataset_lib["dataset"], "tidigits_fa", "train_speakers.list")
dataset_lib["validation_speakers_path"] = path.join(data_path, dataset_lib["dataset"], "tidigits_fa", "val_speakers.list")
dataset_lib["testing_speakers_path"] = path.join(data_path, dataset_lib["dataset"], "tidigits_fa", "test_speakers.list")
dataset_lib["labels_to_exclude"] = []
dataset_lib["include_labels"] = True
dataset_lib["labels_given"] = True
dataset_lib["extract_words_or_not"] = True
return dataset_lib
def main():
args = arguments()
lib = dataset_library(args)
feats = speech_library.extract_features(lib)
speech_library.extract_segments(feats, lib)
if __name__ == "__main__":
main() | [
"nortjeleanne@gmail.com"
] | nortjeleanne@gmail.com |
2b63fb46758a1f007ae3ed5ce851d0c3a99bb6e0 | f5788e1e1d8522c0d4ae3b4668faa5537680cb07 | /mutual_sale_discount_total/__openerp__.py | 55acff682f3f211f074ab7660a836cc839f366de | [] | no_license | mutualSecurity/mutual-erp-residential | 8549e179af6df1ffceadf42369d69d4dd44f07ac | 88debefc662dd1510a1d52a877ede4673c319532 | refs/heads/master | 2021-11-11T13:33:37.878051 | 2021-11-02T10:14:49 | 2021-11-02T10:14:49 | 71,433,705 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | {
'name': 'Sale Discount on Total Amount',
'version': '1.0',
'category': 'sale',
'sequence': 6,
'summary': "Discount on total in Sale and invoice with Discount limit and approval",
'author': 'Cybrosys Techno Solutions',
'company': 'Cybrosys Techno Solutions',
'website': 'http://www.cybrosys.com',
'description': """
Sale Discount for Total Amount
=======================
Module to manage discount on total amount in Sale.
as an specific amount or percentage
""",
'depends': ['sale','mutual_sales', 'base', 'stock','mutual_inventory','mutual_reports','mutual_followups','mutual_project','mutual_mass_editing'],
'data': [
'views/sale_view.xml',
'views/account_invoice_view.xml',
'views/invoice_report.xml',
'views/sale_order_report.xml',
'views/sale_discount_approval_view.xml',
'views/sale_discount_approval_workflow.xml'
],
'demo': [
],
'installable': True,
'auto_install': False,
}
| [
"pk_bscs@yahoo.com"
] | pk_bscs@yahoo.com |
3a872299b9b73a04afddd47ddc4dda9480c8f34e | f9c12c1c04b51ec62d7d671c02eb471a0afaccda | /tugas1/server3.py | ae2f0a360bf436a78f36b210dbfe282764f61578 | [] | no_license | bastianf19/progjar-b-its-2020 | dc92dbeb980f2c2391232626e4a65941978530c2 | 95405279b8de26c5d89cc39f0b360c7c0a78fb2a | refs/heads/master | 2020-12-27T23:03:50.153132 | 2020-04-24T09:53:08 | 2020-04-24T09:53:08 | 238,096,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | import sys
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', 31002)
print(sys.stderr, 'starting up on %s port %s' % server_address)
sock.bind(server_address)
sock.listen(1)
while True:
print(sys.stderr, 'waiting for a connection')
connection, client_address = sock.accept()
print(sys.stderr, 'connection from', client_address)
while True:
data = connection.recv(64).decode()
print(sys.stderr, 'received "%s"' % data)
if data:
print(sys.stderr, 'sending data back to the client')
connection.sendall(data.encode())
else:
print(sys.stderr, 'no more data from', client_address)
break
connection.close()
| [
"bastian.farandy@gmail.com"
] | bastian.farandy@gmail.com |
b59262788ee519c9e2a4555e7cb75382fba2da3d | ca920a476e43b68d6d041fb5af098cecf2dbbbd0 | /py-list-vulnerabilities/smartcheck.py | 6c73fdeb20c2d2f5c4cbf17a130a430a41b7c19e | [
"Apache-2.0"
] | permissive | felipecosta09/smartcheck-samples | 8b74a8773bfb21e82b03eccd9f9090bdd6bdfca3 | bdaade3b2c057abbdc1d437132ba043b14a00d14 | refs/heads/master | 2021-01-02T18:59:15.320466 | 2019-07-24T12:58:09 | 2019-07-24T13:09:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,520 | py | #
# Copyright 2019 Trend Micro and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import requests
from docker_image import reference
class _SlightlyImprovedSession(requests.Session):
"""
A _SlightlyImprovedSession keeps track of the base URL and any kwargs that
should be passed to requests.
When you make a `get` or `post` request, the URL you provide will be
`urljoin`'d with the base URL, so relative URLs will work pretty well.
Technically, this is totally broken, because relative URLs should be
evaluated relative to the resource that provided the URL, but for our
purposes this works perfectly and really simplifies life, so we're
going to ignore the pedants.
"""
def __init__(self, base, **kwargs):
super(_SlightlyImprovedSession, self).__init__()
self.base = base
self.kwargs = kwargs
def post(self, url, **kwargs):
for k in self.kwargs:
if k not in kwargs:
kwargs[k] = self.kwargs[k]
return super(_SlightlyImprovedSession, self).post(
requests.compat.urljoin(self.base, url),
**kwargs
)
def get(self, url, **kwargs):
for k in self.kwargs:
if k not in kwargs:
kwargs[k] = self.kwargs[k]
return super(_SlightlyImprovedSession, self).get(
requests.compat.urljoin(self.base, url),
**kwargs
)
def delete(self, url, **kwargs):
for k in self.kwargs:
if k not in kwargs:
kwargs[k] = self.kwargs[k]
return super(_SlightlyImprovedSession, self).delete(
requests.compat.urljoin(self.base, url),
**kwargs
)
class Smartcheck(_SlightlyImprovedSession):
"""
A Smartcheck object provides some convenience methods for performing actions
using the Deep Security Smart Check API.
"""
def __init__(self, base, user, password, verify=True, trace=False, **kwargs):
"""Authenticate with the service and return a session."""
if not base.startswith('http'):
base = 'https://' + base
if not verify:
import urllib3
urllib3.disable_warnings()
# Turn on trace logging if requested
if trace:
import logging
try:
import http.client as http_client
except ImportError:
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger('requests.packages.urllib3')
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
super(Smartcheck, self).__init__(base, verify=verify, **kwargs)
self.headers.update({'X-Api-Version': '2018-05-01'})
self.credentials = {
'user': {
'userID': user,
'password': password
}
}
def __enter__(self):
"""
Context manager method that's called when someone creates a
with Smartcheck(...) as session:
block. We'll start the session when the block is entered.
"""
# Create the session with the credentials that were provided in
# the constructor.
response = self.post('/api/sessions', json=self.credentials)
if not response.ok:
raise CreateSessionException(response)
# Parse the created session
session = response.json()
# Save the session href (needed for later refreshes (TODO)
# or to terminate the session when we're done).
self.session_href = session['href']
# Put the session token into the `Authorization` header so all
# requests in this session get authenticated and authorized.
self.headers.update({
'Authorization': f'Bearer {session["token"]}'
})
return self
def __exit__(self, exception_type, exception_value, exception_traceback):
"""
Context manager method that's called when someone exits a
with Smartcheck(...) as session:
block. We'll use this trigger to terminate the session.
"""
# Send `DELETE {href}` to terminate the session
self.delete(self.session_href)
# Don't eat any exception that might be coming...
return False
def _list(self, url, exception_kind, key, **kwargs):
"""
Generic "list anything in Deep Security Smart Check" method. Is a generator that
will yield the individual items being listed and retrieve additional pages of data
as needed until there are no more.
The way listing resources works in the Deep Security Smart Check API is as follows:
1. Perform `GET /api/things` to get the first page of things.
2. The response will have the structure `{ things: [...] }`
and if there is more data there will be a header `Link: <...>;rel="next"`
that will take you to the next page. If there is no more data,
the `Link rel=next` header won't be there.
This method is the generic implementation that all of the `list*` methods will call.
"""
# Get the first page of results
response = self.get(url, **kwargs)
while True:
# If the request failed, bail out -- we've got a specific exception type
# for each kind of thing we list, so raise the appropriate exception
if not response.ok:
raise exception_kind(response)
# All of the `list*` responses have the same structure:
# { [key]: [items], next: "cursor?" }
# Use the key to extract the items and yield them one at a time.
for item in response.json()[key]:
yield item
# We use the link in the `Link: rel='next'` header as it's easier
# than building a URL based on the cursor in the body. If there is
# no header then there's no more data.
if not 'next' in response.links:
break
# Extract the URL from the `Link: rel='next'` header.
url = response.links['next']['url']
# Get the next page of results, we'll see you again at the top of the loop
response = self.get(url)
def list_scans(self, image_ref=None, limit=None, **kwargs):
"""List scans that match an image reference."""
# If the caller provided any parameters (like `limit`), then extract them here
# as we've got more to add...
params = kwargs.get('params', {})
# Delete `params` from `kwargs` as we'll be passing them in explicitly
if 'params' in kwargs:
del(kwargs['params'])
if image_ref is not None:
# Parse the image reference into its component parts
image_ref = reference.Reference.parse(image_ref)
# The "hostname" part still needs to get split into the registry and repository
registry, repository = image_ref.split_hostname()
# Add query parameters to search on the image reference bits
params.update({
'registry': registry,
'repository': repository,
'tag': image_ref['tag'],
'digest': image_ref['digest'],
'exact': True,
})
if limit is not None:
params['limit'] = limit
# Yield the resulting scans
for scan in self._list('/api/scans', ListScansException, 'scans', params=params, **kwargs):
yield scan
def create_scan(self, image_ref, image_pull_auth=None, insecure_skip_registry_tls_verify=False):
"""Start a scan."""
# Parse the image reference into its component parts
parsed_ref = reference.Reference.parse(image_ref)
# The "hostname" part still needs to get split into the registry and repository
registry, repository = parsed_ref.split_hostname()
# Parse the provided image_pull_auth into an object if it's a string (assuming JSON).
# It will get serialized back into JSON in the request momentarily.
if isinstance(image_pull_auth, str):
image_pull_auth = json.loads(image_pull_auth)
# Send the request
response = self.post('/api/scans',
json={
'name': image_ref,
'source': {
'type': 'docker',
'registry': registry,
'repository': repository,
'tag': parsed_ref['tag'],
'digest': parsed_ref['digest'],
'credentials': image_pull_auth,
'insecureSkipVerify': insecure_skip_registry_tls_verify,
}
})
if not response.ok:
raise CreateScanException(response)
# Return the parsed scan object
return response.json()
def list_malware(self, scan):
"""List the malware found during a scan."""
# Scan results have malware identified per-layer to help folks identify where
# in their process they need to resolve the issue. This means we need to go
# through the layers in order to find any malware findings.
for layer in scan['details']['results']:
if 'malware' in layer:
for package in self._list(layer['malware'], ListMalwareException, 'malware'):
yield package
def list_content_findings(self, scan):
"""List the content findings found during a scan."""
# Scan results have content findings identified per-layer to help folks identify where
# in their process they need to resolve the issue. This means we need to go
# through the layers in order to find any content findings.
for layer in scan['details']['results']:
if 'contents' in layer:
for finding in self._list(layer['contents'], ListContentFindingsException, 'contents'):
yield finding
# Scan results have vulnerabilities identified per-layer (mostly) to help folks identify where
# in their process they need to resolve the issue. This means we need to go
# through the layers in order to find any vulnerability findings.
def list_vulnerable_packages(self, scan):
"""List the vulnerable packages found during a scan."""
for layer in scan['details']['results']:
if 'vulnerabilities' in layer:
for package in self._list(layer['vulnerabilities'], ListVulnerabilitiesException, 'vulnerabilities'):
yield package
# Scan results have checklist findings identified per-checklist and per-profile within
# each checklist. This means we need to go through each checklist and profile to find
# all the results.
def list_checklist_findings(self, scan):
"""List the checklist findings found during a scan."""
if 'checklists' in scan['details']:
for checklist in self._list(scan['details']['checklists'], ListChecklistsException, 'checklists'):
# Save details about the checklist so we can report it with the result
# without creating a new object for each result. This will help if the
# consumer wants to re-create the tree.
checklist_info = {
'id': checklist['id'],
'href': checklist['href'],
'title': checklist.get('title', None),
'version': checklist.get('version', None),
}
for profile in checklist['profiles']:
# Save details about the profile so we can report it with the result
# without creating a new object for each result. This will help if the
# consumer wants to re-create the tree.
profile_info = {
'id': profile['id'],
'title': profile.get('title', None),
}
for rule in self._list(profile['rules'], ListChecklistProfileRuleResultsException, 'rules'):
result = rule['result']
# "pass" and "not-applicable" aren't really findings... we may want a separate
# method to get all checklist results
if result == 'pass' or result == 'not-applicable':
continue
yield {
'checklist': checklist_info,
'profile': profile_info,
'result': rule
}
class CreateException(Exception):
def __init__(self, kind, response):
super(CreateException, self).__init__(
f'could not create {kind}: {response}'
)
self.response = response
class ListException(Exception):
def __init__(self, kind, response):
super(ListException, self).__init__(
f'*** WARNING: could not retrieve {kind}: {response}'
)
class CreateSessionException(CreateException):
def __init__(self, response):
super(CreateSessionException, self).__init__('session', response)
class CreateScanException(CreateException):
def __init__(self, response):
super(CreateScanException, self).__init__('scan', response)
class ListScansException(ListException):
def __init__(self, response):
super(ListScansException, self).__init__('scans', response)
class ListMalwareException(ListException):
def __init__(self, response):
super(ListMalwareException, self).__init__(
'malware', response
)
class ListVulnerabilitiesException(ListException):
def __init__(self, response):
super(ListVulnerabilitiesException, self).__init__(
'vulnerabilities', response
)
class ListContentFindingsException(ListException):
def __init__(self, response):
super(ListContentFindingsException, self).__init__(
'content findings', response
)
class ListChecklistsException(ListException):
def __init__(self, response):
super(ListChecklistsException, self).__init__(
'checklists', response
)
class ListChecklistProfileRuleResultsException(ListException):
def __init__(self, response):
super(ListChecklistProfileRuleResultsException, self).__init__(
'checklist profile rule results', response
)
| [
"Geoff_Baskwill@trendmicro.com"
] | Geoff_Baskwill@trendmicro.com |
677854622ff234e79b8d645e1b88e6e7804ead61 | 4f1299f5af48ac43735dad9a26e091ed26a606ad | /Prac.py | c8072a990624301f4f25d2eebc8056071cc56de0 | [
"MIT"
] | permissive | BikashThapa/PythonPrograms | e163b7c62aced8c77ba4c6ee664a2bcf0c3f025a | f2e81771d6767fd96fea4622ef9fc8fe8d436b22 | refs/heads/master | 2020-07-25T01:23:55.746970 | 2019-12-24T08:30:58 | 2019-12-24T08:30:58 | 208,111,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | class Person:
def __init__(self, name):
self.name = name
def talk(self):
print(f'Hi, this is {self.name} here')
person1 = Person("Biaksh Thapa")
person1.talk()
Bob = Person("Bob")
Bob.talk() | [
"bthapa489@gmail.com"
] | bthapa489@gmail.com |
b2aedb04d591a0a051b067311682d48c5f4de51b | ccce8382859124182ab87832e6aab5bc78141503 | /app/__init__.py | 817b64ae76b60d3395952719ff1446b285448028 | [] | no_license | 774525000/train | 52a24b7f3d0420b82e7c5406f4611d725ad7a2bd | ded9cd8a276dc63facc171e405aa34bf0ca672e6 | refs/heads/master | 2022-04-11T12:54:05.764310 | 2020-02-27T01:27:23 | 2020-02-27T01:27:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | # -*- coding:utf-8 -*-
from app.train.train import Train
from time import sleep
def run(home_url, email, password, chaojiying_name, chaojiying_pass, chaojiying_app_id, pic_type):
Train.default(home_url, email, password, chaojiying_name, chaojiying_pass, chaojiying_app_id, pic_type)
sleep(100)
| [
"199441201qQ"
] | 199441201qQ |
e3f5bdca5d9a2bf0d000ba393a7b25ae175ccf9a | 63f8b7a3c3b5ab4c67f3ec6c60c3c327245afe66 | /experiments/scripts/compare_throughput.py | 3e3109181a11913d7287b510eae2e8bd42115c33 | [] | no_license | DanielTakeshi/dqn | 719da28568963f1b2ba041652e32a3d2a62ec191 | 6f9dc0d8aedb1319fd5333295e6561027c68bab2 | refs/heads/main | 2021-01-13T01:48:38.235101 | 2020-11-11T01:35:45 | 2020-11-11T01:35:45 | 311,830,436 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,967 | py | """This combines a bunch of learning curves for all the games.
For bar charts, see `combine_student_results.py`.
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import offsetbox
from matplotlib.ticker import FuncFormatter
import argparse, csv, math, os, pickle, sys, inspect, json
from os.path import join
import numpy as np
import pandas as pd
from dqn import common_config as cfg
from collections import defaultdict
import utils as U
plt.style.use('seaborn-darkgrid')
sns.set_style("darkgrid")
np.set_printoptions(linewidth=180, edgeitems=10)
# ------------------------------------------------------------------------------
# matplotlib stuff
# ------------------------------------------------------------------------------
# Font sizes shared by every subplot (titles, axis labels, ticks, legend).
titlesize = 53
xsize = 42
ysize = 42
ticksize = 42
legendsize = 48
# Per-student-variant line colors (indexed by 'ahead' setting below) and the
# teacher curve color.
scolors = ['gold', 'red', 'blue', 'purple', 'silver', 'orange']
tcolor = 'black'
error_region_alpha = 0.25  # alpha for fill_between error bands
bwidth = 0.3               # bar width (used by related bar-chart scripts)
slw = 7                    # line width for student curves
# ------------------------------------------------------------------------------
# Divisor used by `scale_steps` so x-axes read in millions of env steps.
CONST = 1e6
# Expected number of recorded reward points per run (every 10k steps from 50k).
LEN_REWARDS = 596 # this should be the length ...
def scale_steps(x):
    """Return `x` (a sequence of env step counts) in units of millions of steps."""
    return np.asarray(x) / CONST
def get_info(exp_path, w=100):
    """Gather information, in a similar manner as scripts/quick_student.py.

    Reads one *student* experiment directory and its (single) teacher run,
    and returns a dict with aligned reward curves for both, each exactly
    `LEN_REWARDS` points long, plus the experiment's hyperparameters parsed
    from `params.txt`.

    NOTE(review): `w` is never referenced in this body; presumably a
    smoothing-window argument kept for signature parity -- confirm.
    """
    title = U.get_title(exp_path)
    summary_train = U.load_summary_data(exp_path, train=True)
    s_steps, s_info = U.load_info_data(exp_path)
    s_reward = s_info['true_avg_rew'].values
    # Ah this is going to be a bit annoying but w/e, b/c one of Pong failed.
    # Special-case: rebuild the step axis for the one Pong run that exited
    # early, and pad its reward curve by repeating the final recorded value.
    if '_pong_snapshot_2019-08-22-21-57_s64329' in exp_path:
        print(' At the problematic: _pong_snapshot_2019-08-22-21-57_s64329')
        print(' That one exited early due to Pong-specific stuff.')
        # One point every 10k steps starting at 50k, for LEN_REWARDS points.
        s_steps = np.array([(x*10000+50000) for x in range(LEN_REWARDS)])
        tmp = np.ones((LEN_REWARDS,)) * s_reward[-1]
        for i in range(len(s_reward)):
            tmp[i] = s_reward[i]
        s_reward = tmp
    s_steps = scale_steps(s_steps)
    assert len(s_steps) == len(s_reward)
    # Get the teacher info. Load teacher model, load path, then plot data. Be
    # careful we are allowed to do this 'substitution' to get the expert data.
    with open(join(exp_path,'params.txt'), 'r') as f:
        params = json.load(f)
    teacher_models = params['teacher']['models']
    assert len(teacher_models) == 1, \
        "assume len(teacher_models) = 1, {}".format(len(teacher_models))
    # Derive the teacher directory by swapping the last path component and the
    # students/ -> teachers/ segment in the student path.
    s_last = os.path.basename(os.path.normpath(exp_path))
    t_last = os.path.basename(os.path.normpath(teacher_models[0]))
    teacher_path = exp_path.replace(s_last, t_last)
    teacher_path = teacher_path.replace('students/', 'teachers/')
    teacher_title = U.get_title(teacher_path)
    # CANNOT DO THIS FOR EARLIER RUNS, dating back to before the summer, I think.
    #t_steps, t_info = U.load_info_data(teacher_path)
    # AH, we did not record 'true_avg_rew' in the teacher ... ugh. So for this
    # just read the root file and parse like I do here. That gives us the same
    # values that I use for the 'true_avg_rew' key.
    t_steps = []
    t_reward = []
    teacher_root_file = join(teacher_path, 'root.log')
    with open(teacher_root_file, 'r') as f:
        for line in f:
            # Lines like: '********** <steps> steps ... completed ...'
            if 'completed' in line and '**********' in line and 'steps' in line:
                linesp = line.split()
                assert linesp[0] == '**********', linesp
                assert linesp[2] == 'steps', linesp
                steps = int(linesp[1])
                t_steps.append(steps)
            # Lines like: 'Last 100 results: avg <reward>, ...'
            if 'Last 100 results: avg' in line:
                linesp = line.split()
                assert linesp[0] == 'Last', linesp
                assert linesp[1] == '100', linesp
                assert linesp[2] == 'results:', linesp
                assert linesp[3] == 'avg', linesp
                assert ',' in linesp[4], linesp
                rew = float(linesp[4].strip(','))
                t_reward.append(rew)
    t_steps = scale_steps(t_steps)
    assert len(t_steps) == len(t_reward)
    # More annoying stuff ... trim any over-long curves to LEN_REWARDS so all
    # runs line up point-for-point.
    if len(s_steps) > LEN_REWARDS:
        print('for {}, len(s_steps) = {} so chopping to {}'.format(
                exp_path, len(s_steps), LEN_REWARDS))
        s_steps = s_steps[:LEN_REWARDS]
        s_reward = s_reward[:LEN_REWARDS]
    if len(t_steps) > LEN_REWARDS:
        print('for {}, len(t_steps) = {} so chopping to {}'.format(
                exp_path, len(t_steps), LEN_REWARDS))
        t_steps = t_steps[:LEN_REWARDS]
        t_reward = t_reward[:LEN_REWARDS]
    assert len(s_steps) == LEN_REWARDS, len(s_steps)
    assert len(s_reward) == LEN_REWARDS, len(s_reward)
    assert len(t_steps) == LEN_REWARDS, len(t_steps)
    assert len(t_reward) == LEN_REWARDS, len(t_reward)
    # Hyperparameters for keying/grouping runs later.
    t_lambda = params['teacher']['supervise_loss']['lambda']
    t_condense = params['teacher']['condense_freq']
    t_overlap_m = params['teacher']['overlap']['match_method']
    if t_overlap_m == 'train_net':
        t_overlap_p = params['teacher']['overlap']['overlap_target']
    elif t_overlap_m == 'fixed_steps':
        # Zero-pad so string sort matches numeric sort ('02' < '10').
        t_overlap_p = str(params['teacher']['num_snapshot_ahead']).zfill(2)
        assert t_condense == 5, t_condense
    else:
        raise ValueError(t_overlap_m)
    # For now: sanity-check the lambda used per game matches what was run.
    if 'beamrider' in s_last.lower() or 'pong' in s_last.lower() or \
            'robotank' in s_last.lower():
        assert t_lambda == 0.01, '{}, {}'.format(t_lambda, s_last)
    else:
        assert t_lambda == 0.1, '{}, {}'.format(t_lambda, s_last)
    result = {
        'game_name': U.get_game_name(s_last),
        'overlap_param': t_overlap_p,
        'match_method': t_overlap_m,
        'supervise_lambda': t_lambda,
        'student_rew': s_reward,  # student reward every 10k steps (starts @ 50k)
        'teacher_rew': t_reward,  # teacher reward every 10k steps (starts @ 50k)
        'student_steps': s_steps, # should be same among all trials but save anyway
        'teacher_steps': t_steps, # should be same among all trials but save anyway
        'mb_start': params['teacher']['blend']['start'],
        'mb_end': params['teacher']['blend']['end'],
        'train_freq': params['train']['train_freq_per_step'],
    }
    return result
def _get_array(list_of_items):
nb = len(list_of_items)
lengths = [len(x) for x in list_of_items]
if len(lengths) > 1 and np.std(lengths) > 0:
print('Error with lengths: {}'.format(lengths))
sys.exit()
return np.array(list_of_items)
def _info_for_plots(stats, t_stats, target_num_trials=2):
    """Go through and collect data for one experimental condition.

    Calling this method several times means we should be able to compare many
    different settings. Unlike earlier, game_info (and t_stats) needs to have
    the x coordinates, since we're doing full learning curves.

    Returns a list that has all the game stats we want. It should be a list
    with ONE ITEM PER GAME, so a length 9 list here!

    NOTE: relies on `sorted(stats.keys())` grouping all keys of one game
    contiguously, in the same order as `U.GAMES`; `game_idx` advances by one
    each time a key no longer contains the current game's name.
    Mutates `t_stats` in place (appends one (steps, rewards) pair per game).
    """
    all_game_stats = []
    game_idx = 0
    print('\n\n\t\tNEW GAME: {}'.format(U.GAMES[game_idx]))
    game_info = {} # For each game, collect stats, put in `all_game_stats`.
    for key in sorted(stats.keys()):
        game = U.GAMES[game_idx]
        if game.lower() not in key:
            # Key belongs to the next game: flush the current game's dict.
            game_idx += 1
            game = U.GAMES[game_idx]
            print('\n\n\t\tNEW GAME: {}'.format(game))
            # Add the previously accumulated states to the game_stats.
            all_game_stats.append(game_info)
            game_info = {}
        num_trials = len(stats[key])
        print('\n{} len(stats[key]): {}'.format(key, num_trials))
        # Shape (num_trials, num_recorded_points) for both arrays.
        s_rews = _get_array([x['student_rew'] for x in stats[key]])
        t_rews = _get_array([x['teacher_rew'] for x in stats[key]])
        print('student/teacher rewards: {} {}'.format(s_rews.shape, t_rews.shape))
        #print('std(student): {}'.format(np.std(s_rews, axis=0)))
        #print('std(teacher): {}'.format(np.std(t_rews, axis=0)))
        # All trials share one teacher, so per-point StDev must be ~zero.
        assert np.max( np.abs(np.std(t_rews,axis=0)) ) < 0.001, \
            'We are using the same teacher, right? The StDev should be zero.'
        assert num_trials == s_rews.shape[0] == t_rews.shape[0], num_trials
        # Let's not do this in case we want to plot standard deviation
        #s_rews = np.mean(s_rews, axis=0)
        # Eh this could easily be a global list since all the games use the
        # same number of steps (thus far) but this may give us flexibility later.
        s_steps = np.mean(_get_array([x['student_steps'] for x in stats[key]]), axis=0)
        t_steps = np.mean(_get_array([x['teacher_steps'] for x in stats[key]]), axis=0)
        # Add teacher stats, should match for all in this loop so we just do once.
        t_rews = np.mean(t_rews, axis=0)
        if len(t_stats[game]) == 0:
            t_stats[game].append( (t_steps,t_rews) )
        # Only want student samples for statistics that we will actually be using.
        # Key format: '<game>__<match_method>__<overlap_param>'.
        info = key.split('__')
        if info[1] == 'fixed_steps':
            #assert num_trials == args.num_trials, num_trials
            if num_trials != target_num_trials:
                print('WARNING! we have {} trials, but should have {}'.format(
                        num_trials, target_num_trials))
            num_ahead = info[2]
            game_info[num_ahead] = (s_steps,s_rews)
        elif info[1] == 'train_net':
            continue
        else:
            raise ValueError(info)
    # Add last game (the loop only flushes on a game transition).
    all_game_stats.append(game_info)
    print('\n\nDone printing, len all games: {}'.format(len(all_game_stats)))
    assert len(all_game_stats) == len(U.GAMES) == len(U.G_INDS_FAT)
    return all_game_stats
def report_combined_stats(stats_3, stats_4, args, w=100):
    """Report combined stats, ideally for a plot.

    Builds a 2x5 grid of learning curves, one subplot per game, overlaying
    the teacher (DDQN) curve with student curves from the two experimental
    conditions (4:1 vs 2:1 throughput), then saves the figure to disk.

    :param stats_3: dict, key --> list with one item per random seed
        (first condition, '4:1').
    :param stats_4: same structure (second condition, '2:1').

    NOTE(review): `args` and `w` are accepted but never referenced in this
    body -- presumably kept for signature parity with sibling scripts.
    """
    # Increase factor to `nrows` to make plot 'taller'.
    nrows = 2
    ncols = 5
    fig, ax = plt.subplots(nrows, ncols, squeeze=False, sharex=False,
                           figsize=(11*ncols,8*nrows))
                           #gridspec_kw={'height_ratios': [5,5,5,1]})
    INDICES = U.G_INDS_FAT
    # Teacher data for plots later (filled in-place by _info_for_plots).
    t_stats_3 = defaultdict(list)
    t_stats_4 = defaultdict(list)
    # Do what I did earlier, except for BOTH of the stats here. Yeah !!
    print('\n*************************************************')
    print('COLLECTING DATA FROM FIRST EXPERIMENTAL CONDITION')
    print('*************************************************\n')
    all_game_stats_3 = _info_for_plots(stats=stats_3, t_stats=t_stats_3)
    print('\n*************************************************')
    # NOTE(review): this banner also says 'FIRST' -- looks like a copy-paste;
    # presumably should read 'SECOND'. Cosmetic only (stdout text).
    print('COLLECTING DATA FROM FIRST EXPERIMENTAL CONDITION')
    print('*************************************************\n')
    all_game_stats_4 = _info_for_plots(stats=stats_4, t_stats=t_stats_4)
    # --------------------------------------------------------------------------
    # Plot experiment condition 3 and 4 on the same plot. The shape of `s_y`
    # here, i.e., the reward, is (num_trials, num_recorded) so we could do that
    # as standard deviation, but might be noisy ... also these ALREADY include
    # an implicit smoothing over the past 100 episodes.
    # --------------------------------------------------------------------------
    def _plot(r, c, key, s_stats_3, s_stats_4, color, label, force_color=False,
              std_curves=False):
        # Plot one student variant (`key`) for both conditions on subplot (r,c).
        # Case 1, try to plot everything together w/same color codes:
        # (disabled branch, kept for reference)
        if False:
            s_x, s_y = s_stats_3[key]
            s_y = np.mean(s_y, axis=0)
            ax[r,c].plot(s_x, s_y, ls='--', lw=slw, color=color, label=label+', 4:1')
            s_x, s_y = s_stats_4[key]
            s_y = np.mean(s_y, axis=0)
            ax[r,c].plot(s_x, s_y, lw=slw, color=color, label=label+', 2:1')
        # Case 2, try to use standard deviations?
        if True:
            if force_color:
                cc = 'gold'
            else:
                cc = 'blue'
            s_x, s_y = s_stats_3[key]
            s_y = np.mean(s_y, axis=0)
            ax[r,c].plot(s_x, s_y, lw=slw, color=cc, label=label+', 4:1')
            # NOTE(review): `s_y` is already averaged across trials here, so
            # np.std(s_y, axis=0) is a single scalar over the whole curve, not
            # a per-point across-trial band. Dead in practice (std_curves
            # defaults to False and no caller enables it) -- confirm intent
            # before ever turning it on.
            if std_curves:
                ax[r,c].fill_between(s_x,
                        s_y+np.std(s_y, axis=0),
                        s_y-np.std(s_y, axis=0),
                        color=cc,
                        alpha=error_region_alpha)
            if force_color:
                cc = 'orange'
            else:
                cc = 'red'
            s_x, s_y = s_stats_4[key]
            s_y = np.mean(s_y, axis=0)
            ax[r,c].plot(s_x, s_y, lw=slw, color=cc, label=label+', 2:1')
            if std_curves:
                ax[r,c].fill_between(s_x,
                        s_y+np.std(s_y, axis=0),
                        s_y-np.std(s_y, axis=0),
                        color=cc,
                        alpha=error_region_alpha)
    # --------------------------------------------------------------------------
    # Now go through this again, same logic, except plot. Alphabetical order
    # from top row, w/one for legend to apply to subplots.
    # --------------------------------------------------------------------------
    for game, (r,c) in zip(U.GAMES, INDICES):
        ax[r,c].set_title('{}'.format(game), fontsize=titlesize)
        idx = U.GAMES.index(game)
        # Keys: ['-1', '-2', '00', '02', '05', '10'] where -1 and -2 are BA and RA.
        print('\nKeys for s_stats_3, and then s_stats_4:')
        s_stats_3 = all_game_stats_3[idx]
        print(game, ': ', sorted(s_stats_3.keys()))
        s_stats_4 = all_game_stats_4[idx]
        print(game, ': ', sorted(s_stats_4.keys()))
        # Just take first one b/c teacher stats should be the same. Actually
        # wait maybe we don't need the teacher here? Think about it ...
        t_x, t_y = t_stats_3[game][0]
        if True:
            ax[r,c].plot(t_x, t_y, lw=10, ls='--', color=tcolor, label='DDQN Teacher')
        # Sanity check: both conditions must have seen the identical teacher.
        _t_x, _t_y = t_stats_4[game][0]
        assert np.allclose(t_x, _t_x), '{} {}'.format(t_x, _t_x)
        assert np.allclose(t_y, _t_y), '{} {}'.format(t_y, _t_y)
        # --------------------------------------------------------------------------
        # NOTE: adjust based on how many of the student 'keys' I want to post.
        # Toggle which ones we want on/off. SAME COLOR CODE AS PRIOR FIGURE, if
        # we are using all select functions. But we prob. don't need best
        # ahead. Honestly it seems best just to let ONE be used at a time.
        # --------------------------------------------------------------------------
        if True:
            key = '-1'
            _plot(r, c, key, s_stats_3, s_stats_4, scolors[0], label='S, Best Ahead',
                  force_color=True)
        if False:
            key = '-2'
            _plot(r, c, key, s_stats_3, s_stats_4, scolors[1], label='S, Rand Ahead')
        if False:
            key = '00'
            _plot(r, c, key, s_stats_3, s_stats_4, scolors[2], label='S, 0 Ahead')
        if False:
            key = '02'
            _plot(r, c, key, s_stats_3, s_stats_4, scolors[3], label='S, 2 Ahead')
        if False:
            key = '05'
            _plot(r, c, key, s_stats_3, s_stats_4, scolors[4], label='S, 5 Ahead')
        if True:
            key = '10'
            _plot(r, c, key, s_stats_3, s_stats_4, scolors[5], label='S, 10 Ahead')
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    # Bells and whistles
    for r in range(nrows):
        for c in range(ncols):
            #leg = ax[r,c].legend(loc="best", ncol=2, prop={'size':legendsize})
            #for legobj in leg.legendHandles:
            #    legobj.set_linewidth(5.0)
            ax[r,c].tick_params(axis='x', labelsize=ticksize)
            ax[r,c].tick_params(axis='y', labelsize=ticksize)
            # I think it's better to share axes in the x direction to be
            # consistent with steps, but doing so removes the axis ticks. This
            # reverts it so we get the ticks on all the axis.
            #ax[r,c].xaxis.set_tick_params(which='both', labelbottom=True)
    # Put this on r=0, c=0, then hide it, just to get legend to appear.
    ax[0,0].set_visible(False)
    handles, labels = ax[1,1].get_legend_handles_labels()
    # Location (0,0) is bottom left. Doing (0,1) is upper left but the text
    # isn't visible (because `loc` is the lower left part of the legend).
    fig.legend(handles, labels, loc=(0.005,0.500), prop={'size':legendsize})
    # Finally, save!! Can't do `.[...].png` since overleaf complains.
    plt.tight_layout()
    # NOTE(review): the trailing `.format()` here is a no-op on a literal
    # with no placeholders.
    figname = 'fig_throughput_student.png'.format()
    plt.savefig(figname)
    print("Just saved: {}".format(figname))
if __name__ == "__main__":
    # --------------------------------------------------------------------------
    # Entry point: compare experiment sets 3 and 4 (student throughput runs).
    # --------------------------------------------------------------------------
    EXP_PATH = cfg.SNAPS_STUDENT
    parser = argparse.ArgumentParser()
    args = parser.parse_args()
    # Trial counts consumed by the experiment-selection criteria below.
    args.num_trials_exp_3 = 2
    args.num_trials_exp_4 = 2

    # Collect every *student* model directory that passes the throughput filter.
    model_dirs = [join(EXP_PATH, name) for name in os.listdir(EXP_PATH)
                  if U._criteria_for_experiments_throughput(name, args)]
    model_dirs.sort()
    print("Currently plotting with these models, one trained agent per file:")

    stats_3 = defaultdict(list)
    stats_4 = defaultdict(list)
    for model_dir in model_dirs:
        tail = os.path.basename(os.path.normpath(model_dir))
        if tail in U.STUFF_TO_SKIP:
            print("  skipping {} due to STUFF_TO_SKIP".format(tail))
            continue
        print("\nAnalyzing: {}".format(model_dir))
        info = get_info(model_dir)
        key = '{}__{}__{}'.format(info['game_name'], info['match_method'],
                                  info['overlap_param'])
        mb = info['mb_start']
        tf = info['train_freq']
        mm = info['match_method']
        # Keep only experiment 3 (train_freq 4) and experiment 4 (train_freq 2).
        if mm != 'train_net' and mb == 0.50 and tf == 4:
            stats_3[key].append(info)
        elif mm != 'train_net' and mb == 0.50 and tf == 2:
            stats_4[key].append(info)
        else:
            print('  skipping {}, mm,tf,mm: {}, {}, {}'.format(key, mb, tf, mm))
            continue

    print('\nNow going to report on all these stats.')
    print('  len stats 3, 4 dicts: {} and {}'.format(len(stats_3), len(stats_4)))
    print('')
    report_combined_stats(stats_3, stats_4, args)
| [
"takeshidanny@gmail.com"
] | takeshidanny@gmail.com |
18e101f183f35166e395cf8563e66d8b90b148fa | 4dd8416c2a6cf08dd3fa3d197d895f2852106097 | /语法基础/01.Python基础/12-if条件满足或者不满足的时候执行多句代码.py | ff6369886f6d8f6702c0a7020bde5b55db5990cc | [] | no_license | lanboys/HelloPython | 19a74ad416ddadf7ed487023527e08ad9fcecb87 | d411ee29781a9cf00470d05d1be61da8f2081080 | refs/heads/master | 2020-04-07T19:40:17.381564 | 2018-11-23T10:43:00 | 2018-11-23T10:43:00 | 158,657,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | age = 10
# Demo of if/else branching; assumes `age` was assigned earlier in the
# script (age = 10 at the top of the file).
if age > 18:
    # If the condition is satisfied, the following 4 lines always execute
    print("----0-----")
    print("----1-----")
    print("----1-----")
    print("----1-----")
else:
    # If the condition above is not satisfied, the next 4 lines run instead
    print("----1-----")
    print("----1-----")
    print("----1-----")
    print("----1-----")
# The line below is unrelated to the if/else above: it executes regardless
# of whether the condition was satisfied
print("----9-----")
| [
"lan_bing2013@163.com"
] | lan_bing2013@163.com |
695e2a94c90888c0a054c8a98f9c352950ce7682 | 187a4c23f446e1d82efe2fba2f935c32087a1922 | /usbdeath.py | 5725f4f32be649254379d0f0007dd1f0225ff0fc | [] | no_license | semeion/usbdeath | f38730a23473525633a925360d2c1cf6716ca374 | 8b449a8866e15114562162c40f0cac11feea16c0 | refs/heads/master | 2021-01-22T00:28:46.744110 | 2014-06-02T19:16:00 | 2014-06-02T19:16:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Coded by Sam (info@sam3.se)
#
# usbdeath: snapshot the USB devices present at startup as a whitelist, then
# poll `lsusb` once per second and force an immediate kernel power-off (via
# the sysrq trigger) as soon as an unknown device appears.
import subprocess, time
from os import system


def list_usb_devices():
    """Return the current `lsusb` output as a list of stripped lines."""
    # universal_newlines=True makes stdout text on both Python 2 and 3;
    # without it, readline() yields bytes on Python 3 and the `line != ''`
    # sentinel never matches, so the loop would never terminate.
    proc = subprocess.Popen('lsusb', stdout=subprocess.PIPE,
                            universal_newlines=True)
    devices = []
    while True:
        line = proc.stdout.readline()
        if line == '':
            break
        devices.append(line.strip('\n'))
    return devices


# Devices plugged in when the script starts are considered trusted.
usb_whitelist = list_usb_devices()

while True:
    # Any device not seen at startup triggers an immediate hard power-off.
    # (The unused `intruder` flag from the original was dead code; removed.)
    for usb in list_usb_devices():
        if usb not in usb_whitelist:
            # sysrq 'o' powers the machine off without a clean shutdown.
            system('echo 1 > /proc/sys/kernel/sysrq && echo o > /proc/sysrq-trigger')
    time.sleep(1)
"info@sam3.se"
] | info@sam3.se |
fbe6fad964090c227647ca05aee2f02fe8a3aafb | cdd8f8ed846820ffa107de24d45b5a55cd3c5bd0 | /boot_device.py | ec1c782e9e183d1a324f82ecfac9d27109542ede | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive"
] | permissive | rockokw/vesper | d8db2c69e3f69c4b7feb0553021c91b52dc20c00 | caaae6c06fb3df6745b9485f40e3cc799c795a75 | refs/heads/master | 2022-12-29T21:44:54.008370 | 2020-10-18T20:34:15 | 2020-10-18T20:36:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | #!/usr/bin/env python
import argparse
import copy
import logging.config
import config as cfg
from device import Device

if __name__ == '__main__':
    # Parse command-line arguments.
    arg_parser = argparse.ArgumentParser(description='Device boot script')
    arg_parser.add_argument('name', type=str, help='device name')
    arg_parser.add_argument('-l', '--loglevel', default='WARNING',
                            help='log level (DEBUG|INFO|WARNING|ERROR|CRITICAL)')
    cli = arg_parser.parse_args()

    # Configure logging from the shared config, overriding the console level
    # and silencing the file handler.
    log_config = copy.deepcopy(cfg.LOGCFG)
    log_config['handlers']['default']['level'] = cli.loglevel.upper()
    log_config['handlers']['file']['filename'] = '/dev/null'
    logging.config.dictConfig(log_config)
    log = logging.getLogger('boot_device')

    # Create the device, announce, and start it.
    device_name = cli.name
    device = Device(device_name)
    log.info('booting device %s...', device_name)
    device.start()
| [
"kwamelaw@usc.edu"
] | kwamelaw@usc.edu |
c61ff42baf9127aaa9763d25b963025cd51cae7c | af3a6d84270f21fad514f62cbcd3a1e353a3e7b8 | /utils.py | 6cb7e4191064a09eb74188dc550b5af0a9452e09 | [] | no_license | shrish23/Telegram-newsBot | 8690e45a6105032fb1a7b3a3992e7d61c79b556d | 6f87f19dfbe792efe1b5a11e4be67746566685e6 | refs/heads/master | 2023-08-24T08:18:10.362281 | 2021-10-21T10:21:38 | 2021-10-21T10:21:38 | 419,597,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,294 | py | import os
# Point the Google client libraries at the bundled service-account key.
# NOTE(review): the credentials file name and project id are hard-coded;
# prefer loading them from environment variables / deployment config.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "Client.json"
import dialogflow_v2 as dialogflow
# Shared Dialogflow session client, reused by detect_intent_from_text().
dialogflow_session_client = dialogflow.SessionsClient()
PROJECT_ID = "newsbot-tuqv"
from gnewsclient import gnewsclient
# Shared Google News client, reused by fetch_news().
client = gnewsclient.NewsClient()
def detect_intent_from_text(text, session_id, language_code='en'):
    """Send `text` to Dialogflow within `session_id` and return the query result."""
    session_path = dialogflow_session_client.session_path(PROJECT_ID, session_id)
    query = dialogflow.types.QueryInput(
        text=dialogflow.types.TextInput(text=text, language_code=language_code))
    response = dialogflow_session_client.detect_intent(
        session=session_path, query_input=query)
    return response.query_result
def get_reply(query, chat_id):
    """Classify `query` via Dialogflow.

    Returns ('get_news', params-dict) for the news intent, otherwise
    ('small_talk', fulfillment-text).
    """
    result = detect_intent_from_text(query, chat_id)
    if result.intent.display_name != 'get_news':
        return "small_talk", result.fulfillment_text
    return "get_news", dict(result.parameters)
def fetch_news(parameters):
    """Configure the shared news client from Dialogflow parameters and
    return the first five matching articles."""
    for attr, key in (('language', 'language'),
                      ('location', 'geo-country'),
                      ('topic', 'topic')):
        setattr(client, attr, parameters.get(key))
    return client.get_news()[:5]
# Telegram reply-keyboard layout offering the supported news topics
# (3 rows x 3 buttons).
topics_keyboard = [
    ['Top Stories', 'World','Nation'],
    ['Business','Technology','Entertainment'],
    ['Sports','Science','Health']
]
| [
"shrishsharma@outlook.com"
] | shrishsharma@outlook.com |
1fc6096204e32445ea9a2db3b0692477f27c3235 | 99cff3a11eac3d1d5c0d16ee80e5b9c0efc2951d | /HttpTrigger/__init__.py | 4ed8c0e66e13e66f103bc26630f69aa458fbf95d | [] | no_license | kevin808/functionapp-python-schemaregistry-demo | af5b6de05fad1b60ef533bd0edfcfdd5100e73e9 | b39a9bf42cee400f3be091cebdc6c14cd80c6bb5 | refs/heads/master | 2023-03-30T01:54:48.541836 | 2021-04-06T13:02:42 | 2021-04-06T13:02:42 | 354,515,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,553 | py | import logging
import azure.functions as func
import os
from azure.identity import ClientSecretCredential
from azure.schemaregistry import SchemaRegistryClient
from azure.schemaregistry.serializer.avroserializer import SchemaRegistryAvroSerializer
TENANT_ID='YOUR TENANT_ID'
CLIENT_ID='YOUR CLIENT_ID'
CLIENT_SECRET='YOUR CLIENT_SECRET'
SCHEMA_REGISTRY_ENDPOINT='YOUR_STANDARD_EVENTHUB.servicebus.windows.net'
SCHEMA_GROUP='default'
SCHEMA_STRING = """
{"namespace": "example.avro",
"type": "record",
"name": "User",
"fields": [
{"name": "name", "type": "string"},
{"name": "favorite_number", "type": ["int", "null"]},
{"name": "favorite_color", "type": ["string", "null"]}
]
}"""
token_credential = ClientSecretCredential(
tenant_id=TENANT_ID,
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET
)
# For Managed Identity
# token_credential = DefaultAzureCredential()
def serialize(serializer):
    """Serialize two sample user records and return both payloads as a list.

    The first serialize() call registers the schema with Schema Registry and
    caches it locally; the second call is served from the cache and makes no
    service call.
    """
    sample_records = (
        {"name": u"Ben", "favorite_number": 7, "favorite_color": u"red"},
        {"name": u"Alice", "favorite_number": 15, "favorite_color": u"green"},
    )
    payloads = []
    for record in sample_records:
        payload = serializer.serialize(record, SCHEMA_STRING)
        print('Encoded bytes are: ', payload)
        payloads.append(payload)
    return payloads
def deserialize(serializer, bytes_payload):
    """Decode an Avro payload with `serializer`, print it, and return the dict.

    The serializer extracts the schema id embedded in the payload and resolves
    it via Schema Registry (cached locally after the first lookup).
    """
    decoded = serializer.deserialize(bytes_payload)
    print('Deserialized data is: ', decoded)
    return decoded
def main(req: func.HttpRequest) -> func.HttpResponse:
    """HTTP-triggered entry point: round-trips two sample records through
    Azure Schema Registry Avro serialization/deserialization.

    Returns a 200 response once both payloads have been encoded and decoded.
    """
    logging.info('Python HTTP trigger function processed a request.')
    schema_registry = SchemaRegistryClient(endpoint=SCHEMA_REGISTRY_ENDPOINT,
                                           credential=token_credential)
    serializer = SchemaRegistryAvroSerializer(schema_registry, SCHEMA_GROUP)
    try:
        # Encode both sample records, then decode them again.
        bytes_data_ben, bytes_data_alice = serialize(serializer)
        deserialize(serializer, bytes_data_ben)
        deserialize(serializer, bytes_data_alice)
    finally:
        # Bug fix: close() previously ran outside any finally, so a failure
        # during (de)serialization leaked the serializer's resources.
        serializer.close()
    return func.HttpResponse(
        "Schema Registry Executed.",
        status_code=200
    )
| [
"kevin80828@gmail.com"
] | kevin80828@gmail.com |
604c1e106f08e0be7286bba0d9ef1a3bc66b63e5 | 708a6c274432fee2d25c7e86581f3655cd4be0de | /src/updater/app/lib/packages/vehicle_eco_balance/geo.py | 64ab2e71d7d114cb5d7db70d98dfc40097f995db | [
"MIT"
] | permissive | sampittko/tuke-beautofuel | f65fa2865c1402421224e3ff9182e3ab5e6cd3c9 | 0e7b2528af5f3a96c0abf6dc963d2a5b29779401 | refs/heads/main | 2023-04-11T00:18:19.498448 | 2021-04-23T13:16:10 | 2021-04-23T13:16:10 | 316,191,448 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,753 | py | import numpy as np
import requests as req
from requests.exceptions import HTTPError
import time
from geopy import distance
import osmnx as ox
def calc_gradient_angle(point1, point2):
    """Gradient angle between two points on the earth's surface.

    Parameters
    ----------
    point1: tuple (latitude, longitude, altitude)
        first coordinate
    point2: tuple (latitude, longitude, altitude)
        second coordinate

    Returns
    -------
    float
        arctan(rise / run) in radians, in [-pi/2, pi/2]; 0.0 when the
        horizontal distance between the points is zero.
    """
    horiz1, alt1 = point1[:-1], point1[-1]
    horiz2, alt2 = point2[:-1], point2[-1]
    run = calc_distance(horiz1, horiz2)
    if run == 0:
        return 0.0
    return np.arctan((alt2 - alt1) / run)
def calc_distance(coord1, coord2, distance_type="geodetic", ellipsoid="WGS-84"):
    """ Calculate distance between two points on the earth's surface using geopy

    Great-circle distance is calculated using Vincenty's formula.
    Default ellipsoid of the geodetic distance is WGS-84.

    Parameters
    ----------
    coord1: tuple (latitude, longitude)
        first coordinate
    coord2: tuple (latitude, longitude)
        second coordinate
    distance_type: str
        'geodetic' or 'great-circle' (default 'geodetic')
    ellipsoid: str
        ellipsoid for geodetic distance (default 'WGS-84')

    Returns
    -------
    distance: float
        distance in meters

    Raises
    ------
    ValueError
        if `distance_type` is neither 'geodetic' nor 'great-circle'
    """
    if distance_type == "geodetic":
        return distance.geodesic(coord1, coord2, ellipsoid=ellipsoid).km * 1000
    if distance_type == "great-circle":
        return distance.great_circle(coord1, coord2).km * 1000
    # Bug fix: previously this branch only printed a warning and implicitly
    # returned None, producing confusing TypeErrors downstream; fail fast.
    raise ValueError("distance_type " + distance_type + " is unknown!")
class ElevationAPI:
    """
    ElevationAPI

    Client for elevation web APIs, e.g.:

    - Open Topo Data (https://www.opentopodata.org/)
        - API: https://api.opentopodata.org/v1/
        - Open
        - Example: https://api.opentopodata.org/v1/eudem25m?locations=39.7391536,-104.9847034
        - Limits
            - Max 100 locations per request.
            - Max 1 call per second.
            - Max 1000 calls per day.
    - Google Elevation API (https://developers.google.com/maps/documentation/elevation/overview)
        - API: https://maps.googleapis.com/maps/api/elevation/
        - Commercial, API key needed
        - Example: https://maps.googleapis.com/maps/api/elevation/json?locations=39.7391536,-104.9847034&key=YOUR_API_KEY

    Parameters
    ----------
    base_url: str
        API base url (default 'https://api.opentopodata.org/v1/')
    dataset: str
        eudem25m, aster30m, srtm30m, ... (default 'eudem25m', check https://www.opentopodata.org/ for details)
    api_key: str (default None)
        API key for the service

    Attributes
    ----------
    base_url: str
        API base url with the dataset appended
    location_limit: int or None
        number of allowed locations per request (None means unlimited)
    params: dictionary
        parameters for the get request (e.g. locations, key)
    """

    def __init__(self, base_url='https://api.opentopodata.org/v1/', dataset='eudem25m', api_key=None):
        self.base_url = base_url
        # Only Open Topo Data is known to cap locations per request; any
        # other service is treated as unlimited.
        if self.base_url != 'https://api.opentopodata.org/v1/':
            self.location_limit = None
        else:
            self.location_limit = 100
        self.base_url = self.base_url + dataset
        self.params = {'key': api_key}

    def get_elevation(self, coordinates):
        """ Get elevation for the given coordinates from an elevation API

        Parameters
        ----------
        coordinates: list of tuples (latitude, longitude)
            coordinates in EPSG:4326 (WGS-84)

        Returns
        -------
        elevation: numpy array
            elevation for each coordinate
        """
        elevation = np.zeros(len(coordinates))

        if self.location_limit is None:
            print('Download elevation for all {} coordinates'.format(len(coordinates)))
            # Bug fix: this used to be `elevation[0, len(coordinates)] = ...`,
            # which tuple-indexes the 1-D array and raises IndexError; assign
            # to the slice instead.
            elevation[0:len(coordinates)] = self._make_request(coordinates)
            return elevation

        # Split into multiple requests to respect the per-request limit.
        for i in range(int(len(coordinates) / self.location_limit) + 1):
            start = i * self.location_limit
            end = (i + 1) * self.location_limit
            chunk = coordinates[start:end]
            if not chunk:
                # len(coordinates) was an exact multiple of the limit; avoid
                # sending a pointless empty request.
                break
            print('Download elevation for coordinates {start} to {end}'.format(start=start + 1, end=end))
            elevation[start:end] = self._make_request(chunk)
            time.sleep(1)  # for OpenTopoData the limit is max 1 call per second
        return elevation

    def _make_request(self, coordinates):
        """Fetch elevations for `coordinates`; returns a list of floats.

        NOTE: on a failed request this returns an empty list, which makes the
        caller's slice assignment raise instead of silently zero-filling.
        """
        locations_str = self._coordinates2param(coordinates)
        self.params.update({'locations': locations_str})
        elevation_list = []
        try:
            response = req.get(self.base_url, params=self.params)
            response.raise_for_status()
        except HTTPError as http_err:
            print('An http error occurred during the request: {}'.format(http_err))
        except Exception as err:
            print('An error occurred during the request: {}'.format(err))
        else:
            results = response.json()['results']
            elevation_list = [result['elevation'] for result in results]
        return elevation_list

    def _coordinates2param(self, coordinates):
        """ Transform coordinates to string in order to set the locations request parameter """
        return ''.join([str(coordinate[0]) + ',' + str(coordinate[1]) + '|' for coordinate in coordinates])
def get_cr_from_osm(coordinates):
    """ Get rolling coefficient (cr) from osm surface attribute

    1) Determine nearest osm edge for each coordinate
    2) Determine surface attribute for each osm edge
    3) Get rolling coefficient (cr) for the corresponding surface type from literature

    Hint: function will take some time when coordinates have a large spatial extent.

    Parameters
    ----------
    coordinates: list of tuples (latitude, longitude)
        coordinates

    Returns
    -------
    [cr, surface]: list of numpy arrays
        first array are rolling coefficient (cr) values and second array are surface attributes
    """
    # TODO: Improve performance
    # TODO: Check scientific literature for rolling coefficient values
    # Bounding box over all coordinates (y = latitude, x = longitude).
    lats = [coordinate[0] for coordinate in coordinates]
    lngs = [coordinate[1] for coordinate in coordinates]
    min_y = np.min(lats)
    max_y = np.max(lats)
    min_x = np.min(lngs)
    max_x = np.max(lngs)
    # Restrict the downloaded way tags to 'surface' to keep the graph small.
    ox.settings.useful_tags_way = ["surface"]
    print('Get graph from bounding box: min_y={}, max_y={}, min_x={}, max_x={}'.format(min_y, max_y, min_x, max_x))
    graph = ox.graph_from_bbox(max_y, min_y, max_x, min_x, network_type='drive')
    surface = []
    cr = []
    i = 0
    print('Find nearest osm edge and set rolling coefficient according to the surface type of the edge.')
    for lat, lng in coordinates:
        # NOTE(review): ox.get_nearest_edge presumably returns an edge tuple
        # whose first two elements are the end-node ids; this API changed in
        # osmnx >= 1.0 -- confirm against the pinned osmnx version.
        x = ox.get_nearest_edge(graph, (lat, lng))
        p = [x[0], x[1]]
        a = ox.utils_graph.get_route_edge_attributes(graph, p)
        dic = a[0]
        if "surface" in dic:
            surface.append(dic["surface"])
        else:
            # Untagged edges get surface None (and the default cr below).
            surface.append(None)
        # Get the rolling resistance coefficient
        # Sources
        # https://www.engineeringtoolbox.com/rolling-friction-resistance-d_1303.html
        # The Automotive Chassis book
        if surface[i] == "asphalt":
            cr.append(0.02)
        elif surface[i] == "cobblestone":
            cr.append(0.015)
        elif surface[i] == "paving_stones":
            cr.append(0.033)
        else:
            # Unknown / untagged surfaces fall back to the asphalt value.
            cr.append(0.02)
        i = i + 1
    return [np.array(cr), np.array(surface)]
| [
"sampittko@gmail.com"
] | sampittko@gmail.com |
bceaab930c75e6fc131b1c92e95c21d9403e01e5 | 28e554c3677c3192de4ae8bfeebdec47ab4e6bb9 | /Autoencoders/autoencoders.py | 4f76b720856b9491751fb462ef281b739a3fe46c | [] | no_license | shrinidhi99/Computer-Vision | 4d94f5ed54b9af78737a73aee5f7b577b6b6e470 | 75147678b56c2d977e06677eab58c67da95e6db7 | refs/heads/main | 2023-04-25T12:06:28.012599 | 2021-05-05T07:49:52 | 2021-05-05T07:49:52 | 334,365,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,176 | py | import random
import tensorflow as tf
from tensorflow import keras
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Load the MNIST handwritten-digit dataset (60k train / 10k test images).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Scale pixel values from the 0-255 byte range down to [0, 1] floats.
x_train = x_train/255.0
# Same scaling for the test set.
x_test = x_test/255.0
# Encoder: flatten the 28x28 image and compress it to a 64-dim code.
encoder_input = keras.Input(shape=(28, 28), name='img')
x = keras.layers.Flatten()(encoder_input)
encoder_output = keras.layers.Dense(64, activation="relu")(x)
encoder = keras.Model(encoder_input, encoder_output, name='encoder')
# Decoder: expand the code back to 784 values and reshape to 28x28.
decoder_input = keras.layers.Dense(64, activation="relu")(encoder_output)
x = keras.layers.Dense(784, activation="relu")(decoder_input)
decoder_output = keras.layers.Reshape((28, 28))(x)
# NOTE(review): `lr`/`decay` are legacy Adam argument names; newer Keras
# expects `learning_rate` -- confirm against the installed TF version.
opt = tf.keras.optimizers.Adam(lr=0.001, decay=1e-6)
autoencoder = keras.Model(encoder_input, decoder_output, name='autoencoder')
autoencoder.summary()
autoencoder.compile(opt, loss='mse')
# Train for 3 epochs, one fit() call per epoch (structured this way so a
# per-epoch snapshot could be saved -- see the commented save below).
epochs = 3
for epoch in range(epochs):
    history = autoencoder.fit(
        x_train,
        x_train,
        epochs=1,
        batch_size=32, validation_split=0.10
    )
    # autoencoder.save(f"models/AE-{epoch+1}.model")
for d in x_test[:30]:  # show 30 test digits next to their reconstructions
    ae_out = autoencoder.predict([d.reshape(-1, 28, 28)])
    img = ae_out[0]
    cv2.imshow("decoded", img)
    cv2.imshow("original", np.array(d))
    cv2.waitKey(1000)  # wait 1000ms, 1 second, and then show the next.
def add_noise(img, random_chance=5):
    """Return a copy of `img` (2-D iterable of pixel values) where each pixel
    is, with roughly `random_chance`-in-100 probability, replaced by a random
    value drawn uniformly from [0, 1]."""
    def _maybe_noisy(pixel):
        # Each pixel independently draws 0..99; a draw at or below the
        # threshold replaces the pixel with uniform noise.
        if random.choice(range(100)) <= random_chance:
            return random.uniform(0, 1)
        return pixel

    return np.array([[_maybe_noisy(pix) for pix in row] for row in img])
def remove_values(img, random_chance=5):
    """Return a copy of `img` (2-D iterable of pixel values) where each pixel
    is zeroed out with roughly `random_chance`-in-100 probability."""
    zeroed_rows = []
    for row in img:
        zeroed_rows.append([
            0 if random.choice(range(100)) <= random_chance else pix
            for pix in row
        ])
    return np.array(zeroed_rows)
# Hide 15% of the pixels of a training digit to see how well the autoencoder
# reconstructs the missing values.
some_hidden = remove_values(x_train[0], random_chance=15)
plt.imshow(some_hidden, cmap="gray")
plt.show()
ae_out = autoencoder.predict([some_hidden.reshape(-1, 28, 28)])
# predict() takes a batch and returns a batch, so grab element 0 even though
# only a single image was passed in.
img = ae_out[0]
plt.imshow(ae_out[0], cmap="gray")
plt.show()
# Repeat with a much higher corruption rate (35% of pixels zeroed).
some_hidden = remove_values(x_train[0], random_chance=35)
plt.imshow(some_hidden, cmap="gray")
plt.show()
ae_out = autoencoder.predict([some_hidden.reshape(-1, 28, 28)])
# Same batch-in / batch-out convention as above.
img = ae_out[0]
plt.imshow(ae_out[0], cmap="gray")
plt.show()
"shrinidhi99.varna@gmail.com"
] | shrinidhi99.varna@gmail.com |
fc11fbea3f492caf574da8adef94d0978313dd7f | 33116209c77798529db2cfe5ea21dfcac13c5e38 | /update_version.py | a82034ad6f96c32f395cd11a4ab4374180bd1335 | [] | no_license | RolfMaster/basicSynth | 5ba7c8de684cecefe3fa4e361aeec4940709b40b | 40153a11b6b9ead769389c989eb3be39c15d3fa2 | refs/heads/master | 2020-04-18T10:15:55.308826 | 2019-01-23T00:08:51 | 2019-01-23T00:08:51 | 167,462,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,405 | py | #!/usr/bin/python
# this script will update the versions in plist and installer files to match that in resource.h
import plistlib, os, datetime, fileinput, glob, sys, string
# Directory containing this script; all resource/installer paths below are
# resolved relative to it.
scriptpath = os.path.dirname(os.path.realpath(__file__))
def replacestrs(filename, s, r):
    """Replace every occurrence of substring `s` with `r`, in place, in all
    files matching the glob pattern `filename`."""
    files = glob.glob(filename)
    # fileinput with inplace=1 redirects stdout into the file being read,
    # so writing each (possibly modified) line rewrites the file in place.
    for line in fileinput.input(files, inplace=1):
        # Bug fix: the original called `string.find(line, s)` and discarded
        # the result -- dead code, and `string.find` no longer exists on
        # Python 3.
        sys.stdout.write(line.replace(s, r))
def main():
    """Read PLUG_VER from resource.h and stamp the resulting version string
    into every plugin plist, the Mac installer project and the Windows
    installer script.

    NOTE(review): Python 2 era code (`print` statements, `string.lstrip`,
    `plistlib.readPlist`/`writePlist`); it will not run on Python 3 as-is
    (readPlist/writePlist were removed in Python 3.9).
    """
    MajorStr = ""
    MinorStr = ""
    BugfixStr = ""
    # PLUG_VER is a packed hex number: 0xMMMMmmbb (major / minor / bugfix).
    # NOTE(review): string.lstrip strips a *character set*, not a literal
    # prefix; it happens to work because the version literal starts with '0'.
    for line in fileinput.input(scriptpath + "/resource.h",inplace=0):
        if "#define PLUG_VER " in line:
            FullVersion = int(string.lstrip(line, "#define PLUG_VER "), 16)
            major = FullVersion & 0xFFFF0000
            MajorStr = str(major >> 16)
            minor = FullVersion & 0x0000FF00
            MinorStr = str(minor >> 8)
            BugfixStr = str(FullVersion & 0x000000FF)
    FullVersionStr = MajorStr + "." + MinorStr + "." + BugfixStr
    today = datetime.date.today()
    CFBundleGetInfoString = FullVersionStr + ", Copyright BasicSynthPlugin, " + str(today.year)
    CFBundleVersion = FullVersionStr
    print "update_version.py - setting version to " + FullVersionStr
    print "Updating plist version info..."
    # For each plugin format: load the plist, set the three version keys,
    # write it back, then undo plistlib's DOCTYPE rewrite ("//Apple//" ->
    # "//Apple Computer//") so the files keep their original header.
    # VST2
    plistpath = scriptpath + "/resources/Synthesis-VST2-Info.plist"
    vst2 = plistlib.readPlist(plistpath)
    vst2['CFBundleGetInfoString'] = CFBundleGetInfoString
    vst2['CFBundleVersion'] = CFBundleVersion
    vst2['CFBundleShortVersionString'] = CFBundleVersion
    plistlib.writePlist(vst2, plistpath)
    replacestrs(plistpath, "//Apple//", "//Apple Computer//");
    # Audio Unit
    plistpath = scriptpath + "/resources/Synthesis-AU-Info.plist"
    au = plistlib.readPlist(plistpath)
    au['CFBundleGetInfoString'] = CFBundleGetInfoString
    au['CFBundleVersion'] = CFBundleVersion
    au['CFBundleShortVersionString'] = CFBundleVersion
    plistlib.writePlist(au, plistpath)
    replacestrs(plistpath, "//Apple//", "//Apple Computer//");
    # VST3
    plistpath = scriptpath + "/resources/Synthesis-VST3-Info.plist"
    vst3 = plistlib.readPlist(plistpath)
    vst3['CFBundleGetInfoString'] = CFBundleGetInfoString
    vst3['CFBundleVersion'] = CFBundleVersion
    vst3['CFBundleShortVersionString'] = CFBundleVersion
    plistlib.writePlist(vst3, plistpath)
    replacestrs(plistpath, "//Apple//", "//Apple Computer//");
    # Standalone macOS app
    plistpath = scriptpath + "/resources/Synthesis-OSXAPP-Info.plist"
    app = plistlib.readPlist(plistpath)
    app['CFBundleGetInfoString'] = CFBundleGetInfoString
    app['CFBundleVersion'] = CFBundleVersion
    app['CFBundleShortVersionString'] = CFBundleVersion
    plistlib.writePlist(app, plistpath)
    replacestrs(plistpath, "//Apple//", "//Apple Computer//");
    # RTAS
    plistpath = scriptpath + "/resources/Synthesis-RTAS-Info.plist"
    rtas = plistlib.readPlist(plistpath)
    rtas['CFBundleGetInfoString'] = CFBundleGetInfoString
    rtas['CFBundleVersion'] = CFBundleVersion
    rtas['CFBundleShortVersionString'] = CFBundleVersion
    plistlib.writePlist(rtas, plistpath)
    replacestrs(plistpath, "//Apple//", "//Apple Computer//");
    # AAX
    plistpath = scriptpath + "/resources/Synthesis-AAX-Info.plist"
    aax = plistlib.readPlist(plistpath)
    aax['CFBundleGetInfoString'] = CFBundleGetInfoString
    aax['CFBundleVersion'] = CFBundleVersion
    aax['CFBundleShortVersionString'] = CFBundleVersion
    plistlib.writePlist(aax, plistpath)
    replacestrs(plistpath, "//Apple//", "//Apple Computer//");
    # iOS app plist update is currently disabled.
    # plistpath = scriptpath + "/resources/Synthesis-IOSAPP-Info.plist"
    # iosapp = plistlib.readPlist(plistpath)
    # iosapp['CFBundleGetInfoString'] = CFBundleGetInfoString
    # iosapp['CFBundleVersion'] = CFBundleVersion
    # iosapp['CFBundleShortVersionString'] = CFBundleVersion
    # plistlib.writePlist(iosapp, plistpath)
    # replacestrs(plistpath, "//Apple//", "//Apple Computer//");
    print "Updating Mac Installer version info..."
    plistpath = scriptpath + "/installer/Synthesis.pkgproj"
    installer = plistlib.readPlist(plistpath)
    # The installer project contains six packages; version them all.
    for x in range(0,6):
        installer['PACKAGES'][x]['PACKAGE_SETTINGS']['VERSION'] = FullVersionStr
    plistlib.writePlist(installer, plistpath)
    replacestrs(plistpath, "//Apple//", "//Apple Computer//");
    print "Updating Windows Installer version info..."
    # Rewrite the AppVersion line of the Inno Setup script in place.
    for line in fileinput.input(scriptpath + "/installer/Synthesis.iss",inplace=1):
        if "AppVersion" in line:
            line="AppVersion=" + FullVersionStr + "\n"
        sys.stdout.write(line)
if __name__ == '__main__':
main() | [
"prvo.slovo.a@gmail.com"
] | prvo.slovo.a@gmail.com |
e72f4db6ed6a6653152baab96d0fa3235cbf675b | 9c13bffaf12c83b049375cf24e12183fcab3a2aa | /venv/lib/python3.6/site-packages/pip/_vendor/requests/sessions.py | a8e60f360279eb602a4e07bb27447e0a0d22f3b3 | [] | no_license | brielino/SDCCTestAnsible | b702d48c934c8bde9638ceba3b27fabf9dd40071 | 857f66860de2ad889455789b60a162506d3125a1 | refs/heads/master | 2022-12-09T13:15:29.030558 | 2020-09-12T14:51:31 | 2020-09-12T14:51:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,310 | py | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
import sys
import time
from datetime import timedelta
from collections import OrderedDict
from .auth import _basic_auth_str
from .compat import cookielib, is_py3, urljoin, urlparse, Mapping
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from ._internal_utils import to_native_string
from .utils import to_key_val_list, default_headers, DEFAULT_PORTS
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url, rewind_body
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
# Preferred clock, based on which one is more accurate on a given system.
# On Windows, time.perf_counter has the highest available resolution; the
# attribute lookup inside the try raises AttributeError on interpreters that
# predate it, falling back to the legacy time.clock.
if sys.platform == 'win32':
    try:  # Python 3.4+
        preferred_clock = time.perf_counter
    except AttributeError:  # Earlier than Python 3.
        preferred_clock = time.clock
else:
    # On non-Windows platforms time.time is accurate enough.
    preferred_clock = time.time
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
    """Determine the value of a setting for one request, combining the
    request-level and session-level values.

    Dictionary settings are merged via `dict_class`, request-level entries
    taking precedence; entries whose merged value is None are removed.
    Non-dictionary settings (e.g. ``verify``) resolve to the request value
    when given, otherwise the session value.
    """
    if session_setting is None:
        return request_setting
    if request_setting is None:
        return session_setting

    # Non-dictionary settings cannot be merged: the request value wins.
    if not (isinstance(session_setting, Mapping) and
            isinstance(request_setting, Mapping)):
        return request_setting

    # Start from the session-level values, then overlay request-level ones.
    merged = dict_class(to_key_val_list(session_setting))
    merged.update(to_key_val_list(request_setting))

    # A key set to None removes the setting entirely. Collect the keys first
    # so we do not mutate the dict while iterating over it.
    for key in [k for (k, v) in merged.items() if v is None]:
        del merged[key]

    return merged
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
    """Merge request-level and session-level hooks.

    A hooks dict of ``{'response': []}`` counts as "no hooks"; without this
    special case an empty request hook list would wipe out the session hooks
    during the generic merge.
    """
    for primary, fallback in ((session_hooks, request_hooks),
                              (request_hooks, session_hooks)):
        if primary is None or primary.get('response') == []:
            return fallback

    return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
def get_redirect_target(self, resp):
"""Receives a Response. Returns a redirect URI or ``None``"""
# Due to the nature of how requests processes redirects this method will
# be called at least once upon the original response and at least twice
# on each subsequent redirect response (if any).
# If a custom mixin is used to handle this logic, it may be advantageous
# to cache the redirect location onto the response object as a private
# attribute.
if resp.is_redirect:
location = resp.headers['location']
# Currently the underlying http module on py3 decode headers
# in latin1, but empirical evidence suggests that latin1 is very
# rarely used with non-ASCII characters in HTTP headers.
# It is more likely to get UTF8 header rather than latin1.
# This causes incorrect handling of UTF8 encoded location headers.
# To solve this, we re-encode the location in latin1.
if is_py3:
location = location.encode('latin1')
return to_native_string(location, 'utf8')
return None
def should_strip_auth(self, old_url, new_url):
"""Decide whether Authorization header should be removed when redirecting"""
old_parsed = urlparse(old_url)
new_parsed = urlparse(new_url)
if old_parsed.hostname != new_parsed.hostname:
return True
# Special case: allow http -> https redirect when using the standard
# ports. This isn't specified by RFC 7235, but is kept to avoid
# breaking backwards compatibility with older versions of requests
# that allowed any redirects on the same host.
if (old_parsed.scheme == 'http' and old_parsed.port in (80, None)
and new_parsed.scheme == 'https' and new_parsed.port in (443, None)):
return False
# Handle default port usage corresponding to scheme.
changed_port = old_parsed.port != new_parsed.port
changed_scheme = old_parsed.scheme != new_parsed.scheme
default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
if (not changed_scheme and old_parsed.port in default_port
and new_parsed.port in default_port):
return False
# Standard case: root URI must match
return changed_port or changed_scheme
    def resolve_redirects(self, resp, req, stream=False, timeout=None,
                          verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
        """Receives a Response. Returns a generator of Responses or Requests.

        :param resp: the response whose redirect chain is to be followed.
        :param req: the :class:`PreparedRequest` that produced ``resp``.
        :param yield_requests: if true, yield the prepared redirect requests
            without sending them; otherwise each redirect is sent and the
            resulting responses are yielded.
        :raises TooManyRedirects: when the chain exceeds ``self.max_redirects``.
        """
        hist = []  # keep track of history
        url = self.get_redirect_target(resp)
        previous_fragment = urlparse(req.url).fragment
        while url:
            prepared_request = req.copy()
            # Update history and keep track of redirects.
            # resp.history must ignore the original request in this loop
            hist.append(resp)
            resp.history = hist[1:]
            try:
                resp.content  # Consume socket so it can be released
            except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
                resp.raw.read(decode_content=False)
            if len(resp.history) >= self.max_redirects:
                raise TooManyRedirects('Exceeded {} redirects.'.format(self.max_redirects), response=resp)
            # Release the connection back into the pool.
            resp.close()
            # Handle redirection without scheme (see: RFC 1808 Section 4)
            if url.startswith('//'):
                parsed_rurl = urlparse(resp.url)
                url = ':'.join([to_native_string(parsed_rurl.scheme), url])
            # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
            parsed = urlparse(url)
            if parsed.fragment == '' and previous_fragment:
                parsed = parsed._replace(fragment=previous_fragment)
            elif parsed.fragment:
                previous_fragment = parsed.fragment
            url = parsed.geturl()
            # Facilitate relative 'location' headers, as allowed by RFC 7231.
            # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
            # Compliant with RFC3986, we percent encode the url.
            if not parsed.netloc:
                url = urljoin(resp.url, requote_uri(url))
            else:
                url = requote_uri(url)
            prepared_request.url = to_native_string(url)
            self.rebuild_method(prepared_request, resp)
            # https://github.com/psf/requests/issues/1084
            if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
                # https://github.com/psf/requests/issues/3490
                # Any redirect other than 307/308 may change the method, so
                # the body and its describing headers must be discarded.
                purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
                for header in purged_headers:
                    prepared_request.headers.pop(header, None)
                prepared_request.body = None
            headers = prepared_request.headers
            headers.pop('Cookie', None)
            # Extract any cookies sent on the response to the cookiejar
            # in the new request. Because we've mutated our copied prepared
            # request, use the old one that we haven't yet touched.
            extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
            merge_cookies(prepared_request._cookies, self.cookies)
            prepared_request.prepare_cookies(prepared_request._cookies)
            # Rebuild auth and proxy information.
            proxies = self.rebuild_proxies(prepared_request, proxies)
            self.rebuild_auth(prepared_request, resp)
            # A failed tell() sets `_body_position` to `object()`. This non-None
            # value ensures `rewindable` will be True, allowing us to raise an
            # UnrewindableBodyError, instead of hanging the connection.
            rewindable = (
                prepared_request._body_position is not None and
                ('Content-Length' in headers or 'Transfer-Encoding' in headers)
            )
            # Attempt to rewind consumed file-like object.
            if rewindable:
                rewind_body(prepared_request)
            # Override the original request.
            req = prepared_request
            if yield_requests:
                # NOTE(review): in this mode `url` is not refreshed, so the
                # generator is expected to be consumed one item at a time.
                yield req
            else:
                resp = self.send(
                    req,
                    stream=stream,
                    timeout=timeout,
                    verify=verify,
                    cert=cert,
                    proxies=proxies,
                    allow_redirects=False,
                    **adapter_kwargs
                )
                extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
                # extract redirect url, if any, for the next loop
                url = self.get_redirect_target(resp)
                yield resp
def rebuild_auth(self, prepared_request, response):
"""When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers and self.should_strip_auth(response.request.url, url):
# If we get redirected to a new host, we should strip out any
# authentication headers.
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
    def rebuild_proxies(self, prepared_request, proxies):
        """This method re-evaluates the proxy configuration by considering the
        environment variables. If we are redirected to a URL covered by
        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
        proxy keys for this URL (in case they were stripped by a previous
        redirect).
        This method also replaces the Proxy-Authorization header where
        necessary.
        :rtype: dict
        """
        proxies = proxies if proxies is not None else {}
        headers = prepared_request.headers
        url = prepared_request.url
        scheme = urlparse(url).scheme
        # Work on a copy so the caller's proxy dict is never mutated.
        new_proxies = proxies.copy()
        no_proxy = proxies.get('no_proxy')
        bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy)
        if self.trust_env and not bypass_proxy:
            environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
            # Explicit per-scheme entry wins over the catch-all 'all' entry.
            proxy = environ_proxies.get(scheme, environ_proxies.get('all'))
            if proxy:
                new_proxies.setdefault(scheme, proxy)
        # Always drop the old Proxy-Authorization; it may belong to a proxy
        # that no longer applies after the redirect.
        if 'Proxy-Authorization' in headers:
            del headers['Proxy-Authorization']
        try:
            username, password = get_auth_from_url(new_proxies[scheme])
        except KeyError:
            username, password = None, None
        # Re-issue the header only when the new proxy URL embeds credentials.
        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username, password)
        return new_proxies
def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# https://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
class Session(SessionRedirectMixin):
    """A Requests session.
    Provides cookie persistence, connection-pooling, and configuration.
    Basic Usage::
      >>> import requests
      >>> s = requests.Session()
      >>> s.get('https://httpbin.org/get')
      <Response [200]>
    Or as a context manager::
      >>> with requests.Session() as s:
      ...     s.get('https://httpbin.org/get')
      <Response [200]>
    """
    # Attributes copied by __getstate__/__setstate__ when pickling a session.
    __attrs__ = [
        'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
        'cert', 'adapters', 'stream', 'trust_env',
        'max_redirects',
    ]
    def __init__(self):
        #: A case-insensitive dictionary of headers to be sent on each
        #: :class:`Request <Request>` sent from this
        #: :class:`Session <Session>`.
        self.headers = default_headers()
        #: Default Authentication tuple or object to attach to
        #: :class:`Request <Request>`.
        self.auth = None
        #: Dictionary mapping protocol or protocol and host to the URL of the proxy
        #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
        #: be used on each :class:`Request <Request>`.
        self.proxies = {}
        #: Event-handling hooks.
        self.hooks = default_hooks()
        #: Dictionary of querystring data to attach to each
        #: :class:`Request <Request>`. The dictionary values may be lists for
        #: representing multivalued query parameters.
        self.params = {}
        #: Stream response content default.
        self.stream = False
        #: SSL Verification default.
        self.verify = True
        #: SSL client certificate default, if String, path to ssl client
        #: cert file (.pem). If Tuple, ('cert', 'key') pair.
        self.cert = None
        #: Maximum number of redirects allowed. If the request exceeds this
        #: limit, a :class:`TooManyRedirects` exception is raised.
        #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
        #: 30.
        self.max_redirects = DEFAULT_REDIRECT_LIMIT
        #: Trust environment settings for proxy configuration, default
        #: authentication and similar.
        self.trust_env = True
        #: A CookieJar containing all currently outstanding cookies set on this
        #: session. By default it is a
        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
        #: may be any other ``cookielib.CookieJar`` compatible object.
        self.cookies = cookiejar_from_dict({})
        # Default connection adapters.
        self.adapters = OrderedDict()
        self.mount('https://', HTTPAdapter())
        self.mount('http://', HTTPAdapter())
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.close()
    def prepare_request(self, request):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for
        transmission and returns it. The :class:`PreparedRequest` has settings
        merged from the :class:`Request <Request>` instance and those of the
        :class:`Session`.
        :param request: :class:`Request` instance to prepare with this
            session's settings.
        :rtype: requests.PreparedRequest
        """
        cookies = request.cookies or {}
        # Bootstrap CookieJar.
        if not isinstance(cookies, cookielib.CookieJar):
            cookies = cookiejar_from_dict(cookies)
        # Merge with session cookies
        merged_cookies = merge_cookies(
            merge_cookies(RequestsCookieJar(), self.cookies), cookies)
        # Set environment's basic authentication if not explicitly set.
        auth = request.auth
        if self.trust_env and not auth and not self.auth:
            auth = get_netrc_auth(request.url)
        p = PreparedRequest()
        p.prepare(
            method=request.method.upper(),
            url=request.url,
            files=request.files,
            data=request.data,
            json=request.json,
            headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
            params=merge_setting(request.params, self.params),
            auth=merge_setting(auth, self.auth),
            cookies=merged_cookies,
            hooks=merge_hooks(request.hooks, self.hooks),
        )
        return p
    def request(self, method, url,
            params=None, data=None, headers=None, cookies=None, files=None,
            auth=None, timeout=None, allow_redirects=True, proxies=None,
            hooks=None, stream=None, verify=None, cert=None, json=None):
        """Constructs a :class:`Request <Request>`, prepares it and sends it.
        Returns :class:`Response <Response>` object.
        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param params: (optional) Dictionary or bytes to be sent in the query
            string for the :class:`Request`.
        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
            object to send in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the
            :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the
            :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the
            :class:`Request`.
        :param files: (optional) Dictionary of ``'filename': file-like-objects``
            for multipart encoding upload.
        :param auth: (optional) Auth tuple or callable to enable
            Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param allow_redirects: (optional) Set to True by default.
        :type allow_redirects: bool
        :param proxies: (optional) Dictionary mapping protocol or protocol and
            hostname to the URL of the proxy.
        :param stream: (optional) whether to immediately download the response
            content. Defaults to ``False``.
        :param verify: (optional) Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use. Defaults to ``True``.
        :param cert: (optional) if String, path to ssl client cert file (.pem).
            If Tuple, ('cert', 'key') pair.
        :rtype: requests.Response
        """
        # Create the Request.
        # NOTE: falsy `data`/`params` (e.g. 0 or '') are replaced by {} here.
        req = Request(
            method=method.upper(),
            url=url,
            headers=headers,
            files=files,
            data=data or {},
            json=json,
            params=params or {},
            auth=auth,
            cookies=cookies,
            hooks=hooks,
        )
        prep = self.prepare_request(req)
        proxies = proxies or {}
        settings = self.merge_environment_settings(
            prep.url, proxies, stream, verify, cert
        )
        # Send the request.
        send_kwargs = {
            'timeout': timeout,
            'allow_redirects': allow_redirects,
        }
        send_kwargs.update(settings)
        resp = self.send(prep, **send_kwargs)
        return resp
    def get(self, url, **kwargs):
        r"""Sends a GET request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('GET', url, **kwargs)
    def options(self, url, **kwargs):
        r"""Sends a OPTIONS request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('OPTIONS', url, **kwargs)
    def head(self, url, **kwargs):
        r"""Sends a HEAD request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        # HEAD is the one verb that does not follow redirects by default.
        kwargs.setdefault('allow_redirects', False)
        return self.request('HEAD', url, **kwargs)
    def post(self, url, data=None, json=None, **kwargs):
        r"""Sends a POST request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
            object to send in the body of the :class:`Request`.
        :param json: (optional) json to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        return self.request('POST', url, data=data, json=json, **kwargs)
    def put(self, url, data=None, **kwargs):
        r"""Sends a PUT request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
            object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        return self.request('PUT', url, data=data, **kwargs)
    def patch(self, url, data=None, **kwargs):
        r"""Sends a PATCH request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
            object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        return self.request('PATCH', url, data=data, **kwargs)
    def delete(self, url, **kwargs):
        r"""Sends a DELETE request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        :rtype: requests.Response
        """
        return self.request('DELETE', url, **kwargs)
    def send(self, request, **kwargs):
        """Send a given PreparedRequest.
        :rtype: requests.Response
        """
        # Set defaults that the hooks can utilize to ensure they always have
        # the correct parameters to reproduce the previous request.
        kwargs.setdefault('stream', self.stream)
        kwargs.setdefault('verify', self.verify)
        kwargs.setdefault('cert', self.cert)
        kwargs.setdefault('proxies', self.proxies)
        # It's possible that users might accidentally send a Request object.
        # Guard against that specific failure case.
        if isinstance(request, Request):
            raise ValueError('You can only send PreparedRequests.')
        # Set up variables needed for resolve_redirects and dispatching of hooks.
        # allow_redirects is popped because the adapter does not accept it.
        allow_redirects = kwargs.pop('allow_redirects', True)
        stream = kwargs.get('stream')
        hooks = request.hooks
        # Get the appropriate adapter to use
        adapter = self.get_adapter(url=request.url)
        # Start time (approximately) of the request
        start = preferred_clock()
        # Send the request
        r = adapter.send(request, **kwargs)
        # Total elapsed time of the request (approximately)
        elapsed = preferred_clock() - start
        r.elapsed = timedelta(seconds=elapsed)
        # Response manipulation hooks
        r = dispatch_hook('response', hooks, r, **kwargs)
        # Persist cookies
        if r.history:
            # If the hooks create history then we want those cookies too
            for resp in r.history:
                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
        extract_cookies_to_jar(self.cookies, request, r.raw)
        # Resolve redirects if allowed.
        if allow_redirects:
            # Redirect resolving generator.
            gen = self.resolve_redirects(r, request, **kwargs)
            history = [resp for resp in gen]
        else:
            history = []
        # Shuffle things around if there's history.
        if history:
            # Insert the first (original) request at the start
            history.insert(0, r)
            # Get the last request made
            r = history.pop()
            r.history = history
        # If redirects aren't being followed, store the response on the Request for Response.next().
        if not allow_redirects:
            try:
                r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs))
            except StopIteration:
                pass
        # Eagerly consume the body unless the caller asked for streaming.
        if not stream:
            r.content
        return r
    def merge_environment_settings(self, url, proxies, stream, verify, cert):
        """
        Check the environment and merge it with some settings.
        :rtype: dict
        """
        # Gather clues from the surrounding environment.
        if self.trust_env:
            # Set environment's proxies.
            no_proxy = proxies.get('no_proxy') if proxies is not None else None
            env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
            for (k, v) in env_proxies.items():
                proxies.setdefault(k, v)
            # Look for requests environment configuration and be compatible
            # with cURL.
            if verify is True or verify is None:
                verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
                          os.environ.get('CURL_CA_BUNDLE'))
        # Merge all the kwargs.
        proxies = merge_setting(proxies, self.proxies)
        stream = merge_setting(stream, self.stream)
        verify = merge_setting(verify, self.verify)
        cert = merge_setting(cert, self.cert)
        return {'verify': verify, 'proxies': proxies, 'stream': stream,
                'cert': cert}
    def get_adapter(self, url):
        """
        Returns the appropriate connection adapter for the given URL.
        :rtype: requests.adapters.BaseAdapter
        """
        # First prefix match wins; mount() keeps longer prefixes first.
        for (prefix, adapter) in self.adapters.items():
            if url.lower().startswith(prefix.lower()):
                return adapter
        # Nothing matches :-/
        raise InvalidSchema("No connection adapters were found for {!r}".format(url))
    def close(self):
        """Closes all adapters and as such the session"""
        for v in self.adapters.values():
            v.close()
    def mount(self, prefix, adapter):
        """Registers a connection adapter to a prefix.
        Adapters are sorted in descending order by prefix length.
        """
        self.adapters[prefix] = adapter
        # Re-insert every shorter prefix after the new one so iteration in
        # get_adapter encounters more specific (longer) prefixes first.
        keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
        for key in keys_to_move:
            self.adapters[key] = self.adapters.pop(key)
    def __getstate__(self):
        # Pickle support: only the attributes named in __attrs__ are kept.
        state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
        return state
    def __setstate__(self, state):
        for attr, value in state.items():
            setattr(self, attr, value)
def session():
    """
    Returns a :class:`Session` for context-management.
    .. deprecated:: 1.0.0
    This method has been deprecated since version 1.0.0 and is only kept for
    backwards compatibility. New code should use :class:`~requests.sessions.Session`
    to create a session. This may be removed at a future date.
    :rtype: Session
    """
    # Thin deprecated alias; no configuration is applied beyond Session().
    return Session()
| [
"melissaripaolo@gmail.com"
] | melissaripaolo@gmail.com |
188cde0f74544b63ad4efac44b60ccebb54020fa | cabe9566267e24ee9ca827ccf47e75bfc1cd5021 | /features/migrations/0002_features_status.py | 07aa745905af3aef8a5e2b45fb0c0d8678873f50 | [] | no_license | Code-Institute-Submissions/ecommerce-project | 0a73d98feeb7f667fa541f4f5181a818ffb347a7 | 1c35f760b4f82fc513a1c3f27005b03922c60020 | refs/heads/master | 2020-05-18T09:40:33.804158 | 2019-04-30T21:09:42 | 2019-04-30T21:09:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-04-17 13:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a ``status`` CharField (default "Todo") to the ``features`` model,
    # building on this app's initial migration.
    dependencies = [
        ('features', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='features',
            name='status',
            field=models.CharField(default='Todo', max_length=254),
        ),
    ]
| [
"noelle.browne@yahoo.ie"
] | noelle.browne@yahoo.ie |
5acdd79baad3e3b1e64e2899d6958a752a4e1fbd | ec2490a6628ea5240ee16d7ee0ab35c4bdf3f954 | /gaurabda/GCEarthData.py | 6312b600eae9579137e7dd37368ff3a49ed3cacb | [
"MIT"
] | permissive | tuksik/gaurabda-calendar-ekadasi | 26e0f13112949ec9a8895bc1b0bccbc587544ae5 | 36f00a497bc30c041619baa1e9551e3a16021e4e | refs/heads/master | 2022-12-30T09:32:51.683966 | 2020-10-16T14:35:03 | 2020-10-16T14:35:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,436 | py | import math
import gaurabda.GCMath as GCMath
import gaurabda.GCAyanamsha as GCAyanamsha
import gaurabda.GCTimeZone as GCTimeZone
import gaurabda.GCStrings as GCStrings
import gaurabda.GCCoords as GCCoords
import gaurabda.GCUT as GCUT
def calc_epsilon_phi(date, use_full_series=False):
    """Return ``(delta_phi, epsilon)`` for the given Julian date.

    delta_phi -- nutation in ecliptic longitude, in degrees.
    epsilon   -- true obliquity of the ecliptic, in degrees.

    By default a short trigonometric approximation of the nutation is used.
    Pass ``use_full_series=True`` to evaluate the full 31-term IAU 1980
    nutation series instead.  The original code carried the series data but
    left that branch unreachable behind an ``if True:`` toggle; exposing it
    as a keyword argument is backward compatible (the default reproduces the
    previous behaviour exactly).
    """
    # Integer multipliers of D, M, M', F and omega for each series term.
    arg_mul = [
        [ 0, 0, 0, 0, 1],
        [-2, 0, 0, 2, 2],
        [ 0, 0, 0, 2, 2],
        [ 0, 0, 0, 0, 2],
        [ 0, 1, 0, 0, 0],
        [ 0, 0, 1, 0, 0],
        [-2, 1, 0, 2, 2],
        [ 0, 0, 0, 2, 1],
        [ 0, 0, 1, 2, 2],
        [-2,-1, 0, 2, 2],
        [-2, 0, 1, 0, 0],
        [-2, 0, 0, 2, 1],
        [ 0, 0,-1, 2, 2],
        [ 2, 0, 0, 0, 0],
        [ 0, 0, 1, 0, 1],
        [ 2, 0,-1, 2, 2],
        [ 0, 0,-1, 0, 1],
        [ 0, 0, 1, 2, 1],
        [-2, 0, 2, 0, 0],
        [ 0, 0,-2, 2, 1],
        [ 2, 0, 0, 2, 2],
        [ 0, 0, 2, 2, 2],
        [ 0, 0, 2, 0, 0],
        [-2, 0, 1, 2, 2],
        [ 0, 0, 0, 2, 0],
        [-2, 0, 0, 2, 0],
        [ 0, 0,-1, 2, 1],
        [ 0, 2, 0, 0, 0],
        [ 2, 0,-1, 0, 1],
        [-2, 2, 0, 2, 2],
        [ 0, 1, 0, 0, 1]
    ]
    # Sine coefficients (units of 0.0001 arcsec) for the nutation in longitude.
    arg_phi = [
        [-171996,-1742],
        [ -13187, -16],
        [ -2274, -2],
        [ 2062, 2],
        [ 1426, -34],
        [ 712, 1],
        [ -517, 12],
        [ -386, -4],
        [ -301, 0],
        [ 217, -5],
        [ -158, 0],
        [ 129, 1],
        [ 123, 0],
        [ 63, 0],
        [ 63, 1],
        [ -59, 0],
        [ -58, -1],
        [ -51, 0],
        [ 48, 0],
        [ 46, 0],
        [ -38, 0],
        [ -31, 0],
        [ 29, 0],
        [ 29, 0],
        [ 26, 0],
        [ -22, 0],
        [ 21, 0],
        [ 17, -1],
        [ 16, 0],
        [ -16, 1],
        [ -15, 0]
    ]
    # Cosine coefficients (units of 0.0001 arcsec) for the nutation in obliquity.
    arg_eps = [
        [ 92025, 89],
        [ 5736, -31],
        [ 977, -5],
        [ -895, 5],
        [ 54, -1],
        [ -7, 0],
        [ 224, -6],
        [ 200, 0],
        [ 129, -1],
        [ -95, 3],
        [ 0, 0],
        [ -70, 0],
        [ -53, 0],
        [ 0, 0],
        [ -33, 0],
        [ 26, 0],
        [ 32, 0],
        [ 27, 0],
        [ 0, 0],
        [ -24, 0],
        [ 16, 0],
        [ 13, 0],
        [ 0, 0],
        [ -12, 0],
        [ 0, 0],
        [ 0, 0],
        [ -10, 0],
        [ 0, 0],
        [ -8, 0],
        [ 7, 0],
        [ 9, 0]
    ]
    # Julian centuries since the J2000.0 epoch.
    t = ( date -2451545.0)/36525
    delta_phi = 0.0
    # longitude of rising knot (mean ascending node of the lunar orbit)
    omega =GCMath.putIn360(125.04452+(-1934.136261+(0.0020708+1.0/450000*t)*t)*t)
    if not use_full_series:
        # Truncated approximation using only the dominant periodic terms.
        l = 280.4665+36000.7698*t
        ls = 218.3165+481267.8813*t
        delta_epsilon = 9.20 * GCMath.cosDeg(omega)+ 0.57* GCMath.cosDeg(2*l)+ 0.10* GCMath.cosDeg(2*ls) - 0.09*GCMath.cosDeg(2*omega)
        delta_phi =(-17.20* GCMath.sinDeg(omega)- 1.32*GCMath.sinDeg(2*l)-0.23*GCMath.sinDeg(2*ls) + 0.21*GCMath.sinDeg(2*omega))/3600
    else:
        # Full 31-term series.
        # mean elongation of moon to sun
        d = GCMath.putIn360(297.85036+(445267.111480+(-0.0019142+t/189474)*t)*t)
        # mean anomaly of the sun
        m =GCMath.putIn360(357.52772+(35999.050340+(-0.0001603-t/300000)*t)*t)
        # mean anomaly of the moon
        ms =GCMath.putIn360(134.96298+(477198.867398+(0.0086972+t/56250)*t)*t)
        # argument of the latitude of the moon
        f = GCMath.putIn360(93.27191+(483202.017538+(-0.0036825+t/327270)*t)*t)
        delta_phi = 0
        delta_epsilon = 0
        for i in range(31):
            s= arg_mul[i][0]*d + arg_mul[i][1]*m + arg_mul[i][2]*ms + arg_mul[i][3]*f + arg_mul[i][4]*omega
            delta_phi = delta_phi+(arg_phi[i][0]+arg_phi[i][1]*t*0.1)*GCMath.sinDeg(s)
            delta_epsilon = delta_epsilon+(arg_eps[i][0] + arg_eps[i][1]*t*0.1) * GCMath.cosDeg(s)
        # Convert from 0.0001 arcsec units to degrees.
        delta_phi=delta_phi*0.0001/3600
        delta_epsilon=delta_epsilon*0.0001/3600
    # Mean obliquity of the ecliptic (arcsec), then true obliquity in degrees.
    epsilon_0=84381.448+(-46.8150+(-0.00059+0.001813*t)*t)*t
    epsilon=(epsilon_0+delta_epsilon)/3600
    return delta_phi, epsilon
def eclipticalToEquatorialCoords(ecc,date):
    """Convert ecliptical coordinates to equatorial ones for the given
    Julian date.

    NOTE: mutates ``ecc`` in place -- the nutation in longitude is added to
    ``ecc.longitude`` -- and returns the tuple ``(equatorial, ecc)``.
    """
    eqc = GCCoords.GCEquatorialCoords()
    epsilon = 0.0
    delta_phi = 0.0
    # alpha/delta are never used below; kept as-is from the original code.
    alpha = delta = 0.0
    delta_phi,epsilon = calc_epsilon_phi(date)
    # Apply the nutation in longitude before rotating to the equator.
    ecc.longitude = GCMath.putIn360(ecc.longitude + delta_phi)
    eqc.rightAscension = GCMath.arcTan2Deg( GCMath.sinDeg(ecc.longitude) * GCMath.cosDeg(epsilon) - GCMath.tanDeg(ecc.latitude) * GCMath.sinDeg(epsilon), GCMath.cosDeg(ecc.longitude));
    eqc.declination = GCMath.arcSinDeg( GCMath.sinDeg(ecc.latitude) * GCMath.cosDeg(epsilon) + GCMath.cosDeg(ecc.latitude) * GCMath.sinDeg(epsilon) * GCMath.sinDeg(ecc.longitude));
    return eqc,ecc
def equatorialToHorizontalCoords(eqc, obs, date):
    """Convert equatorial coordinates to horizontal (azimut, elevation)
    for observer ``obs`` at the given Julian date."""
    coords = GCCoords.GCHorizontalCoords()
    # Local hour angle of the object for this observer.
    hourAngle = GCMath.putIn360(star_time(date) - eqc.rightAscension + obs.longitude_deg)
    sinLat = GCMath.sinDeg(obs.latitude_deg)
    cosLat = GCMath.cosDeg(obs.latitude_deg)
    coords.azimut = GCMath.rad2deg(math.atan2(
        GCMath.sinDeg(hourAngle),
        GCMath.cosDeg(hourAngle) * sinLat - GCMath.tanDeg(eqc.declination) * cosLat))
    coords.elevation = GCMath.rad2deg(math.asin(
        sinLat * GCMath.sinDeg(eqc.declination) +
        cosLat * GCMath.cosDeg(eqc.declination) * GCMath.cosDeg(hourAngle)))
    return coords
def GetTextLatitude(d):
    """Format a latitude in degrees as e.g. ``"12N30"``
    (whole degrees, hemisphere letter N/S, zero-padded minutes)."""
    hemisphere = 'S' if d < 0.0 else 'N'
    d = math.fabs(d)
    degrees = int(math.floor(d))
    minutes = int(math.floor((d - degrees) * 60 + 0.5))
    # Bug fix: rounding could yield 60 minutes (e.g. 12.9999 -> "12N60");
    # carry the overflow into the degrees instead.
    if minutes == 60:
        degrees += 1
        minutes = 0
    return "{}{}{:02d}".format(degrees, hemisphere, minutes)
def GetTextLongitude(d):
    """Format a longitude in degrees as e.g. ``"15W15"``
    (whole degrees, hemisphere letter E/W, zero-padded minutes)."""
    hemisphere = 'W' if d < 0.0 else 'E'
    d = math.fabs(d)
    degrees = int(math.floor(d))
    minutes = int(math.floor((d - degrees) * 60 + 0.5))
    # Bug fix: rounding could yield 60 minutes (e.g. 14.9999 -> "14E60");
    # carry the overflow into the degrees instead.
    if minutes == 60:
        degrees += 1
        minutes = 0
    return "{}{}{:02d}".format(degrees, hemisphere, minutes)
def star_time(date):
    """Apparent sidereal time at Greenwich, in degrees, for Julian date."""
    centuries = (date - 2451545.0) / 36525.0
    delta_phi, epsilon = calc_epsilon_phi(date)
    # Mean sidereal time polynomial, corrected by the equation of the
    # equinoxes (nutation in longitude projected onto the equator).
    mean = (280.46061837
            + 360.98564736629 * (date - 2451545.0)
            + centuries * centuries * (0.000387933 - centuries / 38710000))
    return GCMath.putIn360(mean + delta_phi * GCMath.cosDeg(epsilon))
class EARTHDATA:
    """Observer location (longitude/latitude in degrees), time zone offset
    and the twilight/event type used by the observation routines."""
    def __init__(self):
        # observated event
        # 0 - center of the sun
        # 1 - civil twilight
        # 2 - nautical twilight
        # 3 - astronomical twilight
        self.obs = 0
        # Geographic position in degrees (east and north positive).
        self.longitude_deg = 0.0
        self.latitude_deg = 0.0
        # Time zone offset and daylight-saving flag.
        self.tzone = 0.0
        self.dst = 0
    def __str__(self):
        # Bug fix: the original referenced latitude_deg / longitude_deg /
        # tzone without ``self.``, raising NameError whenever printed.
        return '{}: {} {}: {} {}: {}'.format(
            GCStrings.getString(10), GetTextLatitude(self.latitude_deg),
            GCStrings.getString(11), GetTextLongitude(self.longitude_deg),
            GCStrings.getString(12), GCTimeZone.GetTimeZoneOffsetText(self.tzone))
    def GetHorizontDegrees(self, jday):
        # Ecliptic longitude of the local horizon point for this observer
        # (sidereal time shifted by longitude, ayanamsa and a fixed 155
        # degree offset -- presumably the ascendant; TODO confirm).
        return GCMath.putIn360(star_time(jday) - self.longitude_deg - GCAyanamsha.GetAyanamsa(jday) + 155)
    def GetNextAscendentStart(self, startDate):
        """Scan forward from ``startDate`` for the next 30-degree boundary
        crossing of the horizon point, halving the step each time a
        boundary is overshot.  Returns ``(sign_index, date)``."""
        # Bug fix: GCGregorianDate is a module that was never imported; the
        # original mixed bare ``GCGregorianDate(...)`` calls (NameError)
        # with the qualified ``GCGregorianDate.GCGregorianDate(...)`` form.
        import gaurabda.GCGregorianDate as GCGregorianDate
        phi = 30.0
        jday = startDate.GetJulianComplete()
        xj = 0.0
        d = GCGregorianDate.GCGregorianDate(date=startDate)
        xd = GCGregorianDate.GCGregorianDate()
        scan_step = 0.05
        new_tit = -1
        l1 = self.GetHorizontDegrees(jday)
        prev_tit = int(math.floor(l1 / phi))
        counter = 0
        while counter < 20:
            # Remember the last position so we can rewind on overshoot.
            xj = jday
            xd.Set(d)
            jday += scan_step
            d.shour += scan_step
            if d.shour > 1.0:
                d.shour -= 1.0
                d.NextDay()
            l2 = self.GetHorizontDegrees(jday)
            new_tit = int(math.floor(l2 / phi))
            if prev_tit != new_tit:
                # Crossed a boundary: step back and refine with half step.
                jday = xj
                d.Set(xd)
                scan_step *= 0.5
                counter += 1
                continue
            else:
                l1 = l2
        nextDate = GCGregorianDate.GCGregorianDate(date=d)
        return new_tit, nextDate
def unittests():
    """Self-checks for this module, reported through GCUT."""
    GCUT.info('earth data')
    # Formatting of geographic coordinates.
    GCUT.val(GetTextLatitude(12.5),'12N30','text latitude')
    GCUT.val(GetTextLongitude(-15.25),'15W15','text longitude')
    # Sidereal time and nutation at the J2000.0 epoch (JD 2451545.0).
    GCUT.val(star_time(2451545.0),280.45704234942144,'start time')
    dp,ep = calc_epsilon_phi(2451545.0)
    GCUT.val(dp,-0.0038975991170544155,'delta phi')
    GCUT.val(ep,23.437690731210242,'epsilon')
| [
"root@gopal.home.sk"
] | root@gopal.home.sk |
a79901d7cd6230cf60535dc9cffd9a91da0145c5 | 82e0c57e5b133d27e2380c9f809c2b338b3bc52c | /test/aqua/operators/test_op_construction.py | e5419631592af242da1fd109f730d14fcc7fdf60 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ryzbaka/qiskit-aqua | 62b3e50d60f080ed8aa1b9a484fcd508bc1139b3 | c1375ff5a1e7cf06d6691519f3ca4feb32e1a747 | refs/heads/master | 2022-07-19T03:55:58.393568 | 2020-05-20T14:41:12 | 2020-05-20T14:41:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,065 | py | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Operator construction, including OpPrimitives and singletons. """
import unittest
from test.aqua import QiskitAquaTestCase
import itertools
import numpy as np
from qiskit import QuantumCircuit
from qiskit.quantum_info.operators import Operator, Pauli
from qiskit.circuit.library import CZGate
from qiskit.aqua.operators import X, Y, Z, I, CX, T, H, PrimitiveOp, SummedOp, PauliOp, Minus
# pylint: disable=invalid-name
class TestOpConstruction(QiskitAquaTestCase):
"""Operator Construction tests."""
    def test_pauli_primitives(self):
        """Tensor products and tensor powers of Pauli primitives build the
        expected multi-qubit Pauli labels."""
        newop = X ^ Y ^ Z ^ I
        self.assertEqual(newop.primitive, Pauli(label='XYZI'))
        # ``op ^ int`` is a tensor power: Y^5 is YYYYY, I^3 is III.
        kpower_op = (Y ^ 5) ^ (I ^ 3)
        self.assertEqual(kpower_op.primitive, Pauli(label='YYYYYIII'))
        kpower_op2 = (Y ^ I) ^ 4
        self.assertEqual(kpower_op2.primitive, Pauli(label='YIYIYIYI'))
        # Check immutability
        self.assertEqual(X.primitive, Pauli(label='X'))
        self.assertEqual(Y.primitive, Pauli(label='Y'))
        self.assertEqual(Z.primitive, Pauli(label='Z'))
        self.assertEqual(I.primitive, Pauli(label='I'))
    def test_composed_eval(self):
        """ Test eval of ComposedOp """
        # <1|-> amplitude: note ``-.5 ** .5`` parses as ``-(0.5 ** 0.5)``.
        self.assertAlmostEqual(Minus.eval('1'), -.5 ** .5)
    def test_evals(self):
        """Matrix elements of single Paulis via double ``eval``, checked
        against the matrix-based ``PrimitiveOp`` implementation."""
        # pylint: disable=no-member
        # TODO: Think about eval names
        self.assertEqual(Z.eval('0').eval('0'), 1)
        self.assertEqual(Z.eval('1').eval('0'), 0)
        self.assertEqual(Z.eval('0').eval('1'), 0)
        self.assertEqual(Z.eval('1').eval('1'), -1)
        self.assertEqual(X.eval('0').eval('0'), 0)
        self.assertEqual(X.eval('1').eval('0'), 1)
        self.assertEqual(X.eval('0').eval('1'), 1)
        self.assertEqual(X.eval('1').eval('1'), 0)
        self.assertEqual(Y.eval('0').eval('0'), 0)
        self.assertEqual(Y.eval('1').eval('0'), -1j)
        self.assertEqual(Y.eval('0').eval('1'), 1j)
        self.assertEqual(Y.eval('1').eval('1'), 0)
        # Check that Pauli logic eval returns same as matrix logic
        self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('0').eval('0'), 1)
        self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('1').eval('0'), 0)
        self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('0').eval('1'), 0)
        self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('1').eval('1'), -1)
        self.assertEqual(PrimitiveOp(X.to_matrix()).eval('0').eval('0'), 0)
        self.assertEqual(PrimitiveOp(X.to_matrix()).eval('1').eval('0'), 1)
        self.assertEqual(PrimitiveOp(X.to_matrix()).eval('0').eval('1'), 1)
        self.assertEqual(PrimitiveOp(X.to_matrix()).eval('1').eval('1'), 0)
        self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('0').eval('0'), 0)
        self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('1').eval('0'), -1j)
        self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('0').eval('1'), 1j)
        self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('1').eval('1'), 0)
        # Exhaustively compare a 4-qubit Pauli tensor against its matrix form.
        pauli_op = Z ^ I ^ X ^ Y
        mat_op = PrimitiveOp(pauli_op.to_matrix())
        full_basis = list(map(''.join, itertools.product('01', repeat=pauli_op.num_qubits)))
        for bstr1, bstr2 in itertools.product(full_basis, full_basis):
            # print('{} {} {} {}'.format(bstr1, bstr2, pauli_op.eval(bstr1, bstr2),
            #                            mat_op.eval(bstr1, bstr2)))
            np.testing.assert_array_almost_equal(pauli_op.eval(bstr1).eval(bstr2),
                                                 mat_op.eval(bstr1).eval(bstr2))
        # Same comparison for a composite SummedOp mixing several primitives.
        gnarly_op = SummedOp([(H ^ I ^ Y).compose(X ^ X ^ Z).tensor(Z),
                              PrimitiveOp(Operator.from_label('+r0I')),
                              3 * (X ^ CX ^ T)], coeff=3 + .2j)
        gnarly_mat_op = PrimitiveOp(gnarly_op.to_matrix())
        full_basis = list(map(''.join, itertools.product('01', repeat=gnarly_op.num_qubits)))
        for bstr1, bstr2 in itertools.product(full_basis, full_basis):
            np.testing.assert_array_almost_equal(gnarly_op.eval(bstr1).eval(bstr2),
                                                 gnarly_mat_op.eval(bstr1).eval(bstr2))
    def test_circuit_construction(self):
        """ circuit construction test """
        # (H tensor I) . CX . (H tensor I) should equal CZ.
        hadq2 = H ^ I
        cz = hadq2.compose(CX).compose(hadq2)
        qc = QuantumCircuit(2)
        qc.append(cz.primitive, qargs=range(2))
        ref_cz_mat = PrimitiveOp(CZGate()).to_matrix()
        np.testing.assert_array_almost_equal(cz.to_matrix(), ref_cz_mat)
    def test_io_consistency(self):
        """Check label / Pauli / Operator / matrix round-trips stay consistent."""
        new_op = X ^ Y ^ I
        label = 'XYI'
        # label = new_op.primitive.to_label()
        self.assertEqual(str(new_op.primitive), label)
        np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(),
                                             Operator.from_label(label).data)
        self.assertEqual(new_op.primitive, Pauli(label=label))
        # The tensor product should match the explicit Kronecker product.
        x_mat = X.primitive.to_matrix()
        y_mat = Y.primitive.to_matrix()
        i_mat = np.eye(2, 2)
        np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(),
                                             np.kron(np.kron(x_mat, y_mat), i_mat))
        hi = np.kron(H.to_matrix(), I.to_matrix())
        hi2 = Operator.from_label('HI').data
        hi3 = (H ^ I).to_matrix()
        np.testing.assert_array_almost_equal(hi, hi2)
        np.testing.assert_array_almost_equal(hi2, hi3)
        xy = np.kron(X.to_matrix(), Y.to_matrix())
        xy2 = Operator.from_label('XY').data
        xy3 = (X ^ Y).to_matrix()
        np.testing.assert_array_almost_equal(xy, xy2)
        np.testing.assert_array_almost_equal(xy2, xy3)
        # Check if numpy array instantiation is the same as from Operator
        matrix_op = Operator.from_label('+r')
        np.testing.assert_array_almost_equal(PrimitiveOp(matrix_op).to_matrix(),
                                             PrimitiveOp(matrix_op.data).to_matrix())
        # Ditto list of lists
        np.testing.assert_array_almost_equal(PrimitiveOp(matrix_op.data.tolist()).to_matrix(),
                                             PrimitiveOp(matrix_op.data).to_matrix())
        # TODO make sure this works once we resolve endianness mayhem
        # qc = QuantumCircuit(3)
        # qc.x(2)
        # qc.y(1)
        # from qiskit import BasicAer, QuantumCircuit, execute
        # unitary = execute(qc, BasicAer.get_backend('unitary_simulator')).result().get_unitary()
        # np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(), unitary)
    def test_to_matrix(self):
        """Check to_matrix through addition, scaling, tensor and composition."""
        np.testing.assert_array_equal(X.to_matrix(), Operator.from_label('X').data)
        np.testing.assert_array_equal(Y.to_matrix(), Operator.from_label('Y').data)
        np.testing.assert_array_equal(Z.to_matrix(), Operator.from_label('Z').data)
        # Sum of operators == sum of matrices.
        op1 = Y + H
        np.testing.assert_array_almost_equal(op1.to_matrix(), Y.to_matrix() + H.to_matrix())
        # Scalar (real and complex) multiplication distributes to the matrix.
        op2 = op1 * .5
        np.testing.assert_array_almost_equal(op2.to_matrix(), op1.to_matrix() * .5)
        op3 = (4 - .6j) * op2
        np.testing.assert_array_almost_equal(op3.to_matrix(), op2.to_matrix() * (4 - .6j))
        # Tensor product -> Kronecker product of matrices.
        op4 = op3.tensor(X)
        np.testing.assert_array_almost_equal(op4.to_matrix(),
                                             np.kron(op3.to_matrix(), X.to_matrix()))
        # Composition -> matrix product.
        op5 = op4.compose(H ^ I)
        np.testing.assert_array_almost_equal(op5.to_matrix(), np.dot(op4.to_matrix(),
                                                                     (H ^ I).to_matrix()))
        op6 = op5 + PrimitiveOp(Operator.from_label('+r').data)
        np.testing.assert_array_almost_equal(
            op6.to_matrix(), op5.to_matrix() + Operator.from_label('+r').data)
    def test_adjoint(self):
        """Check .adjoint() equals the conjugate transpose of the matrix."""
        gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
            PrimitiveOp(Operator.from_label('+r0IX').data)
        np.testing.assert_array_almost_equal(np.conj(np.transpose(gnarly_op.to_matrix())),
                                             gnarly_op.adjoint().to_matrix())
    def test_primitive_strings(self):
        """Check primitive_strings reports the underlying primitive kinds."""
        self.assertEqual(X.primitive_strings(), {'Pauli'})
        gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
            PrimitiveOp(Operator.from_label('+r0IX').data)
        self.assertEqual(gnarly_op.primitive_strings(), {'QuantumCircuit', 'Matrix'})
    def test_to_pauli_op(self):
        """Check to_pauli_op yields a SummedOp of PauliOps with the same matrix."""
        gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
            PrimitiveOp(Operator.from_label('+r0IX').data)
        mat_op = gnarly_op.to_matrix_op()
        pauli_op = gnarly_op.to_pauli_op()
        self.assertIsInstance(pauli_op, SummedOp)
        for p in pauli_op:
            # Every term in the Pauli decomposition must be a PauliOp.
            self.assertIsInstance(p, PauliOp)
        np.testing.assert_array_almost_equal(mat_op.to_matrix(), pauli_op.to_matrix())
    def test_circuit_permute(self):
        r"""Check CircuitOp.permute: a reversal permutation applied twice is identity."""
        perm = range(7)[::-1]
        c_op = (((CX ^ 3) ^ X) @
                (H ^ 7) @
                (X ^ Y ^ Z ^ I ^ X ^ X ^ X) @
                (Y ^ (CX ^ 3)) @
                (X ^ Y ^ Z ^ I ^ X ^ X ^ X))
        c_op_perm = c_op.permute(perm)
        # Permuting must change the operator...
        self.assertNotEqual(c_op, c_op_perm)
        c_op_id = c_op_perm.permute(perm)
        # ...and permuting back must restore the original.
        self.assertEqual(c_op, c_op_id)
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | ryzbaka.noreply@github.com |
ce57dd3bfd78b96f46a44a4c20d89d7a8798c623 | 5b07f9a868de71ce61aea540f4e437d277611cd0 | /AC04/produtoconsumidor.py | 1ac330c2b8dc5762fd0df30a3fbc3bc48fb63a9c | [] | no_license | DevAltOfCtrl/Arquitetura_e_Projeto_de_Sistemas | 2d738b328ad51a8a92113a6cd77704dbabe8f2f7 | c034ba33fd56601af68b2963a2f22e32f1fa146d | refs/heads/main | 2023-09-06T02:48:09.106918 | 2021-10-23T03:00:22 | 2021-10-23T03:00:22 | 406,593,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py |
from threading import Thread, Condition
import time
import random
# Shared shelf of products produced by Produto and consumed by Consumidor.
prateleiraMercado = []
# Condition variable guarding all access to prateleiraMercado.
controladorFila = Condition()
class Produto(Thread):
    """Producer thread: restocks the shared shelf with a random product.

    Synchronises with ``Consumidor`` via the module-level ``controladorFila``
    Condition that guards ``prateleiraMercado``.
    """
    def run(self):
        # Catalogue of products the restocker can choose from.
        produtos = ["carne", "ovo", "arroz", "feijão", "macarrão", "banana",
                    "maça", "miojo", "abacaxi", "laranja", "cerveja", "vinho",
                    "cachaça", "creme de leite", "leite condensado", "frango",
                    "café", "óleo", "açucar", "leite", "sal", "detergente",
                    "chocolate", "batata", "cenoura", "quiabo", "acerola",
                    "agua", "suco", "refrigerante", "xuxu", "pepino"]
        global prateleiraMercado
        while True:
            produto = random.choice(produtos)
            controladorFila.acquire()  # enter critical section on the shelf
            prateleiraMercado.append(produto)
            print("Funcionário: Repositor incluiu", produto, "na prateleira.")
            controladorFila.notify()  # wake one consumer waiting on the shelf
            controladorFila.release()
            time.sleep(random.random())  # random pause before restocking again
class Consumidor(Thread):
    """Consumer thread: repeatedly takes one product off the shared shelf.

    Synchronises with ``Produto`` via the module-level ``controladorFila``
    Condition that guards ``prateleiraMercado``.
    """
    def run(self):
        global prateleiraMercado
        while True:
            controladorFila.acquire()
            try:
                # Re-check the predicate in a loop: Condition.wait() can wake
                # spuriously, and with several consumers another thread may
                # empty the shelf between notify() and this thread re-acquiring
                # the lock.  The original used a single `if`, which could then
                # pop() from an empty list and raise IndexError.
                while not prateleiraMercado:
                    print("Prateleira sem produtos, cliente aguardando repositor.")
                    controladorFila.wait()
                produto = prateleiraMercado.pop(0)
                print("Consumidor: Cliente pegou", produto, "da prateleira.")
            finally:
                # Always release the lock, even if pop/print raises.
                controladorFila.release()
            time.sleep(random.random())
| [
"noreply@github.com"
] | DevAltOfCtrl.noreply@github.com |
9eab7ac63befed2d25ff7d06879122dcee603533 | bf1711af678a07b2030166d98b77f1320f16b940 | /scripts/diagnostics/gradlogpconvadv.py | 2987d2f89a2c7b9e2b7c7ba81ac2e6c924ada93e | [
"MIT"
] | permissive | SFPD/rlreloaded | 81e2ee489389145092cd425f305f9f50a7fd1ec9 | 650c64ec22ad45996c8c577d85b1a4f20aa1c692 | refs/heads/master | 2021-01-01T17:47:55.592481 | 2015-06-16T16:33:06 | 2015-06-16T16:37:19 | 37,540,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,398 | py | #!/usr/bin/env python
import matplotlib.pyplot as plt
import theano #pylint: disable=F0401
import copy
import numpy as np
from control4.misc.console_utils import Message
from control4.algs.save_load_utils import load_agent_and_mdp,get_mdp,construct_agent
from control4.algs.advantage_est import demean_timeserieses
from control4.misc.randomness import random_indices
from control4.config import floatX #pylint: disable=W0611
from control4.core.rollout import rollout
from control4.maths import symbolic
import theano.tensor as TT
# ipython --pylab -i gradlogpconvadv.py -- --num_trajs=1000 --max_steps=200 --horizon=50 --agent_module=control4.agents.nn_reactive_agent --mdp_name=mjc:3swimmer
def pairwise_correlate(x_sm, y_tn, mode='valid'):
    """FFT-based cross-correlation between every channel pair of x and y.

    x_sm: array of shape (S, M) -- S timesteps, M channels.
    y_tn: array of shape (T, N) -- T timesteps, N channels (requires T < S).
    Returns an array of shape (S-T+1, M, N) where out[:, m, n] equals
    np.correlate(x_sm[:, m], y_tn[:, n], mode='valid').
    """
    n_x, chan_x = x_sm.shape
    n_y, chan_y = y_tn.shape
    # Zero-pad both signals to S+T-1 samples so the circular correlation
    # computed via the FFT coincides with the linear correlation.
    padded_len = n_x + n_y - 1
    x_padded = np.concatenate([x_sm, np.zeros((padded_len - n_x, chan_x))])
    y_padded = np.concatenate([y_tn, np.zeros((padded_len - n_y, chan_y))])
    x_spec = np.fft.fft(x_padded, axis=0)
    y_spec = np.fft.fft(y_padded, axis=0)
    # Broadcast to all (m, n) channel pairs in one shot.
    cross_spec = x_spec[:, :, None] * np.conj(y_spec[:, None, :])
    corr = np.fft.ifft(cross_spec, axis=0).real
    if mode != "valid":
        raise NotImplementedError
    assert n_y < n_x
    return corr[:n_x - n_y + 1]
def test_pairwise_correlate():
    """Compare the FFT implementation against a direct np.correlate loop."""
    x_sm = np.random.randn(10, 3)
    y_tn = np.random.randn(8, 2)
    fft_corr = pairwise_correlate(x_sm, y_tn, 'valid')
    n_valid = x_sm.shape[0] - y_tn.shape[0] + 1
    direct = np.empty((n_valid, x_sm.shape[1], y_tn.shape[1]))
    for i in range(x_sm.shape[1]):
        for j in range(y_tn.shape[1]):
            direct[:, i, j] = np.correlate(x_sm[:, i], y_tn[:, j], mode='valid')
    assert np.allclose(fft_corr, direct, atol=1e-7)
def make_gradlogps(mdp,agent):
    """Compile a theano function mapping (observations, actions) to the
    per-timestep gradient of the action log-likelihood w.r.t. policy params.

    Returns f(o, b) -> array of shape (n_timesteps, n_params).
    """
    o = TT.matrix("o",mdp.output_dtype("o"))
    b = TT.matrix("b",agent.output_dtype("b"))
    # Symbolic action distribution parameters from the policy network.
    newa = agent.ponder({"o":o})["a"]
    logp_n = agent.cpd().logliks(newa, b)
    def onegrad(i):
        # Gradient of the i-th timestep's log-likelihood alone, flattened
        # over all policy variables.
        logp1 = theano.clone(logp_n, replace = {b:b[i:i+1],o:o[i:i+1]})[0]
        return symbolic.flatten(TT.grad(logp1, agent.policy_vars()))
    # Map over timesteps to get one gradient row per step.
    gradlogps,_ = theano.map(onegrad, TT.arange(logp_n.shape[0]))
    f = theano.function([o,b],gradlogps)
    return f
def main():
    """CLI entry point: roll out the agent many times and estimate, for each
    lag 0..horizon-1, the correlation between grad-log-prob of actions and
    the demeaned future costs; optionally plot the result.
    """
    # test_pairwise_correlate()
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--hdf")
    parser.add_argument("--agent_module")
    parser.add_argument("--mdp_name")
    parser.add_argument("--horizon",type=int,default=20)
    parser.add_argument("--num_trajs",type=int,default=100)
    parser.add_argument("--max_steps",type=int,default=500)
    parser.add_argument("--load_idx",type=int,default=-1)
    parser.add_argument("--plot_mode",choices=["off","save","interactive"],default="off")
    parser.add_argument("--plot_save_prefix")
    parser.add_argument("--gamma",type=float,default=0.99)
    parser.add_argument("--outfile")
    # ValueParams.add_to_parser(parser)
    np.random.seed(0)
    args = parser.parse_args()
    # Either load agent+mdp from an HDF snapshot, or construct them fresh
    # from a module/mdp name -- exactly one of the two modes must be given.
    assert bool(args.hdf) != (bool(args.agent_module) and bool(args.mdp_name))
    if args.hdf:
        agent, mdp, _hdf = load_agent_and_mdp(args.hdf,args.load_idx)
    elif args.agent_module:
        mdp = get_mdp(args.mdp_name)
        agent = construct_agent({"agent_module":args.agent_module},mdp)
    fs_gradlogps = make_gradlogps(mdp,agent)
    n_params = agent.policy.size()
    horizon=args.horizon
    from collections import namedtuple
    # o: observations, c: costs, b: actions, concatenated over the trajectory.
    Path = namedtuple("Path",['o','c','b'])
    save_arrs = ["o","b","c"]
    with Message("Doing rollouts"):
        paths = []
        for i_path in xrange(args.num_trajs):
            if i_path % 20 == 0:
                print "%i/%i done"%(i_path,args.num_trajs)
            init,traj = rollout(mdp, agent, args.max_steps,save_arrs=save_arrs)
            # Align observations with the actions taken from them.
            o = np.concatenate([init["o"]]+traj["o"][:-1])
            c = np.concatenate(traj["c"])
            b = np.concatenate(traj["b"])
            paths.append(Path(o,c,b))
    # vf = LinearVF(use_m=False, use_o=True, legendre_degree = 2, use_product_features=False)
    # vf.make_funcs()
    # with Message("Fitting value function"):
    #     fit_linear_vf_single_path(vf, paths, args)
    li_c_t = [path.c.sum(axis=1) for path in paths]
    li_dmc_t = copy.deepcopy(li_c_t)
    # Remove the per-timestep mean so correlations measure advantage-like signal.
    demean_timeserieses(li_dmc_t)
    # li_delta_t = []
    # for (path,c_t) in zip(paths,li_c_t):
    #     v_t = vf.fs_vfunc(path.prevm_tg,path.o_tf)
    #     li_delta_t.append( c_t + args.gamma*v_t[1:] - v_t[:-1] )
    li_serieses = zip(li_dmc_t)
    series_names=["demeaned costs"]
    n_series = len(series_names)
    li_corr = [np.zeros((horizon,n_params)) for _ in xrange(n_series)]
    # Accumulators for mean and second moment of the lagged correlations,
    # indexed (lag t, series k, parameter z).
    corr_tkz = np.zeros((horizon,n_series,n_params))
    sqcorr_tkz = np.zeros((horizon,n_series,n_params))
    count = 0
    for (i_path,path,serieses) in zip(xrange(len(paths)),paths,li_serieses):
        if i_path % 20 == 0:
            print "%i/%i done"%(i_path,len(paths))
        sig_tk = np.array(serieses).T
        grad_tz = fs_gradlogps(path.o,path.b)
        # Lagged correlation of each cost series against each grad component.
        newcorr_tzk = pairwise_correlate( sig_tk, grad_tz[:-horizon+1], mode='valid')
        corr_tkz += newcorr_tzk
        sqcorr_tkz += newcorr_tzk**2
        # for (li_series_t,corr_tz) in zip(li_li_series,li_corr):
        # for z in xrange(n_params):
        # corr_tkz[:,z] += scipy.signal.correlate(li_series_t[i_path], grad_tz[:-horizon+1,z],mode='valid')
        # count += (grad_tz.shape[0]-horizon)
        count += 1
    corr_tkz /= count
    sqcorr_tkz /= count
    stderr_tkz = np.sqrt( (sqcorr_tkz - corr_tkz**2)/len(paths) )
    # NOTE stderr is not totally legit
    plot_stderr = True
    # Plot only a random subset of parameter components to keep figures legible.
    zs = random_indices(n_params,30)
    # plot_stderr = False
    # zs = np.arange(n_params)
    for (i_series,_corr_tz) in enumerate(li_corr):
        plt.figure(i_series+1)
        plt.clf()
        plt.title(series_names[i_series])
        for z in zs:
            line, = plt.plot(corr_tkz[:,i_series,z])
            if plot_stderr: plt.fill_between(np.arange(horizon), corr_tkz[:,i_series,z] - stderr_tkz[:,i_series,z],corr_tkz[:,i_series,z] + stderr_tkz[:,i_series,z],alpha=.1,color=line.get_color())
    if args.outfile: plt.savefig(args.outfile)
if args.outfile: plt.savefig(args.outfile)
if __name__ == "__main__":
main() | [
""
] | |
9568a5759861050bec15f02ba00c8e901ff92fc8 | 70f854c9a34be625b882bde0e5c5269522842ccf | /week 2/sumposled.py | 2bd590a34df68a174f615b039d21e36b26771d83 | [] | no_license | MariaMedvede/coursera | 6d3d897c05045e250d3b5e6e9b25a1d2de3a0df9 | 7ccc53845535bc9e341d3c42d9475e832b4cc7f4 | refs/heads/master | 2020-09-15T07:03:31.479505 | 2019-12-01T19:29:08 | 2019-12-01T19:29:08 | 223,374,208 | 0 | 2 | null | 2019-12-01T19:29:10 | 2019-11-22T09:55:15 | Python | UTF-8 | Python | false | false | 109 | py | now = int(input())
seqSum = 0
while now != 0:
seqSum = seqSum + now
now = int(input())
print(seqSum)
| [
"manya1999m09@yandex.ru"
] | manya1999m09@yandex.ru |
977efed259353b51d96b6ea3d218a036d809fef3 | d456bae1077867108bc7cc3fcc34f18b0ef75a30 | /linkedin/linkedin_top25byname_parser.py | b516009ad5cfacb9570a25261c707f0dd13eb0e8 | [] | no_license | vlivashkin/parsers | efb1cfc3f3bd909b2a8ffc92afbbb1c1154c9279 | a0334df863b4cf94cb567f3b5bbd00aab07f4444 | refs/heads/master | 2021-05-30T18:09:50.482475 | 2016-03-11T19:17:04 | 2016-03-11T19:17:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,975 | py | import requests
from bs4 import BeautifulSoup
from requests.exceptions import ChunkedEncodingError
class LinkedinNoAuthTop25ByNameParser:
    """Scrape LinkedIn's public name-directory page (no login) for people
    matching a first/last name.

    NOTE(review): depends on the live page structure ('profile-card' divs)
    and on requests' redirect history to detect the single-match case --
    verify against the current site before reuse.
    """
    # Desktop browser User-Agent so the public directory page is served.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'}
    # Directory URL template: formatted with (first_name, last_name).
    top25_url = 'https://www.linkedin.com/pub/dir/{}/{}/'
    def get_people_by_name(self, first_name, last_name):
        """Return a list of person dicts (name/url, plus img_url on the
        multi-result page); returns [] when the response is cut short."""
        url = self.top25_url.format(first_name, last_name)
        try:
            response = requests.get(url, headers=self.headers)
        except ChunkedEncodingError:
            print("Incomplete read(")
            return []
        if response.history:  # if there are only one person with this name linkedin redirects you to his page
            person = {
                'name': first_name + " " + last_name,
                'url': response.url,
            }
            return [person]
        else:
            page = response.text
            soup = BeautifulSoup(page, "html.parser")
            people = []
            for profile in soup.find_all('div', 'profile-card'):
                content = profile.find("div", "content")
                img_url = profile.find("a", "profile-img").img['src']
                person = {
                    'name': content.h3.a.text,
                    'url': content.h3.a['href'],
                    # 'headline': content.find('p', "headline").text,
                    # "location": content.find("dl", "basic").findAll("dd")[0].text,
                    # 'industry': content.find("dl", "basic").findAll("dd")[1].text,
                    # LinkedIn serves a placeholder "ghost" image for members
                    # without a photo; normalise that to the literal 'ghost'.
                    'img_url': img_url if "ghost" not in img_url else "ghost"
                }
                people.append(person)
            return people
def main():
    """Demo entry point: look up a sample name and print every match."""
    scraper = LinkedinNoAuthTop25ByNameParser()
    results = scraper.get_people_by_name("Vladimir", "Ivashkin")
    for result in results:
        print(result)
if __name__ == "__main__":
main()
| [
"illusionww@gmail.com"
] | illusionww@gmail.com |
c52d673bdcbfae703d470556fea4604762501224 | 91f2e23782b05aa1fb273f3170c50dc4185e8dc1 | /clif/pybind11/staging/virtual_funcs_basics_test.py | 6238c3144d5233fd2ad32b961ceef33c93be6b74 | [
"Apache-2.0"
] | permissive | anukaal/clif | 152fd58e575b90d626a300875aac71cdf69ec6a3 | 8ff675bf93599f4d4a4865376b441d8d0551fd54 | refs/heads/main | 2023-08-03T19:47:00.538660 | 2021-09-14T05:50:43 | 2021-09-30T01:00:14 | 406,238,691 | 0 | 0 | Apache-2.0 | 2021-09-14T05:39:04 | 2021-09-14T05:39:03 | null | UTF-8 | Python | false | false | 3,058 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for clif.pybind11.staging.virtual_funcs_basics.
This file is a copy of clif/testing/python/virtual_funcs_basics_test.py.
"""
import unittest
from clif.pybind11.staging import virtual_funcs_basics
class B(virtual_funcs_basics.B):
  """Python subclass of the wrapped B; stores the last set_c value in self.c."""
  def __init__(self):
    virtual_funcs_basics.B.__init__(self)
    self.c = -1  # sentinel until set_c is called
  def set_c(self, v):
    # Override of the virtual method: calls from C++ land here.
    self.c = v
class K(virtual_funcs_basics.K):
  """Subclass overriding the virtual inc() to add n to the counter self.i."""
  def inc(self, n):
    self.i += n
class L(virtual_funcs_basics.Q):
  """Bounded queue implementing the virtual PossiblyPush() interface."""
  def __init__(self, max_len):
    virtual_funcs_basics.Q.__init__(self)
    self._q = []  # backing storage for queued items
    self._max = max_len  # capacity limit
  def data(self):
    # Return a copy so callers cannot mutate the internal queue.
    return list(self._q)
  def PossiblyPush(self, data):
    # Accept the item only while below capacity; the boolean result tells
    # the (C++) caller whether the push succeeded.
    if len(self._q) < self._max:
      self._q.append(data)
      return True
    return False
class AbstractClassNonDefConstImpl(
    virtual_funcs_basics.AbstractClassNonDefConst):
  """Concrete implementation of the abstract base class."""
  def DoSomething(self):
    # Implements the pure virtual method as the product of a and b.
    return self.a * self.b
class ClassNonDefConstImpl(virtual_funcs_basics.ClassNonDefConst):
  """Override with extra Python state to exercise gc and invalidation."""
  def __init__(self, a, b):
    super().__init__(a, b)
    self.c = [1, 2, 3]  # Must have a non-trivial container to enable gc.
    # Remove self.invalidated after gaining (limited) access to invalidated ptr.
    self.invalidated = False
  def DoSomething(self):
    # Returns -1 once the instance has been flagged as invalidated.
    return -1 if self.invalidated else self.a * self.b
class VirtualFuncsTest(unittest.TestCase):
  """Tests for virtual-method dispatch across the CLIF boundary."""
  def testInitConcreteClassWithVirtualMethods(self):
    """Concrete wrapped classes are constructible and callable directly."""
    b = virtual_funcs_basics.B()
    b.set_c(2)
    self.assertEqual(b.c, 2)
    c = virtual_funcs_basics.ClassNonDefConst(1, 2)
    self.assertEqual(c.DoSomething(), 3)
  def testBasicCall(self):
    """A Python override is reachable both directly and through C++ (Bset)."""
    b = B()
    b.set_c(2)
    self.assertEqual(b.c, 2)
    virtual_funcs_basics.Bset(b, 4)
    self.assertEqual(b.c, 4)
  def testVirtual(self):
    """C++ helpers dispatch to the Python overrides of virtual methods."""
    self.assertEqual(virtual_funcs_basics.seq(K(), 2, 6), [0, 2, 4, 6])
    abc_non_def_impl = AbstractClassNonDefConstImpl(4, 5)
    self.assertEqual(abc_non_def_impl.DoSomething(), 20)
    self.assertEqual(virtual_funcs_basics.DoSomething1(abc_non_def_impl), 20)
    non_def_impl = ClassNonDefConstImpl(4, 5)
    self.assertEqual(non_def_impl.DoSomething(), 20)
    self.assertEqual(virtual_funcs_basics.DoSomething2(non_def_impl), 20)
  def testVirtual2(self):
    """add_seq pushes through the virtual PossiblyPush up to the capacity."""
    q = L(3)
    self.assertEqual(virtual_funcs_basics.add_seq(q, 2, 6), 3)
    self.assertEqual(q.data(), [0, 2, 4])
  def testVirtualProperty(self):
    """Setting the property goes through the virtual setter (abs value)."""
    c = virtual_funcs_basics.D()
    c.pos_c = -1
    self.assertEqual(c.pos_c, 1)
if __name__ == '__main__':
unittest.main()
| [
"rwgk@google.com"
] | rwgk@google.com |
389cfec1280691576be87dab17cbace3b76cb636 | 06bf95f2d0310f2a740affdc9d36b3303ecb4645 | /WebMallProj/WebMall/apps.py | 39848b31b8cc3a26d2d91bbce9311d10a42491e8 | [] | no_license | vinee-sha/WebMall | 3dcf9d1e8c1c91c62f15c0bd534f009995063c3e | e4dd622782a26d1afc0ff12ccda0972401b9a4ba | refs/heads/master | 2022-12-25T16:47:04.462969 | 2020-10-04T13:36:33 | 2020-10-04T13:36:33 | 301,137,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | from django.apps import AppConfig
class WebmallConfig(AppConfig):
    """Django application configuration for the WebMall app."""
    name = 'WebMall'
| [
"vineeshanamana321@gmail.com"
] | vineeshanamana321@gmail.com |
1b32af6d4a9df22d779af09836fddb308d08536b | a331345b1269d863107ebaf525fb9e06443722c6 | /drf-intro/simple_crud/measurements/admin.py | 987929793feb8430a7621da7c283948f38f97a89 | [] | no_license | Pfliger/dj-homeworks | 19de12f8b2146751b24c89e59bdd307c571ff71d | c9f5d5070a467d4f7b35d416b8f91ad290008ab6 | refs/heads/master | 2023-03-09T01:09:49.251297 | 2021-03-05T16:24:22 | 2021-03-05T16:24:22 | 335,931,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | from django.contrib import admin
from measurements.models import Project, Measurement
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
    """Default admin interface for Project; no customisation needed yet."""
    pass
@admin.register(Measurement)
class MeasurementAdmin(admin.ModelAdmin):
    """Default admin interface for Measurement; no customisation needed yet."""
    pass
"pfliger@list.ru"
] | pfliger@list.ru |
d1ca5c3c0478ab8a9e58042e82b7a9186a494789 | fd93fbb2162423b66636c8576548b4ad7f0564d3 | /Data_Structures/SkipList.py | d067f998de54422ab1108059a642ad543da37125 | [] | no_license | AlpacaMax/Algorithm_Miscellaneous | a595436386eb68353bf98f4ced09bf2ba06874c5 | e0a731ce0642cd602bcb8e8b7542d4b3806d9916 | refs/heads/master | 2020-11-27T21:26:38.454722 | 2020-10-22T19:20:58 | 2020-10-22T19:20:58 | 229,607,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,169 | py | import random
class SkipList:
    """Sorted map implemented as a probabilistic skip list.

    Supports dict-style access (``sl[key]``, ``sl[key] = v``, ``del sl[key]``),
    ``len`` and in-order iteration over keys.  Each level is a doubly linked
    list with header/trailer sentinels; nodes for the same key are stacked
    into towers linked via ``above``/``below``.  ``header``/``trailer``
    delimit the top-most (always empty) level, ``g_header``/``g_trailer``
    the ground level, which holds every item.
    """
    class Item:
        """A key/value pair held by a node."""
        def __init__(self, key, value=None):
            self.key = key
            self.value = value
        def __repr__(self):
            return "({}, {})".format(self.key, self.value)
    class Node:
        """Quad-linked node: prev/next within a level, above/below in a tower."""
        def __init__(self, item=None, prev=None, next=None, above=None, below=None):
            self.item = item
            self.next = next
            self.prev = prev
            self.above = above
            self.below = below
        def disconnect(self):
            # Clear all links and the item.  (Bug fix: the original called
            # self.__init__(self), which stored the node as its own item and
            # left it self-referencing.)
            self.__init__()
    def __init__(self):
        # Top (always empty) level sentinels.
        self.header = SkipList.Node()
        self.trailer = SkipList.Node()
        self.header.next = self.trailer
        self.trailer.prev = self.header
        # Ground level sentinels; actual items live between them.
        self.g_header = SkipList.Node()
        self.g_trailer = SkipList.Node()
        self.g_header.next = self.g_trailer
        self.g_trailer.prev = self.g_header
        # Vertical links between the two initial levels.
        self.header.below = self.g_header
        self.g_header.above = self.header
        self.trailer.below = self.g_trailer
        self.g_trailer.above = self.trailer
        self.size = 0    # number of stored items
        self.height = 1  # number of levels below the top sentinel level
    def __len__(self):
        return self.size
    def is_empty(self):
        return self.size == 0
    def find(self, key):
        """Return the ground-level node with the largest key <= ``key``.

        Returns the ground header sentinel (whose item is None) when every
        stored key exceeds ``key``.  Bug fix: the original dropped straight
        to the ground level and scanned it linearly (O(n), defeating the
        skip structure); this is the standard scan-right-then-drop search
        with O(log n) expected time.
        """
        cursor = self.header
        while cursor.below is not None:
            cursor = cursor.below
            while cursor.next.item is not None and key >= cursor.next.item.key:
                cursor = cursor.next
        return cursor
    def insert(self, key, value=None):
        """Insert ``key``/``value``, or update the value if ``key`` exists."""
        node = self.find(key)
        if node.item is not None and node.item.key == key:
            node.item.value = value
        else:
            cursor = self.add_after(node, key, value)
            self.size += 1
            # Grow a tower above the new node while fair coin flips succeed.
            index = 1
            while self.flip():
                index += 1
                if index > self.height:
                    # Open a fresh (empty) level just below the top sentinels.
                    self.add_level_below(self.header, self.trailer)
                cursor = self.add_above(cursor, key)
    def add_level_below(self, header, trailer):
        """Splice a new empty level directly below ``header``/``trailer``."""
        below_header = header.below
        below_trailer = trailer.below
        new_header = SkipList.Node(above=header, below=below_header)
        new_trailer = SkipList.Node(above=trailer, below=below_trailer)
        new_header.next = new_trailer
        new_trailer.prev = new_header
        header.below = new_header
        below_header.above = new_header
        trailer.below = new_trailer
        below_trailer.above = new_trailer
        self.height += 1
    def __getitem__(self, key):
        node = self.find(key)
        if node.item is None or node.item.key != key:
            raise KeyError(str(key) + " does not exist!")
        return node.item.value
    def __setitem__(self, key, value):
        # insert() already handles both the update and the new-key case,
        # so there is no need to duplicate its logic here.
        self.insert(key, value)
    def __delitem__(self, key):
        node = self.find(key)
        if node.item is None or node.item.key != key:
            raise KeyError(str(key) + " does not exist!")
        # Remove the entire tower, bottom to top.
        cursor = node
        while cursor is not None:
            node_to_delete = cursor
            cursor = cursor.above
            self.delete_node(node_to_delete)
        self.size -= 1
    def __iter__(self):
        """Yield the stored keys in ascending order."""
        cursor = self.g_header.next
        while cursor is not self.g_trailer:
            yield cursor.item.key
            cursor = cursor.next
    def add_after(self, node, key, value=None):
        """Insert a new node holding (key, value) right after ``node``."""
        prev_node = node
        next_node = node.next
        new_node = SkipList.Node(item=SkipList.Item(key, value),
                                 next=next_node, prev=prev_node)
        prev_node.next = new_node
        next_node.prev = new_node
        return new_node
    def add_above(self, node, key, value=None):
        """Add a tower node for ``key`` one level above ``node``."""
        # Walk left until some node has a link to the level above, then use
        # that node as the insertion point on the upper level.
        cursor = node.prev
        while cursor.above is None:
            cursor = cursor.prev
        cursor = cursor.above
        above_node = self.add_after(cursor, key, value)
        node.above = above_node
        above_node.below = node
        return above_node
    def delete_node(self, node):
        """Unlink ``node`` from its level and return its item."""
        prev_node = node.prev
        next_node = node.next
        prev_node.next = next_node
        next_node.prev = prev_node
        item = node.item
        node.disconnect()
        return item
    def flip(self):
        # Fair coin deciding whether a tower grows another level.
        return random.random() > 0.5
    def display(self):
        """Debug helper: print each tower as a '-'-separated column of items."""
        header = self.header
        while header.below is not None:
            header = header.below
        cursor = header
        while header is not None:
            while cursor is not None:
                print(cursor.item, end='-')
                cursor = cursor.above
            print()
            header = header.next
            cursor = header
if __name__ == "__main__":
    # Smoke test: insert keys 0..9 and print them back in sorted order.
    sl = SkipList()
    for i in range(10):
        sl[i] = i
    for i in sl:
        print(i)
"gabrielyang233@outlook.com"
] | gabrielyang233@outlook.com |
3ac5c2036716fd233c20c1b5d0ed1d8bf60ea49a | 49ae5bd9089a2b096fabc970156803b21b1be9d7 | /env/Lib/site-packages/django_extensions/management/commands/sync_s3.py | 7efb71ae0fe0937e29582c1f031e37010ee7bd81 | [] | no_license | scortes1989/sfotipy | ea7cfd4abe52dfb9b5094397a9f7a80e6d78713d | d3ed677f8bee0452f1ac14dfc718ca5091cf95eb | refs/heads/master | 2022-11-06T21:06:47.534330 | 2015-05-15T20:26:48 | 2015-05-15T20:26:48 | 33,416,364 | 0 | 1 | null | 2022-10-24T08:55:08 | 2015-04-04T18:55:33 | Python | UTF-8 | Python | false | false | 15,704 | py | """
Sync Media to S3
================
Django command that scans all files in your settings.MEDIA_ROOT and
settings.STATIC_ROOT folders and uploads them to S3 with the same directory
structure.
This command can optionally do the following but it is off by default:
* gzip compress any CSS and Javascript files it finds and adds the appropriate
'Content-Encoding' header.
* set a far future 'Expires' header for optimal caching.
* upload only media or static files.
* use any other provider compatible with Amazon S3.
* set other than 'public-read' ACL.
Note: This script requires the Python boto library and valid Amazon Web
Services API keys.
Required settings.py variables:
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
AWS_BUCKET_NAME = ''
When you call this command with the `--renamegzip` param, it will add
the '.gz' extension to the file name. But Safari just doesn't recognize
'.gz' files and your site won't work on it! To fix this problem, you can
set any other extension (like .jgz) in the `SYNC_S3_RENAME_GZIP_EXT`
variable.
Command options are:
-p PREFIX, --prefix=PREFIX
The prefix to prepend to the path on S3.
--gzip Enables gzipping CSS and Javascript files.
--expires Enables setting a far future expires header.
--force Skip the file mtime check to force upload of all
files.
--filter-list Override default directory and file exclusion
filters. (enter as comma separated line)
--renamegzip Enables renaming of gzipped files by appending '.gz'.
to the original file name. This way your original
assets will not be replaced by the gzipped ones.
You can change the extension setting the
`SYNC_S3_RENAME_GZIP_EXT` var in your settings.py
file.
--invalidate Invalidates the objects in CloudFront after uploading
stuff to s3.
--media-only Only MEDIA_ROOT files will be uploaded to S3.
--static-only Only STATIC_ROOT files will be uploaded to S3.
--s3host Override default s3 host.
--acl Override default ACL settings ('public-read' if
settings.AWS_DEFAULT_ACL is not defined).
TODO:
* Use fnmatch (or regex) to allow more complex FILTER_LIST rules.
"""
import datetime
import email
import gzip
import mimetypes
import os
import time
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django_extensions.compat import StringIO
from django_extensions.management.utils import signalcommand
# Make sure boto is available
try:
import boto
import boto.exception
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class Command(BaseCommand):
# Extra variables to avoid passing these around
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
AWS_BUCKET_NAME = ''
AWS_CLOUDFRONT_DISTRIBUTION = ''
SYNC_S3_RENAME_GZIP_EXT = ''
DIRECTORIES = ''
FILTER_LIST = ['.DS_Store', '.svn', '.hg', '.git', 'Thumbs.db']
GZIP_CONTENT_TYPES = (
'text/css',
'application/javascript',
'application/x-javascript',
'text/javascript'
)
uploaded_files = []
upload_count = 0
skip_count = 0
option_list = BaseCommand.option_list + (
make_option('-p', '--prefix',
dest='prefix',
default=getattr(settings, 'SYNC_S3_PREFIX', ''),
help="The prefix to prepend to the path on S3."),
make_option('-d', '--dir',
dest='dir',
help="Custom static root directory to use"),
make_option('--s3host',
dest='s3host',
default=getattr(settings, 'AWS_S3_HOST', ''),
help="The s3 host (enables connecting to other providers/regions)"),
make_option('--acl',
dest='acl',
default=getattr(settings, 'AWS_DEFAULT_ACL', 'public-read'),
help="Enables to override default acl (public-read)."),
make_option('--gzip',
action='store_true', dest='gzip', default=False,
help="Enables gzipping CSS and Javascript files."),
make_option('--renamegzip',
action='store_true', dest='renamegzip', default=False,
help="Enables renaming of gzipped assets to have '.gz' appended to the filename."),
make_option('--expires',
action='store_true', dest='expires', default=False,
help="Enables setting a far future expires header."),
make_option('--force',
action='store_true', dest='force', default=False,
help="Skip the file mtime check to force upload of all files."),
make_option('--filter-list', dest='filter_list',
action='store', default='',
help="Override default directory and file exclusion filters. (enter as comma seperated line)"),
make_option('--invalidate', dest='invalidate', default=False,
action='store_true',
help='Invalidates the associated objects in CloudFront'),
make_option('--media-only', dest='media_only', default='',
action='store_true',
help="Only MEDIA_ROOT files will be uploaded to S3"),
make_option('--static-only', dest='static_only', default='',
action='store_true',
help="Only STATIC_ROOT files will be uploaded to S3"),
)
help = 'Syncs the complete MEDIA_ROOT structure and files to S3 into the given bucket name.'
args = 'bucket_name'
can_import_settings = True
    @signalcommand
    def handle(self, *args, **options):
        """Validate settings, copy CLI options onto the instance, run the
        sync, and optionally fire a CloudFront invalidation.

        Raises CommandError when required settings are missing or when
        --media-only and --static-only are combined.
        """
        if not HAS_BOTO:
            raise ImportError("The boto Python library is not installed.")
        # Check for AWS keys in settings
        if not hasattr(settings, 'AWS_ACCESS_KEY_ID') or not hasattr(settings, 'AWS_SECRET_ACCESS_KEY'):
            raise CommandError('Missing AWS keys from settings file. Please supply both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.')
        else:
            self.AWS_ACCESS_KEY_ID = settings.AWS_ACCESS_KEY_ID
            self.AWS_SECRET_ACCESS_KEY = settings.AWS_SECRET_ACCESS_KEY
        if not hasattr(settings, 'AWS_BUCKET_NAME'):
            raise CommandError('Missing bucket name from settings file. Please add the AWS_BUCKET_NAME to your settings file.')
        else:
            if not settings.AWS_BUCKET_NAME:
                raise CommandError('AWS_BUCKET_NAME cannot be empty.')
            self.AWS_BUCKET_NAME = settings.AWS_BUCKET_NAME
        if not hasattr(settings, 'MEDIA_ROOT'):
            raise CommandError('MEDIA_ROOT must be set in your settings.')
        else:
            if not settings.MEDIA_ROOT:
                raise CommandError('MEDIA_ROOT must be set in your settings.')
        self.AWS_CLOUDFRONT_DISTRIBUTION = getattr(settings, 'AWS_CLOUDFRONT_DISTRIBUTION', '')
        self.SYNC_S3_RENAME_GZIP_EXT = \
            getattr(settings, 'SYNC_S3_RENAME_GZIP_EXT', '.gz')
        # Copy command-line options onto the instance for use by the helpers.
        self.verbosity = int(options.get('verbosity'))
        self.prefix = options.get('prefix')
        self.do_gzip = options.get('gzip')
        self.rename_gzip = options.get('renamegzip')
        self.do_expires = options.get('expires')
        self.do_force = options.get('force')
        self.invalidate = options.get('invalidate')
        self.DIRECTORIES = options.get('dir')
        self.s3host = options.get('s3host')
        self.default_acl = options.get('acl')
        self.FILTER_LIST = getattr(settings, 'FILTER_LIST', self.FILTER_LIST)
        filter_list = options.get('filter_list')
        if filter_list:
            # command line option overrides default filter_list and
            # settings.filter_list
            self.FILTER_LIST = filter_list.split(',')
        self.media_only = options.get('media_only')
        self.static_only = options.get('static_only')
        # Get directories
        if self.media_only and self.static_only:
            raise CommandError("Can't use --media-only and --static-only together. Better not use anything...")
        elif self.media_only:
            self.DIRECTORIES = [settings.MEDIA_ROOT]
        elif self.static_only:
            self.DIRECTORIES = [settings.STATIC_ROOT]
        elif self.DIRECTORIES:
            self.DIRECTORIES = [self.DIRECTORIES]
        else:
            self.DIRECTORIES = [settings.MEDIA_ROOT, settings.STATIC_ROOT]
        # Now call the syncing method to walk the MEDIA_ROOT directory and
        # upload all files found.
        self.sync_s3()
        # Sending the invalidation request to CloudFront if the user
        # requested this action
        if self.invalidate:
            self.invalidate_objects_cf()
        print("")
        print("%d files uploaded." % self.upload_count)
        print("%d files skipped." % self.skip_count)
    def open_cf(self):
        """
        Returns an open connection to CloudFront using the configured
        AWS credentials.
        """
        return boto.connect_cloudfront(
            self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY)
def invalidate_objects_cf(self):
"""
Split the invalidation request in groups of 1000 objects
"""
if not self.AWS_CLOUDFRONT_DISTRIBUTION:
raise CommandError(
'An object invalidation was requested but the variable '
'AWS_CLOUDFRONT_DISTRIBUTION is not present in your settings.')
# We can't send more than 1000 objects in the same invalidation
# request.
chunk = 1000
# Connecting to CloudFront
conn = self.open_cf()
# Splitting the object list
objs = self.uploaded_files
chunks = [objs[i:i + chunk] for i in range(0, len(objs), chunk)]
# Invalidation requests
for paths in chunks:
conn.create_invalidation_request(
self.AWS_CLOUDFRONT_DISTRIBUTION, paths)
def sync_s3(self):
"""
Walks the media/static directories and syncs files to S3
"""
bucket, key = self.open_s3()
for directory in self.DIRECTORIES:
os.path.walk(directory, self.upload_s3, (bucket, key, self.AWS_BUCKET_NAME, directory))
def compress_string(self, s):
"""Gzip a given string."""
zbuf = StringIO()
zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
def get_s3connection_kwargs(self):
"""Returns connection kwargs as a dict"""
kwargs = {}
if self.s3host:
kwargs['host'] = self.s3host
return kwargs
def open_s3(self):
"""
Opens connection to S3 returning bucket and key
"""
conn = boto.connect_s3(
self.AWS_ACCESS_KEY_ID,
self.AWS_SECRET_ACCESS_KEY,
**self.get_s3connection_kwargs())
try:
bucket = conn.get_bucket(self.AWS_BUCKET_NAME)
except boto.exception.S3ResponseError:
bucket = conn.create_bucket(self.AWS_BUCKET_NAME)
return bucket, boto.s3.key.Key(bucket)
def upload_s3(self, arg, dirname, names):
"""
This is the callback to os.path.walk and where much of the work happens
"""
bucket, key, bucket_name, root_dir = arg
# Skip directories we don't want to sync
if os.path.basename(dirname) in self.FILTER_LIST:
# prevent walk from processing subfiles/subdirs below the ignored one
del names[:]
return
# Later we assume the MEDIA_ROOT ends with a trailing slash
if not root_dir.endswith(os.path.sep):
root_dir = root_dir + os.path.sep
for file in names:
headers = {}
if file in self.FILTER_LIST:
continue # Skip files we don't want to sync
filename = os.path.join(dirname, file)
if os.path.isdir(filename):
continue # Don't try to upload directories
file_key = filename[len(root_dir):]
if self.prefix:
file_key = '%s/%s' % (self.prefix, file_key)
# Check if file on S3 is older than local file, if so, upload
if not self.do_force:
s3_key = bucket.get_key(file_key)
if s3_key:
s3_datetime = datetime.datetime(*time.strptime(
s3_key.last_modified, '%a, %d %b %Y %H:%M:%S %Z')[0:6])
local_datetime = datetime.datetime.utcfromtimestamp(
os.stat(filename).st_mtime)
if local_datetime < s3_datetime:
self.skip_count += 1
if self.verbosity > 1:
print("File %s hasn't been modified since last being uploaded" % file_key)
continue
# File is newer, let's process and upload
if self.verbosity > 0:
print("Uploading %s..." % file_key)
content_type = mimetypes.guess_type(filename)[0]
if content_type:
headers['Content-Type'] = content_type
else:
headers['Content-Type'] = 'application/octet-stream'
file_obj = open(filename, 'rb')
file_size = os.fstat(file_obj.fileno()).st_size
filedata = file_obj.read()
if self.do_gzip:
# Gzipping only if file is large enough (>1K is recommended)
# and only if file is a common text type (not a binary file)
if file_size > 1024 and content_type in self.GZIP_CONTENT_TYPES:
filedata = self.compress_string(filedata)
if self.rename_gzip:
# If rename_gzip is True, then rename the file
# by appending an extension (like '.gz)' to
# original filename.
file_key = '%s.%s' % (
file_key, self.SYNC_S3_RENAME_GZIP_EXT)
headers['Content-Encoding'] = 'gzip'
if self.verbosity > 1:
print("\tgzipped: %dk to %dk" % (file_size / 1024, len(filedata) / 1024))
if self.do_expires:
# HTTP/1.0
headers['Expires'] = '%s GMT' % (email.Utils.formatdate(time.mktime((datetime.datetime.now() + datetime.timedelta(days=365 * 2)).timetuple())))
# HTTP/1.1
headers['Cache-Control'] = 'max-age %d' % (3600 * 24 * 365 * 2)
if self.verbosity > 1:
print("\texpires: %s" % headers['Expires'])
print("\tcache-control: %s" % headers['Cache-Control'])
try:
key.name = file_key
key.set_contents_from_string(filedata, headers, replace=True,
policy=self.default_acl)
except boto.exception.S3CreateError as e:
print("Failed: %s" % e)
except Exception as e:
print(e)
raise
else:
self.upload_count += 1
self.uploaded_files.append(file_key)
file_obj.close()
| [
"SCD@SCD-PC.inpact.net"
] | SCD@SCD-PC.inpact.net |
942f5be0fdd8ad8c418f4b959e1310af74cb20ff | 3c76dd3d7eda65236ff47da97c04d7b1b6eb7c15 | /recursion/questions/pow.py | fa7c82cd42821e8d05266f7451e8c4c63bed16e0 | [
"MIT"
] | permissive | devclassio/200-interview-algorithm-questions | b672629c93ca99fcf626cb34254f0ef1b5e2731d | ab6a41f3399d8ae58acf0aebb285ca6de744433c | refs/heads/main | 2023-02-21T01:39:03.060131 | 2021-01-25T16:11:24 | 2021-01-25T16:11:24 | 330,233,973 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | '''
Can't cache here! arr is too big! need to use this math trick :)
'''
class OmerSolution:
    """Memoised power: stores x**k for k < |n| in ``self.cache``.

    Behaviourally equivalent to the recursive one-step memoisation
    (x**k = x * x**(k-1)), rewritten as a single forward loop so the
    multiplication order — and therefore every float result — is identical.
    """
    def myPow(self, x, n):
        exponent = abs(n)
        self.cache = [None] * (exponent + 1)
        self.cache[0] = 1
        value = 1
        # Fill cache[k] = x**k for 1 <= k < exponent, exactly the entries
        # the recursive helper used to leave behind.
        for k in range(1, exponent):
            value = x * value
            self.cache[k] = value
        if exponent > 0:
            value = x * value  # value == x**exponent (cache[exponent] stays None)
        return value if n > 0 else 1 / value
class Solution:
    """Compute x**n in O(log n) multiplications by recursive squaring."""

    def myPow(self, x, n):
        """Return ``x`` raised to the integer power ``n``.

        Negative exponents are handled by inverting the base first.  All
        exponent arithmetic uses integer floor division (``//``): the
        previous implementation computed ``n / 2``, which is a float in
        Python 3 and silently loses precision for |n| > 2**53.
        """
        def helper(base, exp):
            # base ** exp for a non-negative integer exp, by squaring.
            if exp == 0:
                return 1
            half = helper(base * base, exp // 2)
            return half if exp % 2 == 0 else base * half

        if n < 0:
            x = 1 / x
            n = -n
        return helper(x, n)
| [
"arieg419@gmail.com"
] | arieg419@gmail.com |
bef2865099cfa242c3bdb6201a9e938682c2a4d7 | 7d39b91d61fcae881a4ac412974edb6028941d80 | /test/espnet2/bin/test_enh_inference.py | 6ec708df0a27c4d50ddfc008b7a71c595e0b3108 | [
"Apache-2.0"
] | permissive | pzelasko/espnet | 4106826c5887bf7e203102b36a0129297ecfb5fe | 629ac3b89b23e08cafbaab3dac1467daaf7d39fb | refs/heads/master | 2021-08-30T17:10:15.178217 | 2021-08-25T20:50:53 | 2021-08-25T20:50:53 | 173,996,615 | 1 | 2 | Apache-2.0 | 2020-12-05T05:30:26 | 2019-03-05T18:09:12 | Shell | UTF-8 | Python | false | false | 1,198 | py | from argparse import ArgumentParser
from pathlib import Path
import pytest
import torch
from espnet2.bin.enh_inference import get_parser
from espnet2.bin.enh_inference import main
from espnet2.bin.enh_inference import SeparateSpeech
from espnet2.tasks.enh import EnhancementTask
def test_get_parser():
    """get_parser() must build an argparse-based parser."""
    parser = get_parser()
    assert isinstance(parser, ArgumentParser)
def test_main():
    """Calling main() without CLI arguments must exit via argparse."""
    with pytest.raises(SystemExit):
        main()
@pytest.fixture()
def config_file(tmp_path: Path):
    """Run a dry-run training so config.yaml is materialised under tmp_path."""
    cmd = ["--dry_run", "true", "--output_dir", str(tmp_path)]
    EnhancementTask.main(cmd=cmd)
    return tmp_path / "config.yaml"
@pytest.mark.execution_timeout(5)
@pytest.mark.parametrize("batch_size", [1, 2])
@pytest.mark.parametrize(
    "input_size, segment_size, hop_size", [(16000, None, None), (35000, 2.4, 0.8)]
)
def test_SeparateSpeech(config_file, batch_size, input_size, segment_size, hop_size):
    # Smoke test only (no value checks): builds the inference wrapper from the
    # dry-run config.  segment/hop of None exercises whole-utterance
    # processing; the 2.4s/0.8s pair exercises the chunked (segment-wise) path.
    separate_speech = SeparateSpeech(
        enh_train_config=config_file, segment_size=segment_size, hop_size=hop_size
    )
    # Random waveform shaped (batch, samples); fs=8000 is the declared rate.
    wav = torch.rand(batch_size, input_size)
    separate_speech(wav, fs=8000)
| [
"C0me_On@163.com"
] | C0me_On@163.com |
90dac013e7b8d3564d99078b2df9f789bb89833d | d6c86a4302debcf730516ac5bba8ad32d44faf82 | /Final Project/sportclubs/teammanager/urls.py | 5dbdda6101ef6e59f1e409323ee318db97b25b11 | [] | no_license | patrickmcgreevy/SportClubManager | 7cd51c1af20d6092a210640d038b3d8075962166 | 8f067aef7b2319c329bbf6db29836fc352635263 | refs/heads/master | 2020-04-16T17:49:13.846890 | 2019-04-27T00:14:43 | 2019-04-27T00:14:43 | 165,790,239 | 1 | 0 | null | 2019-04-27T00:14:43 | 2019-01-15T05:22:22 | Python | UTF-8 | Python | false | false | 480 | py | from django.urls import path, reverse
from . import views
# URL routes for the team-manager app; the names are referenced from
# templates/views via {% url %} / reverse().
urlpatterns = [
    # App landing page: overview of all clubs.
    path('', views.AllClubs.as_view(), name='clubshome'),
    # Clubs the current user belongs to.
    path('myclubs/', views.UserClubs.as_view(), name='userclubs'),
    # Detail page for one club (pk = club primary key).
    path('<int:pk>/', views.ClubDetails.as_view(), name='clubdetails'),
    # Officer-oriented detail view — presumably restricted; confirm in views.
    path('<int:pk>/officer_details/', views.ClubOfficerDetails.as_view(), name='clubofficerdetails'),
    # Add/remove club members.
    path('<int:pk>/change_members/', views.ClubMemberChange.as_view(), name='changemembers'),
]
"noreply@github.com"
] | patrickmcgreevy.noreply@github.com |
8946b98e89bdddce94a2715d79b0fbbcb3e9740e | e7e6e19e293c67e412355811fd9e447b3f26443d | /libs/functions/__init__.py | a5f7f6b9dfa0d84d541d00f6f4ae843dd43084ac | [
"MIT"
] | permissive | nga-27/SecuritiesAnalysisTools | 6c2f57929346e01433e7d6e1176747de2dbce50a | 7c4ce3d9d6ffb62aaf86c7d46bd7f15f4c68cbb0 | refs/heads/master | 2023-08-09T03:54:33.587670 | 2023-07-19T23:11:27 | 2023-07-19T23:11:27 | 180,685,810 | 5 | 2 | MIT | 2023-07-19T23:11:28 | 2019-04-11T00:44:25 | Python | UTF-8 | Python | false | false | 123 | py | """ functions to be used a single operations (or eventual API functions) """
from .functions import only_functions_handler
| [
"namell91@gmail.com"
] | namell91@gmail.com |
fc99fb37d4a38af2dd88b91d6d660527ae7b23fb | e29922802cd85e6745ec3215d71ffba9ba4a1db8 | /a_prep_data/a4_mate_plots_eunis.py | c53e37719fb89975e557ea673be59ea9080f695d | [] | no_license | haro-nl/DOREN_WEnR | 26855f49261a0b3ea93ab743b377e4f5dfb10583 | dadf8b0bf56912d22eb5fe4a499d3ef3ad79e127 | refs/heads/master | 2020-04-16T18:41:39.259162 | 2019-01-15T11:20:09 | 2019-01-15T11:20:09 | 165,831,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,830 | py | # -*- coding: utf-8 -*
#!/usr/bin/python3.5.3
'''Script to
1. read EUNIS type indicator species
2. generate look up dictionaries to get Dg, CoSp & DmSp species for each EUNIS types
3. read species list for all EVA vegetation plots and add sp list to each plot in a new column
4. define completeness score for a veg plot based on similarity between actual sp list and DgSp, CoSp & DmSp lists
5. calculate scores for all plots
Hans Roelofsen, 22 November 2018, Wageningen Environmental Research
'''
import os
import datetime
import pickle
from helper import do
if __name__ == "__main__":
    # Pipeline: load EVA plot headers (+EMEP), attach each plot's species
    # list, then score the plots against the DmSp/DgSp/CoSp reference lists
    # of their EUNIS type, and pickle the result.
    print("starting at {0}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M")))
    eva_head = do.get_eva_emep_data() # EVA header data with EMEP already joined
    eva_plot_comp = do.get_eva_plot_species(eva_head['PlotObservationID'].tolist()) # Species composition of all EVA plots
    eunis_type_composition_lists = do.generate_types_species_lists() # Dictionary of CoSp, DgSp & DmSp species for all EUNIS types
    # Add species list as column to the EVA plot table
    # NOTE(review): row-wise .apply over all plots — assumed acceptable for
    # this dataset size; confirm if run time becomes an issue.
    eva_head['sp_list'] = eva_head.apply(lambda row: do.get_plot_species(eva_plot_comp, row['PlotObservationID']), axis=1)
    print("Done 01 at {0}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M")))
    # Calculate DmSp score based on actual species list and DmSp species list for the applicable EUNIS type
    eva_head['dmsp'] = eva_head.apply(lambda row: do.completeness(plot_species_list=row['sp_list'],
                                      reference_species_list=eunis_type_composition_lists[row['EUNIScode']]['DmSp']), axis=1)
    print("Done 02 at {0}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M")))
    # idem for DgSp
    eva_head['dgsp'] = eva_head.apply(lambda row: do.completeness(plot_species_list=row['sp_list'],
                                      reference_species_list=eunis_type_composition_lists[row['EUNIScode']]['DgSp']), axis=1)
    print("Done 03 at {0}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M")))
    # idem for CoSp
    eva_head['cosp'] = eva_head.apply(lambda row: do.completeness(plot_species_list=row['sp_list'],
                                      reference_species_list=eunis_type_composition_lists[row['EUNIScode']]['CoSp']), axis=1)
    print("Done 04 at {0}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M")))
    # write to pickle file for safe keeping
    pickle_name = "eva_emep_score" + datetime.datetime.now().strftime("%Y%m%d_%H%M") + '.pkl'
    with open(os.path.join(r'd:\temppickle', pickle_name), 'wb') as handle:
        pickle.dump(eva_head, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print("Done all at {0}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M")))
| [
"hans.roelofsen@wur.nl"
] | hans.roelofsen@wur.nl |
4573c307c91eba94f7133e73ffb4e29b05316bfc | 0bd6e56b5046391b5be4b466b8ce5b44626818f5 | /mac_changer.py | 8c55a63e8d4bd61b36e5da67151b61392b86a493 | [] | no_license | Ronlin1/python-hacking-scripts | af14f60610019474c07d76fd15f90d7c812a0165 | 291ccc7283e6c47f1f3a3385d729b7c08c1c24ed | refs/heads/master | 2023-03-27T17:45:09.065699 | 2021-04-03T05:27:13 | 2021-04-03T05:27:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,702 | py | #!/user/bin/env python
import subprocess
import optparse
import re
def get_arguments():
    """Parse -i/--interface and -m/--mac; abort via parser.error() if missing."""
    parser = optparse.OptionParser()
    parser.add_option("-i", "--interface", dest="interface",
                      help="Interface to change its Mac address")
    parser.add_option("-m", "--mac", dest="new_mac",
                      help="New MAC address")
    options = parser.parse_args()[0]
    # parser.error() prints the message and exits, so plain ifs suffice here.
    if not options.interface:
        parser.error("[-] Please specify an interface, use --help for more info.")
    if not options.new_mac:
        parser.error("[-] Please specify a new mac, use --help for more info.")
    return options
def get_current_mac(interface):
    """Return the MAC address of *interface* per ifconfig, or None.

    Runs ``ifconfig <interface>`` and extracts the first MAC-shaped token
    (six colon-separated hex pairs) from its output.  The subprocess output
    is decoded explicitly: the old ``str(bytes)`` produced "b'...'" noise
    (it still matched, but only by accident).
    """
    ifconfig_result = subprocess.check_output(["ifconfig", interface])
    mac_address_search_result = re.search(
        r"\w\w:\w\w:\w\w:\w\w:\w\w:\w\w",
        ifconfig_result.decode(errors="replace"))
    if mac_address_search_result:
        return mac_address_search_result.group(0)
    print("[-] Could not read MAC address")
    return None  # explicit: previously fell off the end implicitly
def change_mac(interface, new_mac):
    """Bring *interface* down, set its hardware address, bring it back up."""
    print("[+] Changing Mac address for " + interface + " to " + new_mac)
    commands = (
        ["ifconfig", interface, "down"],
        ["ifconfig", interface, "hw", "ether", new_mac],
        ["ifconfig", interface, "up"],
    )
    for command in commands:
        subprocess.call(command)
# --- script entry point ---------------------------------------------------
options = get_arguments()
current_mac = get_current_mac(options.interface)
print("Current Mac = " + str(current_mac))
change_mac(options.interface, options.new_mac)
# Re-read the interface to verify the change actually took effect.
current_mac = get_current_mac(options.interface)
if current_mac == options.new_mac:
    print("[+] MAC address was successfully changed to " + current_mac)
else:
    print("[-] MAC address did not get changed")
# (removed a stray no-op string literal "testing" left over from debugging)
| [
"tonyjcha3713@gmail.com"
] | tonyjcha3713@gmail.com |
d5672859a1c11baa0302a06e15050c61a8db266f | 70ac291bcf11d8452c6b1ade5fbadd0003d9e613 | /machine_learning_机器学习/准确率(Accuracy)、精确率(Precision)、召回率(Recall)、F值(F-Measure)等评估指标的计算.py | 8c51fb9bad3621e3c8e70198ca29c52253849f25 | [] | no_license | SnowWhiteZ/hello-world | 10cc1faf508340f835fffbf3c587101e3e0e78a5 | e43793b413016eb2f52b40990a8f1b493d29c983 | refs/heads/master | 2022-03-12T22:10:42.163091 | 2019-12-02T09:58:15 | 2019-12-02T09:58:15 | 225,376,506 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,456 | py | #!/usr/bin/python3
# coding: utf-8
import numpy as np
from sklearn.metrics import f1_score, accuracy_score, fbeta_score, precision_score, recall_score
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
# 真实标签
y_true = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
# 模型预测结果
y_test = [0.8453712241207609, 0.8365137845084419, 0.8396024690959464, 0.8690716625950063, 0.801398983655787, 0.8353417405844167, 0.8887589815396711, 0.8274617726584338, 0.8901324702288052, 0.8515827665762914, 0.8008748432690203, 0.9129143613344268, 0.8213637332093631, 0.7926672650384551, 0.8715962551942291, 0.865989576549353, 0.8487118383625984, 0.893722366823937, 0.8683798090835637, 0.8258107838161615, 0.9067962552630583, 0.8896577622207299, 0.8287242449131549, 0.862162050742874, 0.9145984088092137, 0.8195240228832353, 0.8627208683955114, 0.8667420865435141, 0.833175478131922, 0.8338735760735464, 0.8609573544733866, 0.8270040835455006, 0.8438342928159803, 0.9162216060491829, 0.8681943043237748, 0.825237777063406, 0.9309199493779501, 0.847918698600505, 0.885842165942269, 0.845606331185933, 0.8867428557974891, 0.8569372316111383, 0.8374900840504085, 0.8495098728280119, 0.8475137546498668, 0.8509974354378016, 0.8545542968912262, 0.8369359268265817, 0.8881628216627452, 0.8553054247582024, 0.8715475068300871, 0.8608489638331329, 0.7871896522021451, 0.7986180814516614, 0.8679817198115483, 0.8555312604259576, 0.8737131993516944, 0.8570307159808236, 0.86943760267903, 0.8155454038368009, 0.8284627670247386, 0.7440460226630737, 0.8383901711678877, 0.9176876584197461, 0.8867356968591616, 0.8800298236584221, 0.8534696245512979, 0.9166524864925935, 0.8205450625187547, 0.8235830983361883, 0.8610359125511253, 0.8534495672661243, 0.8343550724006359, 0.826657313239454, 0.8327557274202153, 0.8263809690050867, 0.8449533999089178, 0.7403854533869694, 0.8862881836134406, 0.80930312554624, 0.8390349727384677, 0.7812820207595776, 0.8405256568966404, 0.7208619973606759, 0.8237972236612818, 0.8652031422452744, 0.7788070757633151, 0.8795942431527423, 0.8603826742129177, 0.83330392945359, 0.8487413534443429, 0.8085704307615089, 0.8862416492592033, 0.8154708608934949, 0.8949611666064037, 0.8189329260750865, 0.8328395987596068, 0.9158502403398057, 0.8066900361300818, 0.9277331317048729]
thre = 0.874 # an arbitrarily chosen decision threshold
tp = 0 # true positives
tn = 0 # true negatives
fp = 0 # false positives
fn = 0 # false negatives
# Tally the confusion matrix: a sample counts as predicted-positive when its
# score is >= thre.
for t4, t5 in zip(y_true, y_test):
    if t4 == 1 and t5 >= thre:
        tp += 1
    elif t4 == 1:
        fn += 1
    elif t4 == 0 and t5 < thre:
        tn += 1
    else:
        fp += 1
# The dictionary keys below are printed output and deliberately stay in
# Chinese: 真正/真负/假正/假负 = true pos / true neg / false pos / false neg.
data = {
    "真正": tp,
    "真负": tn,
    "假正": fp,
    "假负": fn
}
print("混淆矩阵数据:", data)
p = tp / (tp + fp ) # precision: fraction of predicted positives that are truly positive
r = tp / (tp + fn ) # recall: fraction of real positives that were predicted correctly
acc = (tp + tn) / (tp + tn + fp + fn) # accuracy: correctly classified over all samples
f1 = 2 * p * r / (p + r )
beta = 2
#       (1 + β × β) × P × R
# Fβ = ──────────────────────
#        (β × β) × P + R
f2 = (1+beta*beta) * p * r / (beta*beta*p+r)
data2 = {
    "准确率": acc,
    "精确率": p,
    "召回率": r,
    "f1值": f1,
    "f2值": f2,
}
print('通过精确率,召回率计算的结果:', data2)
# AUC is threshold-free: it consumes the raw scores, not the binarised labels.
auc = roc_auc_score(y_true, y_test)
# precision (sklearn, same threshold)
p = precision_score(y_true, np.array(y_test)>thre)
# recall
r = recall_score(y_true, np.array(y_test) > thre)
# accuracy
acc = accuracy_score(y_true, np.array(y_test) > thre)
f1 = f1_score(y_true, np.array(y_test) > thre)
f2 = fbeta_score(y_true, np.array(y_test) > thre, beta=2)
# NOTE(review): sklearn binarises with ">" while the manual tally above used
# ">=" — results differ if any score equals thre exactly; confirm for this data.
data3 = {
    "准确率": acc,
    "ROC曲线下面积": auc,
    "f1值": f1,
    "f2值": f2,
    "精确率": p,
    "召回率": r,
}
print('通过sklearn计算的结果:', data3)
# Separate multi-class demo of classification_report (overwrites y_true/y_test).
y_true = [0, 1, 2, 2, 2]
y_test = [0, 0, 2, 2, 1]
target_names = ['class 0', 'class 1', 'class 2']
print(classification_report(y_true, y_test, target_names=target_names))
def main():
    # Placeholder entry point — the real work runs at module import time above.
    pass
if __name__ == '__main__':
    main()
"gswyhq@126.com"
] | gswyhq@126.com |
9dc8e842d1c50ed74d1c5b4728ef47282db16f7c | cf43421567c1634abe1df885c6e185a180659708 | /Extract/common.py | cd7e1ac1c4a6b11a70a4290fe71d6d2217580e77 | [] | no_license | fabio-gz/ETL_newspaper | 4c5239892098840a730ecf3b58452054a50e914b | 7458701eab76821a1fd65f0821356b1e7924bc97 | refs/heads/master | 2023-01-11T05:01:39.773346 | 2020-11-16T22:10:57 | 2020-11-16T22:10:57 | 292,719,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | # cargar yaml
import yaml
#global var
__config = None
def config():
    """Return the parsed contents of config.yml, loading it on first use.

    The result is cached in the module-global ``__config``.  The truthiness
    check (not ``is None``) is intentional: an empty config re-triggers a load.
    """
    global __config
    if not __config:
        with open('config.yml', mode='r') as handle:
            __config = yaml.safe_load(handle)
    return __config
| [
"fabiogomez.silva@gmail.com"
] | fabiogomez.silva@gmail.com |
159ae11c00f6321cb99a0ef0d0efc843e5b9f5ce | 8d034478e79e5653bc3d43656925d480c2f4d5ea | /image_detection.py | 0e0c0978699965c5bada9201c6c1a64edf4a5297 | [] | no_license | stavik476/last_project_stav | 515e907d871f59dfda12797411a7eee32d25550d | 1f61204a9bc7d6cb03807b89db6085ea085320c4 | refs/heads/main | 2023-04-26T02:30:28.914181 | 2021-05-25T14:23:05 | 2021-05-25T14:23:05 | 370,720,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,777 | py | # Import packages
import os
import cv2
import numpy as np
import tensorflow.compat.v1 as tf
import sys
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from the_utils import label_map_util
from the_utils import visualization_utils as vis_util
#Change the test image name
IMAGE_NAME = 'red_lights.jpg'
# Number of classes the object detector can identify
NUM_CLASSES = 7
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = "models/frozen_inference_graph.pb"
# Path to label map file
PATH_TO_LABELS = "models\label_map.pbtxt"
# Path to image
PATH_TO_IMAGE = os.path.join("D:\models\Research\object_detection",'Testing\images',IMAGE_NAME)
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Load image using OpenCV and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
# NOTE(review): cv2.imread returns None for a missing/unreadable file; there is
# no check here, so a bad path fails later inside expand_dims/sess.run.
image = cv2.imread(PATH_TO_IMAGE)
image_expanded = np.expand_dims(image, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
    [detection_boxes, detection_scores, detection_classes, num_detections],
    feed_dict={image_tensor: image_expanded})
# Draw the results of the detection (aka 'visulaize the results')
coordinates = vis_util.coordinates_find(
    image,
    np.squeeze(boxes),
    np.squeeze(classes).astype(np.int32),
    np.squeeze(scores),
    category_index,
    use_normalized_coordinates=True,
    line_thickness=8,
    min_score_thresh=0.8)
print(coordinates)
name = "Traffic light"  # NOTE(review): unused — presumably the class of interest
squares = []
# Map each detection of class id 1 (cord[4] looks like "1: label") onto cells
# of what appears to be a 5-row x 4-column grid over the normalised image
# (0.25 cell size).  Assumption inferred from the constants — TODO confirm.
for cord in coordinates:
    rows = []
    if "1" == cord[4][0].split(":")[0]:
        for i in range(4):
            print(cord[i]%0.25)
            # Snap each of the 4 box coordinates to a grid index; the 0.03
            # tolerance keeps near-boundary values in the lower cell (only
            # applied to the bottom/right coords, i >= 2).
            if (cord[i] % 0.25) < 0.03 and (0 != int(cord[i] / 0.25) and (i != 0) and (i != 1)):
                rows.append(int(cord[i]/0.25))
            else:
                rows.append((int(cord[i]/0.25) + 1))
        #squares.append((5 - rows[0]) * 4 - rows[1] + 1)
        #squares.append((5 - rows[0]) * 4 - rows[3] + 1)
        #squares.append((5 - rows[2]) * 4 - rows[1] + 1)
        #squares.append((5 - rows[2]) * 4 - rows[3] + 1)
        # Enumerate every grid cell covered by the box (replaces the
        # corner-only version commented out above).
        for j in range(rows[2] - rows[0] + 1):
            for t in range(rows[3] - rows[1] + 1):
                squares.append((5 - rows[0] - j) * 4 - rows[1] - t + 1)
        print(rows)
print(squares)
# Overlay boxes/labels in place on `image` (note the lower 0.6 threshold here
# vs 0.8 used for the grid mapping above).
vis_util.visualize_boxes_and_labels_on_image_array(
    image,
    np.squeeze(boxes),
    np.squeeze(classes).astype(np.int32),
    np.squeeze(scores),
    category_index,
    use_normalized_coordinates=True,
    line_thickness=8,
    min_score_thresh=0.6)
# All the results have been drawn on image. Now display the image.
cv2.imshow('Object detector', image)
# Press any key to close the image
cv2.waitKey(0)
# Clean up
cv2.destroyAllWindows()
| [
"71749280+stavik476@users.noreply.github.com"
] | 71749280+stavik476@users.noreply.github.com |
70917ef3ba2fae2c622f7b341e85affcd1aa530a | 19d8c15700cbb2d0b8108c379edbc50bfcb1c149 | /Source code/laptop.py | aa6adaadf30faa8e250fe0bfebe582a4ffbd0e33 | [] | no_license | trandaitai327/laptop-store-manage | 41543be450ca68a973e754c0b2bfead741d88e44 | dee8519d4c4602f7132b5fcbd4a2e03e9193223d | refs/heads/master | 2023-09-03T21:43:59.694305 | 2021-11-05T03:14:07 | 2021-11-05T03:14:07 | 424,813,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | class Laptop:
    def __init__(self,ten,hang,gia,soluong):
        # Vietnamese parameter names: ten=name, hang=brand, gia=price,
        # soluong=quantity (stored as `quantum`).
        self.name = ten
        self.brand = hang
        self.price = gia
        self.quantum = soluong
    def xuatgia(self):
        # "xuat gia" = "output price": plain accessor for the price attribute.
        return self.price
# Demo instance; not referenced elsewhere in this module.
new= Laptop("G3","Dell",25000,3)
| [
"trandaitai327@gmail.com"
] | trandaitai327@gmail.com |
c8a495c25d68757e7f04885e00d173531073ac78 | efde64a427ec0e7a03c6227ea36e63c386924545 | /test.py | 295e11d4a904dc38869e71a681bae9ef5e109100 | [] | no_license | ZhihaoDU/du2020dan | 5bfcc8ead7c9ac1f1e45e3cfb68f45c253e81403 | 4ec8d37a46c6d40e6d2f07ec1d299ac2e802ed69 | refs/heads/master | 2021-01-15T07:04:33.043159 | 2020-02-25T04:43:55 | 2020-02-25T04:43:55 | 242,909,472 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,432 | py | import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from kaldi_helper import KaldiFeatHolder
from speech_utils import read_path_list, print_with_time, calc_rescale_c
from scipy.io import wavfile
from kaldi_fbank_extractor import log_fbank, get_fft_mel_mat
import scipy.io as sio
from multiprocessing import Pool
import librosa
from models import get_model
from enhancement_targets import get_target
import json
import argparse
def post_process(src, target):
    """Copy the Kaldi bookkeeping files from *src* into *target* (via cp)."""
    print_with_time("Doing post process...")
    for meta_file in ("spk2utt", "text", "utt2spk", "wav.scp", "spk2gender"):
        os.system("cp %s/%s %s/" % (src, meta_file, target))
def calc_func(noisy_dir_path):
    """Enhance every utterance listed in <noisy_dir_path>/wav.scp.

    Loads the generator checkpoint selected by the module-globals `method`,
    `args` and `opts`, enhances each wav (paired with its *_early50.wav
    reverberant-clean reference), and writes Kaldi feats.ark/feats.scp into
    a sibling directory named "<noisy_dir_path>_<method>_<step>".  In
    --debug_model mode it instead dumps .mat files for the first utterance
    and returns early.
    """
    with torch.no_grad():
        debug_model = args.debug_model
        _method = method
        model_opts = json.load(open(os.path.join("configs/%s.json" % args.model_config), 'r'))
        gen_model = model_opts['gen_model_name']
        calc_target = get_target(args.target_type)
        device = torch.device("cuda")
        print_with_time("Loading model...")
        Generator, _ = get_model(gen_model, None)
        model = Generator(model_opts['gen_model_opts']).to(device)
        checkpoint = torch.load("Checkpoints/%s/checkpoint_%09d.pth" % (_method, args.global_step))
        model.load_state_dict(checkpoint["generator"])
        # model.load_state_dict(checkpoint["enhancer"])
        model.eval()
        # 512-point FFT -> 40 mel bands at 16 kHz; used to fold enhanced
        # power spectra back to fbank features.
        melbank = get_fft_mel_mat(512, 16000, 40)
        _method = "_".join([_method, str(args.global_step)])
        if debug_model:
            os.system('mkdir -p debug/%s' % _method)
        print_with_time("Start to enhance wav file in %s with method %s\n" % (noisy_dir_path, _method))
        udir_path = "%s_%s" % (noisy_dir_path, _method)
        if not os.path.exists(udir_path):
            os.mkdir(udir_path)
        wav_scp = read_path_list(os.path.join(noisy_dir_path, "wav.scp"))
        # NOTE(review): ark_file/scp_file only exist when not in debug mode;
        # the close() calls at the bottom rely on the debug path returning
        # early inside the loop (first iteration) — fragile if wav.scp is empty.
        if not debug_model:
            ark_file = open(os.path.join(udir_path, "feats.ark"), 'wb')
            scp_file = open(os.path.join(udir_path, "feats.scp"), 'w')
        key_len = wav_scp[0].find(' ')
        kaldi_holder = KaldiFeatHolder(key_len, 3000, 40)
        offset = key_len + 1
        enhanced_number = 0
        for it, (one_wav) in enumerate(wav_scp):
            wav_id, wav_path = one_wav.split(' ')
            sr, noisy_speech = wavfile.read(wav_path)
            if len(noisy_speech.shape) > 1:
                # Down-mix multi-channel audio to mono.
                noisy_speech = np.mean(noisy_speech, 1)
            early50_path = wav_path.replace('.wav', '_early50.wav')
            sr, early50 = wavfile.read(early50_path)
            if len(early50.shape) > 1:
                early50 = np.mean(early50, 1)
            # as the training dataset, use "power_norm" to normalize the waveform to match the input of model.
            # c = np.sqrt(np.mean(np.square(noisy_speech)))
            c = calc_rescale_c(noisy_speech, args.rescale_method)
            noisy_speech = noisy_speech / c
            early50 = early50 / c
            noisy_fbank, noisy_mag = log_fbank(noisy_speech, False, True, True, None)
            early50_fbank, early50_mag = log_fbank(early50, False, True, True, None)
            noise_fbank, noise_mag = log_fbank(noisy_speech - early50, False, True, True, None)
            # Batchify as (1, time, feat): mel-fbank or magnitude-squared (power).
            if args.feature_domain == "mel":
                feat = torch.Tensor(noisy_fbank.T).unsqueeze(0).to(device)
                label = torch.Tensor(early50_fbank.T).unsqueeze(0).to(device)
                noise = torch.Tensor(noise_fbank.T).unsqueeze(0).to(device)
            else:
                feat = torch.Tensor(np.square(noisy_mag).T).unsqueeze(0).to(device)
                label = torch.Tensor(np.square(early50_mag).T).unsqueeze(0).to(device)
                noise = torch.Tensor(np.square(noise_mag).T).unsqueeze(0).to(device)
            if args.target_type.lower() == "mapping_mag":
                predict = model.forward(feat.sqrt())
            else:
                predict = model.forward(torch.log(feat + opts['eps']))
            results = calc_target(feat, label, noise, predict, opts)
            enhanced = results["enhanced"]
            predict = results["predict"]
            target = results["target"]
            if args.feature_domain == "mel":
                enhanced_pow = 0
                enhanced_fbank = enhanced[0, :, :].cpu().numpy()
            else:
                enhanced_pow = enhanced[0, :, :].cpu().numpy()
                enhanced_fbank = np.matmul(enhanced_pow, melbank.T)
            # Undo the c rescaling (power domain => c**2) before the log.
            log_enhanced_fbank = np.log(enhanced_fbank * (c ** 2.) + opts['eps'])
            if debug_model:
                sio.savemat("debug/%s/%s_%s" % (_method, wav_id, wav_path.split('/')[-5]),
                            {'noisy_mag': noisy_mag, 'noisy_fbank': noisy_fbank,
                             'enhanced_mag': np.sqrt(enhanced_pow).T, 'enhanced_fbank': enhanced_fbank.T,
                             'early50_mag': early50_mag, 'early50_fbank': early50_fbank,
                             'predict': predict[0, :, :].cpu().numpy().T,
                             'target': target[0, :, :].cpu().numpy().T,
                             'log_enhanced_fbank': log_enhanced_fbank.T,
                             'log_early50_fbank': np.log(early50_fbank * (c ** 2.) + opts['eps']),
                             'c': c
                             })
                if it >= 0:
                    # Debug mode: dump only the first utterance, then stop.
                    return
            else:
                kaldi_holder.set_key(wav_id)
                kaldi_holder.set_value(log_enhanced_fbank)
                kaldi_holder.write_to(ark_file)
                scp_file.write("%s %s/feats.ark:%d\n" % (wav_id, udir_path, offset))
                offset += kaldi_holder.get_real_len()
            enhanced_number += 1
            if enhanced_number % 40 == 0:
                print_with_time(
                    "Enhanced %5d(%6.2f%%) utterance" % (enhanced_number, 100. * enhanced_number / len(wav_scp)))
        print_with_time("Enhanced %d utterance" % enhanced_number)
        ark_file.close()
        scp_file.close()
        post_process(noisy_dir_path, udir_path)
        print_with_time("Done %s." % _method)
if __name__ == '__main__':
    # Feature/STFT configuration consumed by calc_func via module-global `opts`.
    opts = {}
    opts['win_len'] = 400
    opts['sr'] = 16000
    opts['device'] = torch.device('cuda:0')
    opts['mel_channels'] = 40
    opts['win_type'] = 'hamming'
    opts['eps'] = 1e-12
    opts['clip_low'] = 0.
    opts['clip_high'] = 1.
    opts['log_power_offset'] = 10.
    opts['compress_label'] = False
    opts['log_label_min'] = -27.63
    opts['log_label_max'] = 14.41
    parser = argparse.ArgumentParser()
    parser.add_argument('--script_note', type=str, default=None)
    parser.add_argument('--feature_domain', type=str, default="mel")
    parser.add_argument('--adversarial_loss', type=str, default=None)
    parser.add_argument('--model_config', type=str, default='BiFreqMelCRN_DCGAN')
    parser.add_argument('--target_type', type=str, default="mapping_log_pow")
    parser.add_argument('--clean_type', type=str, default="early50")
    parser.add_argument('--name_note', type=str, default=None)
    parser.add_argument('--d_iter', type=int, default=0)
    parser.add_argument('--rescale_method', type=str, default="power_norm", choices=["None", "value_norm", "power_norm",
                                                                                     "st_power_norm", "max_norm"])
    parser.add_argument('--dist_alpha', type=float, default=0)
    parser.add_argument('--data_augment', type=str, default="naive", choices=["None", "naive"])
    parser.add_argument('--global_step', type=int, default=0)
    # NOTE(review): type=bool does not parse booleans — any non-empty string
    # (including "False") becomes True; this only works because callers rely
    # on the default.  action='store_true' would be the usual fix (CLI change).
    parser.add_argument('--debug_model', type=bool, default=False)
    parser.add_argument('--l1_alpha', type=float, default=0.)
    parser.add_argument('--l2_alpha', type=float, default=0.)
    parser.add_argument('--glc_alpha', type=float, default=0., help="Lipschitz continuous penalty for generator")
    parser.add_argument('--feat_alpha', type=float, default=0.)
    args = parser.parse_args()
    # Build the experiment name ("method") from the non-default options; it
    # doubles as the checkpoint directory name that calc_func loads from.
    if args.script_note is not None:
        model_name_list = [args.script_note, args.feature_domain]
    else:
        model_name_list = [args.feature_domain]
    # model_name_list.append("mse")
    if args.adversarial_loss is not None:
        model_name_list.append(args.adversarial_loss)
    model_name_list.extend([args.model_config, args.target_type, args.clean_type])
    if args.d_iter > 0:
        model_name_list.append("D%d" % args.d_iter)
    if args.name_note is not None:
        model_name_list.append(args.name_note)
    if args.rescale_method != "None":
        model_name_list.append(args.rescale_method)
    if args.l1_alpha > 0:
        model_name_list.append("L1_%.6f" % args.l1_alpha)
    if args.l2_alpha > 0:
        model_name_list.append("L2_%.6f" % args.l2_alpha)
    if args.glc_alpha > 0:
        model_name_list.append("GLC_%.6f" % args.glc_alpha)
    if args.dist_alpha > 0:
        model_name_list.append("DIST_%.6f" % args.dist_alpha)
    if args.feat_alpha > 0:
        model_name_list.append("FEAT_%.6f" % args.feat_alpha)
    if args.data_augment != "None":
        model_name_list.append(args.data_augment)
    method = "_".join(model_name_list)
    print("|----------------------------------------------------------------------------|")
    print("|", method.center(74), "|")
    print("|----------------------------------------------------------------------------|")
    print(args)
    print(opts)
    input("Press any key to continue.")
    # Kaldi data directories whose wav.scp files will be enhanced.
    noisy_dir_list = [
        "/data/duzhihao/kaldi/egs/chime2/s5/data-fbank/train_si84_noisy",
        "/data/duzhihao/kaldi/egs/chime2/s5/data-fbank/dev_dt_05_noisy",
        "/data/duzhihao/kaldi/egs/chime2/s5/data-fbank/test_eval92_5k_noisy",
    ]
    if args.debug_model:
        noisy_dir_list = [
            "/data/duzhihao/kaldi/egs/chime2/s5/data-fbank/train_si84_noisy"
        ]
    # One worker process per data directory; each runs calc_func on its own dir.
    pool = Pool(len(noisy_dir_list))
    pool.map(calc_func, noisy_dir_list)
    pool.close()
| [
"duzhihao.china@gmail.com"
] | duzhihao.china@gmail.com |
cb03a855464cc9c2f80b4f406ed8aaac4d1c0f3f | e5acfe14534911fb42ab7715331abda164a3a93b | /devel/lib/python2.7/dist-packages/adhoc_communication/msg/_ExpCluster.py | a1eb356df41d275f3ee571397b570d141b72bb3b | [] | no_license | mgr4dv/surf_inspec | 964095590c58967d6183ac16e755192922bf8af4 | ada5332edaebe622fa403dd8f5233b01b8b16559 | refs/heads/master | 2021-07-04T17:29:06.141297 | 2017-09-27T23:40:28 | 2017-09-27T23:40:28 | 103,476,523 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,101 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from adhoc_communication/ExpCluster.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import adhoc_communication.msg
class ExpCluster(genpy.Message):
    # Auto-generated ROS message class (genpy) for adhoc_communication/ExpCluster.
    # Wire format (little-endian): uint32 element count, then for each element
    # an int64 id plus a uint32-length-prefixed UTF-8 string, then one float64 bid.
    # NOTE: generated code -- logic and statement order must not be altered.
    _md5sum = "378b1f01ebed06706a22e7cc27608df5"
    _type = "adhoc_communication/ExpCluster"
    _has_header = False #flag to mark the presence of a Header object
    _full_text = """ExpClusterElement[] ids_contained
float64 bid
================================================================================
MSG: adhoc_communication/ExpClusterElement
int64 id
string detected_by_robot_str
"""
    __slots__ = ['ids_contained','bid']
    _slot_types = ['adhoc_communication/ExpClusterElement[]','float64']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.

        The available fields are:
        ids_contained,bid

        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(ExpCluster, self).__init__(*args, **kwds)
            #message fields cannot be None, assign default values for those that are
            if self.ids_contained is None:
                self.ids_contained = []
            if self.bid is None:
                self.bid = 0.
        else:
            # No arguments at all: every field gets its default value.
            self.ids_contained = []
            self.bid = 0.

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            # uint32 count of elements, then each element in order.
            length = len(self.ids_contained)
            buff.write(_struct_I.pack(length))
            for val1 in self.ids_contained:
                buff.write(_struct_q.pack(val1.id))
                _x = val1.detected_by_robot_str
                length = len(_x)
                # Strings are written UTF-8 encoded with a uint32 length prefix.
                # (`unicode` only exists on Python 2; short-circuit guards it.)
                if python3 or type(_x) == unicode:
                    _x = _x.encode('utf-8')
                    length = len(_x)
                if python3:
                    buff.write(struct.pack('<I%sB'%length, length, *_x))
                else:
                    buff.write(struct.pack('<I%ss'%length, length, _x))
            # Trailing float64 bid, written once after the element array.
            buff.write(_struct_d.pack(self.bid))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        try:
            if self.ids_contained is None:
                self.ids_contained = None
            end = 0
            # uint32 element count.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            self.ids_contained = []
            for i in range(0, length):
                val1 = adhoc_communication.msg.ExpClusterElement()
                # int64 id.
                start = end
                end += 8
                (val1.id,) = _struct_q.unpack(str[start:end])
                # uint32 string length, then the UTF-8 payload.
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                start = end
                end += length
                if python3:
                    val1.detected_by_robot_str = str[start:end].decode('utf-8')
                else:
                    val1.detected_by_robot_str = str[start:end]
                self.ids_contained.append(val1)
            # Trailing float64 bid.
            start = end
            end += 8
            (self.bid,) = _struct_d.unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        try:
            # Identical wire layout to serialize(); numpy variant kept for the
            # genpy API contract.
            length = len(self.ids_contained)
            buff.write(_struct_I.pack(length))
            for val1 in self.ids_contained:
                buff.write(_struct_q.pack(val1.id))
                _x = val1.detected_by_robot_str
                length = len(_x)
                if python3 or type(_x) == unicode:
                    _x = _x.encode('utf-8')
                    length = len(_x)
                if python3:
                    buff.write(struct.pack('<I%sB'%length, length, *_x))
                else:
                    buff.write(struct.pack('<I%ss'%length, length, _x))
            buff.write(_struct_d.pack(self.bid))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        try:
            # Identical decode path to deserialize(); numpy variant kept for the
            # genpy API contract.
            if self.ids_contained is None:
                self.ids_contained = None
            end = 0
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            self.ids_contained = []
            for i in range(0, length):
                val1 = adhoc_communication.msg.ExpClusterElement()
                start = end
                end += 8
                (val1.id,) = _struct_q.unpack(str[start:end])
                start = end
                end += 4
                (length,) = _struct_I.unpack(str[start:end])
                start = end
                end += length
                if python3:
                    val1.detected_by_robot_str = str[start:end].decode('utf-8')
                else:
                    val1.detected_by_robot_str = str[start:end]
                self.ids_contained.append(val1)
            start = end
            end += 8
            (self.bid,) = _struct_d.unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled little-endian struct codecs shared by the (de)serializers above.
_struct_I = genpy.struct_I          # uint32: array counts / string lengths
_struct_q = struct.Struct("<q")     # int64: ExpClusterElement.id
_struct_d = struct.Struct("<d")     # float64: ExpCluster.bid
| [
"builder@kududyn.com"
] | builder@kududyn.com |
a8885f69c487b2f187926f4fa20b933388d0a0d1 | 50ed16359e7a180298e847c4866ff2b45b3f3815 | /scripts/computeNumbers.py | e07b5acf8876c8f9cd7ac521858d44c012313e7f | [] | no_license | bfildier/Fildier2022_code | cde8fac4c01597e8ea7f631913aee229e725ffbd | 8cd2c5e78b85ccc89544f2c6698b7717dd7a1537 | refs/heads/main | 2023-04-18T01:28:39.748615 | 2022-12-05T16:14:40 | 2022-12-05T16:14:40 | 574,580,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,112 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 21 11:46:19 2022
Numbers in PNAS main 2022
@author: bfildier
"""
##-- modules
import scipy.io
import sys, os, glob
import numpy as np
import xarray as xr
import matplotlib
from matplotlib import cm
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
from matplotlib.patches import Circle
from PIL import Image
from datetime import datetime as dt
from datetime import timedelta, timezone
import pytz
import matplotlib.image as mpimg
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import pickle
from scipy.stats import gaussian_kde
from scipy.stats import linregress
from scipy import optimize
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
##-- directories
# workdir = os.path.dirname(os.path.realpath(__file__))
workdir = '/Users/bfildier/Code/analyses/EUREC4A/EUREC4A_organization/scripts'
repodir = os.path.dirname(workdir)
moduledir = os.path.join(repodir,'functions')
resultdir = os.path.join(repodir,'results','radiative_features')
figdir = os.path.join(repodir,'figures','paper')
#inputdir = '/Users/bfildier/Dropbox/Data/EUREC4A/sondes_radiative_profiles/'
inputdir = os.path.join(repodir,'input')
radinputdir = os.path.join(repodir,'input')
imagedir = os.path.join(repodir,'figures','snapshots','with_HALO_circle')
scriptsubdir = 'Fildier2021'
# Load own module
projectname = 'EUREC4A_organization'
thismodule = sys.modules[__name__]
## Own modules
sys.path.insert(0,moduledir)
print("Own modules available:", [os.path.splitext(os.path.basename(x))[0]
for x in glob.glob(os.path.join(moduledir,'*.py'))])
from radiativefeatures import *
from radiativescaling import *
# from thermodynamics import *
from conditionalstats import *
from matrixoperators import *
from thermoConstants import *
mo = MatrixOperators()
##--- local functions
def defineSimDirectories():
    """Create specific subdirectories"""
    # create output directory if not there
    # `figdir` is the module-level figure path; exist_ok avoids raising when
    # the directory is already present.
    os.makedirs(os.path.join(figdir),exist_ok=True)
if __name__ == "__main__":
# arguments
parser = argparse.ArgumentParser(description="Compute paper numbers from all precomputed data")
parser.add_argument('--overwrite',type=bool,nargs='?',default=False)
# output directory
defineSimDirectories()
##-- Load all data
exec(open(os.path.join(workdir,"load_data.py")).read())
#%% Rerecence wavenumbers
print('-- compute reference wavenumbers --')
print()
T_ref = 290 # K
W_ref = 3 # mm
print('choose reference temperature T = %3.1fK'%T_ref)
print('choose reference water path W = %3.1fmm'%W_ref)
print()
print("> compute reference wavenumber ")
kappa_ref = 1/W_ref # mm-1
rs = rad_scaling_all['20200202']
nu_ref_rot = rs.nu(kappa_ref,'rot')
nu_ref_vr = rs.nu(kappa_ref,'vr')
print('reference wavenumber in rotational band: nu = %3.1f cm-1'%(nu_ref_rot/1e2))
print('reference wavenumber in vibration-rotation band: nu = %3.1f cm-1'%(nu_ref_vr/1e2))
print()
print("> Planck function at both reference wavenumbers")
piB_ref_rot = pi*rs.planck(nu_ref_rot,T_ref)
piB_ref_vr = pi*rs.planck(nu_ref_vr,T_ref)
print('reference Planck term in rotational band: piB = %3.4f J.s-1.sr-1.m-2.cm'%(piB_ref_rot*1e2))
print('reference Planck term in vibration-rotation band: piB = %3.4f J.s-1.sr-1.m-2.cm'%(piB_ref_vr*1e2))
#%% Alpha
#-- Analytical approximation
# show temperature profiles
day = '20200126'
date = pytz.utc.localize(dt.strptime(day,'%Y%m%d'))
data_day = data_all.sel(launch_time=day)
f = rad_features_all[day]
# colors
var_col = f.pw
norm = matplotlib.colors.Normalize(vmin=var_col.min(), vmax=var_col.max())
cmap = plt.cm.nipy_spectral
cmap = plt.cm.RdYlBu
cols = cmap(norm(var_col))
# N data
Ns = data_day.dims['launch_time']
# Exploratory figure for lapse rate
fig,ax = plt.subplots()
for i_s in range(Ns):
ax.plot(data_day.temperature[i_s],data_day.alt,c=cols[i_s],linewidth=0.5,alpha=0.5)
s_fit_FT = slice(200,600)
s_fit_BL = slice(0,160)
for suff in '_FT','_BL':
s_fit = getattr(thismodule,'s_fit%s'%suff)
s_dry = f.pw < 30 # mmm
temp_mean = np.nanmean((data_day.temperature)[s_dry],axis=0)
not_nan = ~np.isnan(temp_mean)
z_fit = data_day.alt[not_nan][s_fit]
# regress
slope, intercept, r, p, se = scipy.stats.linregress(z_fit,temp_mean[not_nan][s_fit])
# show
ax.plot(slope*z_fit+intercept,z_fit,'k')
#!- analytical alpha
Gamma = -slope
T_ref = 290
alpha_an = L_v*Gamma/gg/T_ref * R_d/R_v - 1
print('alpha_analytical%s ='%suff,alpha_an)
ax.set_xlabel('T (K)')
ax.set_ylabel('z (km)')
#%% Inversion
Ns = rad_scaling_all[day].rad_features.pw.size
fig,ax = plt.subplots()
# Ns = data_all.temperature.shape[0]
for i_s in range(Ns):
theta = data_day.temperature[i_s] * (1e5/data_day.pressure[i_s])**(R_d/c_pd)
ax.plot(theta,data_day.pressure[i_s]/100,c = cols[i_s],alpha=0.2)
ax.invert_yaxis()
ax.set_ylabel('p (hPa)')
ax.set_xlabel(r'Potential temperature $\theta$ (K)')
#%% Water paths vs RH
# alpha_qvstar = 2.3
# qvstar_0 = 0.02
# qvstar_power = qvstar_0 * np.power(pres_fit/pres_fit[-1],alpha_qvstar)
def waterPath(qvstar_surf, pres, pres_jump, rh_min, rh_max, alpha, i_surf=-1):
    """Water path from top of atmosphere, in mm.

    - qvstar_surf: surface saturated specific humidity (kg/kg)
    - pres: reference pressure array (hPa)
    - pres_jump: level of RH jump (hPa)
    - rh_max: lower-tropospheric RH
    - rh_min: upper-tropospheric RH
    - alpha: power exponent of the qvstar(p) power-law profile
    - i_surf: index of surface layer in array (default is -1, last element)

    Note: relies on the module-level gravity constant `gg` (thermoConstants).
    """
    hPa_to_Pa = 100
    rho_w = 1e3     # liquid water density (kg/m3)
    m_to_mm = 1e3
    # init
    W = np.full(pres.shape, np.nan)
    # Normalization constant of the analytic pressure integral of qvstar.
    # (Leftover debug `print(A)` removed.)
    A = qvstar_surf / (pres[i_surf] * hPa_to_Pa)**alpha / gg / (1 + alpha)
    # lower troposphere: RH = rh_max below the jump, with the rh_min branch
    # subtracted above it.
    lowert = pres >= pres_jump
    W[lowert] = A * (rh_max * (pres[lowert] * hPa_to_Pa)**(alpha + 1)
                     - (rh_max - rh_min) * (pres_jump * hPa_to_Pa)**(alpha + 1))
    # upper troposphere: uniform RH = rh_min
    uppert = pres < pres_jump
    W[uppert] = A * rh_min * (pres[uppert] * hPa_to_Pa)**(alpha + 1)
    # kg/m2 -> mm of liquid water
    return W / rho_w * m_to_mm
qvstar_0 = 0.02
pres_fit = np.linspace(0,1000,1001)
pres_jump = 800 # hPa
rh_min = 1
rh_max = 1
alpha_qvstar = 2.3
W_prof = waterPath(qvstar_0,pres_fit,pres_jump,rh_min,rh_max,alpha_qvstar)
i_jump = np.where(pres_fit >= pres_jump)[0][0]
W_FT = W_prof[i_jump]
print('Free tropospheric water path at saturation (qvstar integral) =',W_FT)
print('with uniform RH_t = 1%, W =',W_FT/100)
print('with uniform RH_t = 5%, W =',W_FT*0.05)
print('with uniform RH_t = 50%, W =',W_FT*0.5)
print('with uniform RH_t = 80%, W =',W_FT*0.8)
| [
"bfildier.work@gmail.com"
] | bfildier.work@gmail.com |
4c1785f655e01342cbdda1667b1a388889254f6b | 2daa3894e6d6929fd04145100d8a3be5eedbe21c | /tests/artificial/transf_pow3/trend_poly/cycle_7/ar_12/test_artificial_32_pow3_poly_7_12_100.py | 7f5a2931b6b59c48d8a1216fadd94ec7826eabbc | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Henri-Lo/pyaf | a1f73a0cc807873bd7b79648fe51de9cfd6c126a | 08c968425d85dcace974d90db7f07c845a0fe914 | refs/heads/master | 2021-07-01T12:27:31.600232 | 2017-09-21T11:19:04 | 2017-09-21T11:19:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 32 , FREQ = 'D', seed = 0, trendtype = "poly", cycle_length = 7, transform = "pow3", sigma = 0.0, exog_count = 100, ar_order = 12);
art.process_dataset(dataset); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
3cbc9bfba6c7cc7ac49325cfc8ffaf1622d354b1 | bdaed512916fcf96e5dc915538fe8598aeb2d3cf | /mcex/history/nphistory.py | f042a176b3e0f6b83b0c8e8c2c2c693ec6657ff1 | [] | no_license | jsalvatier/mcex | 9657cc2e8083f4e4dd013baaaceba08f9a48754e | 040f49bfd6eb467ef4d50d15de25033b1ba52c55 | refs/heads/master | 2021-06-18T19:02:07.055877 | 2017-01-22T01:10:01 | 2017-01-22T01:10:01 | 1,455,409 | 9 | 3 | null | 2012-06-21T18:07:36 | 2011-03-08T17:02:42 | Python | UTF-8 | Python | false | false | 954 | py | '''
Created on Mar 15, 2011
@author: jsalvatier
'''
import numpy as np
class NpHistory(object):
    """
    Encapsulates the recording of a process chain.

    Samples are stored per variable in pre-allocated numpy arrays of shape
    (max_draws,) + value.shape; recording past max_draws raises ValueError.
    """
    def __init__(self, max_draws):
        # Hard capacity limit; storage is allocated lazily on first record().
        self.max_draws = max_draws
        # variable name -> ndarray of shape (max_draws,) + value.shape
        self.samples = {}
        self.nsamples = 0

    def record(self, point):
        """
        Record the position of a chain at a certain point in time.

        :param point: dict mapping variable name -> array-like value.
        :raises ValueError: once max_draws samples have been recorded.
        """
        if self.nsamples >= self.max_draws:
            raise ValueError('out of space!')
        # `.items()` instead of the Python-2-only `.iteritems()`: the original
        # raised AttributeError on Python 3; items() works on both versions.
        for var, value in point.items():
            try:
                s = self.samples[var]
            except KeyError:  # first sample of this variable: allocate storage
                s = np.empty((self.max_draws,) + value.shape)
                self.samples[var] = s
            s[self.nsamples, ...] = value
        self.nsamples += 1

    def __getitem__(self, key):
        # Return only the filled portion of the storage array.
        return self.samples[key][0:self.nsamples, ...]
"jsalvatier@gmail.com"
] | jsalvatier@gmail.com |
8ef7a014215cf45c13afb9ae120e112806e9cc33 | 0a8a3d486c170019a09fadeafc36cffb459f55db | /miscImu/testIMU.py | 682b2e9007fdba02a94d619d77cd53b9402a9718 | [
"MIT"
] | permissive | Armon16/IMU | e240f6d30451695d94a71e5f0d2454cf0ccc7d85 | ee4d114c7f5074a5f45bd658bf5d7f310d452f2f | refs/heads/master | 2020-04-29T01:21:05.744558 | 2019-04-12T06:17:29 | 2019-04-12T06:17:29 | 153,354,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,560 | py | import logging
import sys
import time
from deepstream import get, post
from Adafruit_BNO055 import BNO055
try:
obj = {}
post(obj, 'imu')
except:
print("Not connected to deepstream")
magneticDeviation = 11
bno = BNO055.BNO055(busnum=2)
confMode = True
while True:
try:
if not bno.begin():
print('The sensor is not connected')
time.sleep(1)
#raise RuntimeError('Failed to initialize BNO055! Is the sensor connected?')
else:
break
except:
print('waiting for sensor...')
def magToTrue(h):
    """Convert a magnetic heading to true heading by subtracting the
    module-level magnetic deviation, wrapping negatives back by 360."""
    global magneticDeviation
    corrected = h - magneticDeviation
    return corrected + 360 if corrected < 0 else corrected
bno.set_mode(0x00)
print("Entering Config Mode")
fileIn = open('calibrationData.txt','r')
data = fileIn.read().splitlines()
for i in range(len(data)):
data[i] = int(data[i])
bno.set_calibration(data)
fileIn.close()
# Print system status and self test result.
status, self_test, error = bno.get_system_status()
print('System status: {0}'.format(status))
print('Self test result (0x0F is normal): 0x{0:02X}'.format(self_test))
# Print out an error if system status is in error mode.
if status == 0x01:
print('System error: {0}'.format(error))
print('See datasheet section 4.3.59 for the meaning.')
# Print BNO055 software revision and other diagnostic data.
sw, bl, accel, mag, gyro = bno.get_revision()
print('Software version: {0}'.format(sw))
print('Bootloader version: {0}'.format(bl))
print('Accelerometer ID: 0x{0:02X}'.format(accel))
print('Magnetometer ID: 0x{0:02X}'.format(mag))
print('Gyroscope ID: 0x{0:02X}\n'.format(gyro))
print('Reading BNO055 data, press Ctrl-C to quit...')
try:
while True:
heading, roll, pitch = bno.read_euler()
sys, gyro, accel, mag = bno.get_calibration_status()
heading = magToTrue(heading)
if (sys == 3 and gyro == 3 and accel == 3 and mag == 3 and confMode):
bno.set_mode(0x0C)
print("Entering Nine Degrees of Freedom Fusion Mode")
confMode = False
print('Heading={0:0.2F} Roll={1:0.2F} Pitch={2:0.2F}\tSys_cal={3} Gyro_cal={4} Accel_cal={5} Mag_cal={6}'.format(
heading, roll, pitch, sys, gyro, accel, mag))
try:
response = post({ "heading":heading, "roll":roll, "pitch":pitch, "sys":sys, "gyro":gyro, "accel":accel, "mag":mag }, 'imu')
except:
print("Cannot Post to Deepstream")
response = None
time.sleep(.03)
except:
print("Error in try catch")
| [
"armon16@csu.fullerton.edu"
] | armon16@csu.fullerton.edu |
bfe44943e89a9537af13bd731fb422a50eb87f7f | 488fb4ea9b50759c61d115fea2f830dbe1a92fb4 | /flask_app/simple.py | fc09e1aef193d07567780b2eea211e3db2ae8005 | [] | no_license | jacobcui/python301 | ea42e8664c8dd1ce1d7f5f05c592372a050a27bf | 0f2961bdf061bf6d8b6390f19deeef1b73af96b4 | refs/heads/master | 2020-04-19T22:13:59.503027 | 2019-02-08T00:17:18 | 2019-02-08T00:17:18 | 168,463,713 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | """A simple example explaining GET, POST.
Reference: http://flask_app.pocoo.org/docs/1.0/quickstart/#a-minimal-application
"""
from flask import Blueprint, render_template, request
bp = Blueprint('simple', __name__)
@bp.route('/simple', methods=['GET', 'POST'])
def simple_handler():
if request.method == 'GET':
return render_template('simple.html')
| [
"jacobcui123@gmail.com"
] | jacobcui123@gmail.com |
4b6ede8c383df1a8a4b9930c4be790de6c73ee5f | e8811aaa2f4344de1f835b4e72b26f9dc5eb9f47 | /http1/predict.py | 387a14987524c7b3f34297d2b3c2f10ab83bf5f8 | [] | no_license | anandanthony/anandpyfunc | 93bae5ce03b8638e2b11a2fcb540592578e1ba84 | 6f7d497676492c1ced88a84aae8aad60a7907f9f | refs/heads/master | 2022-11-23T07:56:30.060122 | 2020-08-03T21:23:27 | 2020-08-03T21:23:27 | 284,809,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,585 | py | # General libs
from datetime import datetime
import logging
import os
# Image processing libs
import tensorflow as tf
import tensorflow_addons as tfa
import cv2
from PIL import Image
# Additional libs
import numpy as np
from urllib.request import urlopen
import requests
from io import BytesIO
from numpy import genfromtxt
from scipy.spatial import distance
scriptpath = os.path.abspath(__file__)
dir = os.path.dirname(scriptpath)
image = os.path.join(dir, 'file.jpeg')
model_weights = os.path.join(dir, 'keras.h5')
dataset = os.path.join(dir, 'dataset.tsv')
classes = os.path.join(dir, 'classes.txt')
database = genfromtxt(dataset, delimiter='\t')
classes_list = genfromtxt(classes, delimiter='\n',dtype=None)
size = 480
def exctract_roi(image):  # Exctract object from an image
    """Crop the first detected object out of `image`.

    Pipeline: grayscale -> Gaussian blur -> Canny edges -> dilation ->
    external contours; the bounding box of the first contour is returned as
    a crop of the original image.

    Returns None when no contour is detected (the original fell off the end
    of the function and returned None implicitly; the unused `image_number`
    counter was removed).
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(blurred, 0, 100, 3)
    kernel = np.ones((5, 5), np.uint8)
    dilate = cv2.dilate(canny, kernel, iterations=1)
    cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # findContours returns (contours, hierarchy) or (image, contours, hierarchy)
    # depending on the OpenCV version.
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        x, y, w, h = cv2.boundingRect(c)
        return image[y:y + h, x:x + w]
    return None  # no contours found
def url_to_image(url):  # Download image from URL and open in opencv
    """Fetch `url` and decode the response body into a numpy image array.

    Robustness fixes: a request timeout (the original could hang forever) and
    raise_for_status(), so an HTTP error page raises requests.HTTPError
    instead of being handed to PIL as image bytes (which produced a confusing
    decode error). The dead, commented-out urlopen/cv2 path was removed.
    """
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    image = Image.open(BytesIO(response.content))
    return np.array(image)
def image_preprocessing(image_url):
    """Download the image at `image_url`, crop the object of interest and
    return a (1, size, size, 3) float32 tensor ready for model.predict."""
    roi = exctract_roi(url_to_image(image_url))
    tensor = tf.convert_to_tensor(np.array(roi))
    tensor = tf.image.convert_image_dtype(tensor, tf.float32)
    tensor = tf.image.resize(tensor, (size, size))
    return tf.expand_dims(tensor, 0)
def result_post_processing(result):
    """Map an embedding vector to the id of its nearest reference entry.

    Computes the Euclidean distance from `result` to every row of the
    module-level `database` and returns the entry of `classes_list` at the
    position of the smallest distance, decoded to str.
    """
    dists = [distance.euclidean(row, result) for row in database]
    nearest = np.argmin(dists)
    return np.take(classes_list, nearest).decode("utf-8")
def predict_image_from_url(image_url):
    """End-to-end inference: rebuild the embedding CNN, load the trained
    weights, embed the image at `image_url`, and return the id of the nearest
    reference embedding (a string)."""
    # 480x480x3 input -> 128-d L2-normalized embedding. The commented-out
    # layers below match earlier experiments and are kept for reference.
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(filters=64, kernel_size=7, strides=2, padding='valid', activation='relu', input_shape=(480,480,3)),
        tf.keras.layers.MaxPooling2D(pool_size=2),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Conv2D(filters=192, kernel_size=3, strides=1, padding='valid', activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=2),
        tf.keras.layers.Conv2D(filters=192, kernel_size=3, strides=1, padding='valid', activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=2),
        #tf.keras.layers.Conv2D(filters=384, kernel_size=3, strides=1, padding='valid', activation='relu'),
        tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=1, padding='valid', activation='relu'),
        #tf.keras.layers.Conv2D(filters=256, kernel_size=3, strides=1, padding='valid', activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=2),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation=None),
        # L2-normalize so Euclidean comparison against the database is valid.
        tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))
    ])
    model.load_weights(model_weights)
    # Triplet semi-hard loss matches how the embedding network was trained;
    # required by compile() even though we only run predict() here.
    model.compile(loss=tfa.losses.TripletSemiHardLoss(margin = 4.0))
    result = model.predict(image_preprocessing(image_url))
    mongoid = result_post_processing(result)
    return mongoid
"anfranci@microsoft.com"
] | anfranci@microsoft.com |
c1cf80839a68e4e308b1e1494623900cb368e997 | a483ec5f451f4d6a4455626d3b5e7493f2c44052 | /sophomore/基于神经网络的人体行为姿态识别/SRTP-201909069-项目成果/3DCNN-Behavior Recognition/model_detail.py | 01973fa68d8799c8feb8ab796528dfc78cb54d7a | [] | no_license | wjialei/DuringColloge | 8e62587da265e2cf512c6a90990cf41c3beccf40 | d899cfb9954e1f8e10dd806d0e0428dfae18ad9b | refs/heads/master | 2020-12-13T05:47:05.759575 | 2020-01-18T05:40:41 | 2020-01-18T05:40:41 | 234,324,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | import h5py
#打开文件
f1 = h5py.File('.\\data\\checkpoint\\inception.001-2.03.hdf5','r')
f2 = h5py.File('.\\data\\checkpoint\\inception.002-1.77.hdf5','r')
f3 = h5py.File('.\\data\\checkpoint\\inception.005-1.64.hdf5','r')
f4 = h5py.File('.\\data\\checkpoint\\inception.015-1.58.hdf5','r')
f5 = h5py.File('.\\data\\checkpoint\\inception.016-1.55.hdf5','r')
f6 = h5py.File('.\\data\\checkpoint\\inception.022-1.39.hdf5','r')
f7 = h5py.File('.\\data\\checkpoint\\inception.029-1.39.hdf5','r')
with h5py.File('.\\data\\checkpoint\\inception.001-2.03.hdf5','r') as f:
def prtname(name):
print(name)
f.visit(prtname)
subgroup = f['subgroup']
subsub = subgroup['subsub']
data1 = subgroup['data1']
data2 = subsub['data2']
# print(dset)
print("data1 name:",data1.name,"data2 name",data2.name)
print("data1 shape:",data1.shape,"data2 shape:",data2.shape)
print("data1 dtype:",data1.dtype,"data2 dtype:",data2.dtype)
print("data1:",data1[:],"data2:",data2[:]) | [
"Jialei_w@163.com"
] | Jialei_w@163.com |
ce369289555ace6e16616cbac4ee84b00b8d594e | 9939d9357257f10074b3d0055d70d5f278e7032f | /entity.py | b3e4eaaab491ccabdaa68dce765d27934f403a48 | [] | no_license | bravequinoaa/FlappyBirdPy | 8068042b97f2b9829bf5fb4364b84f70b05b9f65 | 6e04c07bb6566846386b2cad5e226a072092a36b | refs/heads/main | 2023-02-03T23:03:38.021231 | 2020-12-16T04:27:44 | 2020-12-16T04:27:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | import pygame
from abc import ABC, abstractmethod, ABCMeta
class Entity(ABC):
    """Abstract base class for drawable game entities.

    Subclasses must implement __init__ (positioning/appearance state) and
    update (per-frame behaviour).

    The Python-2 style ``__metaclass__ = ABCMeta`` class attribute was
    removed: it is inert in Python 3, and inheriting from ``ABC`` already
    supplies the ABCMeta metaclass.
    """

    @abstractmethod
    def __init__(self, surface, clock, width, height, color):
        # Default state; subclasses may call super().__init__(...) and then
        # assign real values.
        self.X = None
        self.Y = None
        self.gravity = None

    @abstractmethod
    def update(self):
        """Advance the entity by one frame."""
        pass
| [
"wvano97@gmail.com"
] | wvano97@gmail.com |
14349834269be1eb71541b0b9ba7c9447bd65661 | 6f9a5717fed38b0a79c399f7e5da55c6a461de6d | /Baekjoon/TreeDiameter.py | 403cdb3ebca8db3488b4692be26727c85cc6920a | [] | no_license | Alfred-Walker/pythonps | d4d3b0f7fe93c138d02651e05ca5165825676a5e | 81ef8c712c36aa83d1c53aa50886eb845378d035 | refs/heads/master | 2022-04-16T21:34:39.316565 | 2020-04-10T07:50:46 | 2020-04-10T07:50:46 | 254,570,527 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,267 | py | # 트리의 지름이란, 트리에서 임의의 두 점 사이의 거리 중 가장 긴 것을 말한다.
# 트리의 지름을 구하는 프로그램을 작성하시오.
#
# 입력
# 트리가 입력으로 주어진다.
# 먼저 첫 번째 줄에서는 트리의 정점의 개수 V가 주어지고 (2≤V≤100,000)
# 둘째 줄부터 V개의 줄에 걸쳐 간선의 정보가 다음과 같이 주어진다.
# (정점 번호는 1부터 V까지 매겨져 있다고 생각한다)
#
# 먼저 정점 번호가 주어지고, 이어서 연결된 간선의 정보를 의미하는 정수가 두 개씩 주어지는데,
# 하나는 정점번호, 다른 하나는 그 정점까지의 거리이다.
# 예를 들어 네 번째 줄의 경우 정점 3은 정점 1과 거리가 2인 간선으로 연결되어 있고,
# 정점 4와는 거리가 3인 간선으로 연결되어 있는 것을 보여준다.
# 각 줄의 마지막에는 -1이 입력으로 주어진다. 주어지는 거리는 모두 10,000 이하의 자연수이다.
#
# 출력
# 첫째 줄에 트리의 지름을 출력한다.
import sys
sys.setrecursionlimit(10**6)
V = int(sys.stdin.readline().rstrip())
connected = [[]for _ in range(V + 1)]
visited = [False for _ in range(V + 1)]
# 입력 처리
for i in range(1, V + 1):
edges = list(map(int, sys.stdin.readline().rstrip().split()))
for j in range(1, len(edges)-1, 2):
connected[edges[0]].append((edges[j], edges[j + 1]))
# 오입력 주의: connected[i].append((edges[j], edges[j + 1]))
# v로부터 연결된 정점 중 방문하지 않은 곳들에 대하여 재귀.
# dist로 누적 거리를 체크
def dfs(v, dist):
    """Depth-first search from vertex v with accumulated distance dist.

    Returns (vertex, distance) of the farthest reachable unvisited node,
    using the module-level `connected` adjacency lists and `visited` flags.
    """
    visited[v] = True
    farthest = (v, dist)
    for nxt, weight in connected[v]:
        if not visited[nxt]:
            candidate = dfs(nxt, dist + weight)
            if candidate[1] > farthest[1]:
                farthest = candidate
    return farthest
return ret
# 첫번째 dfs: 임의의 점(1)로부터 가장 먼 곳과 거리 구함
first_dfs = dfs(1, 0)
far_v = first_dfs[0]
# 다시 dfs 하기 위해 visited 초기화
visited = [False for _ in range(V + 1)]
# 두번째 dfs: 앞서 구한 1로부터 먼 곳에서 다시 가장 먼 곳을 찾음
second_dfs = dfs(far_v, 0)
far_v = second_dfs[1]
print(far_v)
| [
"studio.alfred.walker@gmail.com"
] | studio.alfred.walker@gmail.com |
b0af44c71bc504fdf4b8d8d7454978a75e49f783 | 532989f8d1efeed25c954e801802ecaa2038ce52 | /movies_genre_model.py | e69f92c1675cac0c640013a8ce0741cd722a74b1 | [] | no_license | NaHut/Project3 | 465016ab8204abd47e419b229f20b0977e3c323e | 586fe26c98117b6b3cc183a3a22e51663c3300cc | refs/heads/master | 2020-03-23T10:13:57.741738 | 2018-07-19T01:57:45 | 2018-07-19T01:57:45 | 141,432,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,314 | py | import os
import time
import keras
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential
from keras.models import Model, Input, Sequential
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation, Average, Dropout
from keras.utils import to_categorical
from keras.losses import categorical_crossentropy
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.optimizers import Adam
from keras.datasets import cifar10
def build(ratio, epochs, batch_size,
          x_train=None, y_train=None, x_validation=None, y_validation=None):
    """Build, train and save a CNN multi-label movie-genre classifier.

    :param ratio: unused; kept for interface compatibility with callers.
    :param epochs: number of training epochs.
    :param batch_size: minibatch size.
    :param x_train/y_train: training images and multi-hot genre labels.
    :param x_validation/y_validation: validation split.

    Fixes vs. the original: the unused `model2` architecture (dead code) was
    removed, and the ModelCheckpoint callback -- previously constructed
    *after* fit() and never registered, so it saved nothing -- is now passed
    to fit() via `callbacks=`.
    """
    print(' x_train shape: ', x_train.shape)
    print(' y_train shape: ', y_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_validation.shape[0], 'validation samples')
    print(' x_valditaion shape: ', x_validation.shape)
    print(' y_validation shape: ', y_validation.shape)
    # One sigmoid output per genre (multi-label classification).
    num_classes = len(y_train[0])
    model = Sequential([
        Conv2D(24, (5, 5), padding='same', input_shape=x_train.shape[1:], activation='relu'),
        Conv2D(24, (5, 5), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Conv2D(48, (5, 5), padding='same', activation='relu'),
        Conv2D(48, (5, 5), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(256, activation='relu'),
        Dropout(0.5),
        Dense(num_classes, activation='sigmoid')
    ])
    # lr = 1e-4 -> 0.024
    opt = keras.optimizers.rmsprop(lr=1e-4, decay=1e-6)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    print(model.summary())
    # create save_dir
    save_dir = os.path.join(os.getcwd(), 'saved_models')
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    model_file_name = 'genres' + '_tmp.h5'
    model_path = os.path.join(save_dir, model_file_name)
    # Build the checkpoint callback before training and register it so it
    # actually saves the model each epoch.
    checkpoint = keras.callbacks.ModelCheckpoint(model_path,
                                                 monitor='val_loss',
                                                 verbose=0,
                                                 save_best_only=False,
                                                 save_weights_only=False,
                                                 mode='auto',
                                                 period=1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              validation_data=(x_validation, y_validation),
              callbacks=[checkpoint])
    model.save(model_path)
"whow1111@naver.com"
] | whow1111@naver.com |
665fa4ba03e6c225b3c0e1b947ee5d50644e1b6b | 4b660991e5c9c93c83dccccdd3ea91531201e8a3 | /DSA/stack/balanced_parentheses.py | b4f1220d5f0ec9e0f22a0eb60703bc0198df83f8 | [
"MIT"
] | permissive | RohanMiraje/DSAwithPython | 2a1515fa5f9e5cc76b08a3e6f0ce34e451fb6f4b | ea4884afcac9d6cc2817a93e918c829dd10cef5d | refs/heads/master | 2022-09-24T08:57:04.695470 | 2021-10-21T01:06:06 | 2021-10-21T01:06:06 | 238,381,770 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | def check_balanced_parentheses(string):
stack = list()
matches = [("(", ")"), ("{", "}"), ("[", "]")]
if len(string) % 2:
"""
base condition to early check assuming string has only parentheses
"""
return False
for char in string:
if char in ['(', '{', '[']:
stack.append(char)
elif char in [')', '}', ']']:
if len(stack) == 0:
return False
last_opening = stack.pop()
if (last_opening, char) not in matches:
return False
# prev = stack.pop()
# if char == ')':
# if prev != "(":
# return False
# elif char == "}":
# if prev != "{":
# return False
# elif char == "]":
# if prev != "[":
# return False
"""
other approach for checking matches like
matches = [("(",")"),("{","}"),("[","]")]
last_opening = stack.pop()
if (last_opening, curr_char )not in matches:
return False
"""
return len(stack) == 0
if __name__ == '__main__':
    # Quick manual check with a nested, balanced expression.
    sample_expression = "([{}])"
    print(check_balanced_parentheses(sample_expression))
| [
"rohanmiraje19@gmail.com"
] | rohanmiraje19@gmail.com |
58e85669af64469dd275db9980940709213e68dc | 2b0bbe4977893f5368eab4eb540c451863bf3e24 | /tools/get_rates.py | 859067050df1ba382330b78a4b3c31dbd3c0b1d1 | [
"MIT"
] | permissive | hildogjr/KiCost | a1f0b91ec1c8ba6b8a86f2108a38742c26ff112f | 197a61d90a24ab21049824ad1e5638ac9c4420ac | refs/heads/master | 2023-06-25T16:51:23.137235 | 2023-06-22T16:24:05 | 2023-06-22T16:24:05 | 38,410,608 | 143 | 20 | MIT | 2023-04-10T13:38:40 | 2015-07-02T04:09:13 | Python | UTF-8 | Python | false | false | 846 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Salvador E. Tropea
# Copyright (c) 2021 Instituto Nacional de Tecnología Industrial
# License: Apache 2.0
# Project: KiCost
"""
Tool to generate the default exchange rates.
Should be used before each release.
"""
import sys
import os
# Prepend the parent of this script's directory (the repo root) so the
# in-tree `kicost` package is imported rather than any installed copy.
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from kicost.currency_converter.download_rates import download_rates # noqa: E402
# Fetch current rates over the network; `date` is the quote-date string and
# `rates` maps currency code -> rate (presumably relative to EUR — confirm
# against download_rates()).
date, rates = download_rates()
assert date
# Emit a generated Python module on stdout (redirect into the package to
# refresh the baked-in defaults).
print('#!/usr/bin/python3')
print('# -*- coding: utf-8 -*-')
print("default_date = '{}'".format(date))
first = True
for cur, rate in rates.items():
    cont = "'"+cur+"': "+str(rate)+","
    if first:
        # First entry opens the dict literal on the same line.
        first = False
        print("default_rates = {"+cont)
    else:
        print(' '+cont)
print(" }")
"salvador@inti.gob.ar"
] | salvador@inti.gob.ar |
e54ee1ecc2087f724a84dac26976197e1055fff2 | 60ac463f25995f50acd0b4caea95bfdb112fe99f | /sump2.py | f67895cc91e30268e25d36d004324c6f7d862fc1 | [] | no_license | descampsa/sump2_linux | a1c36872b62e2bbdc2795d87302fc5abf073bd4c | 51264d52a5789e7042ee9f6a277b619b8e30f0ea | refs/heads/master | 2020-06-26T01:56:19.690628 | 2016-11-23T19:24:26 | 2016-11-23T19:24:26 | 74,607,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246,168 | py | #!/usr/bin/python
# sump2
# Copyright (c) Kevin M. Hubbard 2016 BlackMesaLabs
#
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Source file: sump2.py
# Date: 07.25.16
# Author: Kevin M. Hubbard
# Description: A light weight VCD viewer written in Python+PyGame for Linux
# or Windows platforms. Designed to make use of the mouse scroll
# wheel for fast navigation and inspection of waveform files. Also
# follows keyboard navigation used by Vim and ChipVault.
# History:
# The backstory on ChipWave.py, which was then forked to become sump2.py
# is that I wrote it over a weekend while sequestered in a Redmond,WA hotel
# chaperoning a highschool JSA tournament for my daughter's class. I was
# wanting a better VCD viewer for IcarusVerilog and was frustrated with
# the GTKwave user interface and difficulty installing on Linux. It was
# designed only to be the backend viewer for simulations, but turned out to
# be a really good front-end and back-end for SUMP hardware capture engine.
# Original ( now SUMP1 ) design used .NET for waveform viewing which was
# frustratingly slow. PyGame based SUMP2 gui is 100x better in my opinion.
#
# PyGame:
# ChipWave uses to Python package PyGame for Mouse and Screen iterfacing.
# PyGame does not come with Python and MUST BE INSTALLED!
# See http://www.pygame.org/download.shtml
# ChipWave.py was written in 2013 as a VCD viewer for IcarusVerilog. It was
# ditched in favor of just using GTKwave. Basic features were then reused for
# a SUMP2 front-end and back-end to replace the SUMP1 Powershell/.NET app.
# Note: There are some legacy ChipWave functions still in here that are not
# currently being used and have not been removed.
#
# [ Python 3.5 for Windows ]
# https://www.python.org/downloads/ Python 3.5.2 python-3.5.2.exe
# Click Add to Path on Installer Popup
#
# python.exe -c 'import distutils.util; print(distutils.util.get_platform())'
# win32
#
# [ PyGame ]
# http://www.lfd.uci.edu/~gohlke/pythonlibs/#pygame
# pygame-1.9.2b1-cp35-cp35m-win_amd64.whl
# pygame-1.9.2b1-cp35-cp35m-win32.whl
#
# Copy WHL to C:\Users\root\AppData\Local\Programs\Python\Python35-32\Scripts
# pip install pygame-1.9.2b1-cp35-cp35m-win32.whl
#
# [ PySerial ]
# https://pypi.python.org/pypi/pyserial
# https://pypi.python.org/packages/...../pyserial-3.1.1-py2.py3-none-any.whl
# pip install pyserial-3.1.1-py2.py3-none-any.whl
#
# TODO: Vertical resizing of window has issues. Signal scrolling isnt updated
#
# Revision History:
# Ver When Who What
# ---- -------- -------- ---------------------------------------------------
# 0.00 07.25.16 khubbard Creation. Forked from chip_wave.py ( VCD Viewer )
#
# TODO: Key repeats dont work. Fake out and make own key events ?
# TODO: scroll_up and scroll_down have issues if signals are deleted.
# TODO: Add a "GenerateVCD" feature that calls external program to make vcd
# TODO: Support for bus ripping. Complicated....
# TODO: Search only support Hex searches. Should support signed and unsigned
# TODO: Add support for an autorun.txt file on startup.
# TODO: Reload doesn't work if new VCD is longer than old VCD.
# TODO: On reload, wave.do should be saved and reloaded to preserve format.
# WARNING: vcdfile2signal_list() currently requires a clock signal or else
# conversion doesnt work unless there is a value change every sample
# NOTE: Pygame stops responding if window is dragged onto second monitor.
# TODO: Doesn't support SUMP list reordering or wave.txt order.
# 09.05.16 khubbard Fix for VCD exporting nicknames. GUI crash fixes.
# 09.06.16 khubbard Partial ported from Python 2.7 to 3.5. Incomplete.
# 09.18.16 khubbard Major performance improvements. GUI file loading
# 09.19.16 khubbard Adjust DWORDs in RLE for trig_delay value
# 09.20.16 khubbard RLE Undersample 4x-64x feature added. Popup change.
# 09.23.16 khubbard Popup Entry for variable changes in GUI added.
# 09.24.16 khubbard User Interface and performance usability improvmnts
# 09.25.16 khubbard GUI popup for signal rename.
# 09.26.16 khubbard Fixed opening VCD files for static offline viewing.
# 09.26.16 khubbard zoom_out capped at max_samples. RLE->VCD working
# 09.29.16 khubbard cursor_snap back in for single click op. VCD 'x'
# 10.04.16 khubbard fast_render added. Disable 4x prerender on >1000
# 10.06.16 khubbard Fixed popup bugs. sump_bundle_data() feature added
# 10.16.16 khubbard RLE culling null sample improvements. Menu changes.
# 10.17.16 khubbard fixed vcdfile2signal_list() not decompressing.
# 10.18.16 khubbard fixed menu. New function list_remove
# 10.19.16 khubbard RLE Event to DWORD alignment fix. Needs HW too.
# 10.20.16 khubbard Improve centering to trigger post acquisition.
# 10.21.16 khubbard Acquisition_Length fixed. Also works with RLE now.
# 10.24.16 khubbard Fixed RLE cropping not showing DWORDs.Speed Improvs
###############################################################################
import time
from time import sleep;
import math # pow
import types # type
import sys;
import os;
import platform;
import locale;
class main(object):
  """Top-level SUMP2 application object.

  __init__ runs the whole program: startup, hardware connect / file load,
  the command-line loop (no pygame) or the pygame GUI event loop, then
  shutdown.  It never returns until the user quits.
  """
  def __init__(self):
    """Start SUMP2: parse argv, connect to HW or load a VCD, run CLI or GUI."""
#   import math   # pow
#   import types  # type
    self.vers = "2016.10.24";
    print("Welcome to SUMP2 " + self.vers + " by BlackMesaLabs");
    self.mode_cli = True;
    # PyGame is optional; without it we fall back to command-line mode.
    try:
      import pygame # Import PyGame Module
    except:
      print("WARNING: PyGame not FOUND!! running in Command Line Mode");
      print("Pygame http://www.lfd.uci.edu/~gohlke/pythonlibs/#pygame");
      print("pip install pygame-1.9.2b1-cp35-cp35m-win32.whl");
    self.vars = init_vars( self, "sump2.ini" );
    self.help = init_help( self );
    self.math = math;
    list2file( self, "sump2_manual.txt", init_manual(self ) );
#   locale.setlocale( locale.LC_NUMERIC, 'English' );
    locale.setlocale( locale.LC_NUMERIC, 'en_US.UTf-8' );
    init_globals( self );# Internal software variables
    self.file_log = open ( self.vars["file_log"] , 'w' );
    # ARG0 either specifies a static file to view OR sump2 or an IP address
    # for talking directly to sump2 hardware.
    import sys;
    args = sys.argv + [None]*3;
    self.file_name = args[1]; # args[0] is script name
    if ( self.file_name == "bd_shell" or \
         self.file_name == "cli"          ):
      self.mode_cli = True;
      self.file_name = None;
    else:
      # GUI only if the pygame import above succeeded.
      if 'pygame' in locals() or 'pygame' in globals():
        display_init( self );
        self.mode_cli = False;
      else:
        self.mode_cli = True;
    self.signal_list = [];# List of class signal(object)s
    self.signal_delete_list = [];
    # No file given: talk to SUMP2 hardware, or fall back to a demo VCD.
    if ( self.file_name == None ):
#     if ( sump_connect( self ) == False ):
#       shutdown( self );
#     sump_connect( self );
      if ( sump_connect( self ) != False ):
        sump2signal_list( self );# Make Signals based on SUMP2 HW Config
        self.top_module = "sump2";
      else:
#       make_demo_vcd();
#       self.file_name = "foo.vcd";
        self.file_name = make_demo_vcd( self );
#       sump_dump_data(self);
#   else:
    # File given (or demo VCD just created): static offline viewing.
    if ( self.file_name != None ):
      self.bd=None;
      file2signal_list( self, self.file_name );# VCD is now a signal_list
#     save_format( self, self.file_name, False );# Create Wave File from VCD Info
      self.file_name = None; # Make sure we don't overwrite vcd with wave on exit
      self.vcd_import = True;# Prevents saving a VCD specific sump2_wave.txt file
#   # Attempt to loading an existing wave.txt file for this block if exists
#   # otherwise, create one from scratch
#   import os;
#   file_name = "wave_" + self.top_module + ".txt";# Default
#   if os.path.exists( file_name ):
#     print( "load_format() ", file_name );
#     load_format( self, file_name );
#   else:
#     save_format( self, file_name, False );
    if ( self.bd != None ):
      ###########################################################################
      # If a wave file doesn't exist, create a default one using info from HW
      import os;
#     file_name = "wave_" + self.top_module + ".txt";# Default
      file_name = "sump2_wave.txt";# Default
      if ( os.path.exists( file_name ) == False and self.bd != None ):
        print("Creating default wave file");
        ram_dwords = self.sump.cfg_dict['ram_dwords'];
        ram_bytes = self.sump.cfg_dict['ram_event_bytes'];
        events = ram_bytes * 8;
        # Iterate the number of event bits and init with 0s
        txt_list = [];
        for j in range( 0, events, 1):
          txt = ("/event[%d]" % j) ;
          txt_list += [ txt + " -nickname " + txt.replace("/","") ];
        # Then follow with a group for all the DWORDs
        if ( ram_dwords != 0 ):
          txt_list += ["/dword[%d:%d]" % ( ( 0),( ram_dwords-1) )];
          for i in range( 0, ram_dwords, 1):
            txt = " /dword[%d]" % ( i );
            txt_list += [ txt + " -nickname " + txt.replace("/","") ];
        file_out = open( "sump2_wave.txt", 'w' );
        for each in txt_list:
          file_out.write( each + "\n" );
        file_out.close();
      # Load in the wavefile
      if os.path.exists( file_name ):
        print( "load_format() ", file_name );
        load_format( self, file_name );
      trig_i = sump_dump_data(self);
      sump_vars_to_signal_attribs( self );# Populates things like trigger attr
#   if os.path.exists( file_name ):
#     print( "load_format() ", file_name );
#     load_format( self, file_name );
#     if ( self.bd != None ):
#       sump_dump_data(self);
#   else:
#     save_format( self, file_name, False );# Create one
#   return;
    #############################################################################
    # CLI Main Loop : When no PyGame loop here STDIN,STDOUT old school style
    while( self.mode_cli == True and self.done == False ):
      rts = raw_input(self.prompt);
#     rts = input(self.prompt);
      rts = rts.replace("="," = ");
      words = " ".join(rts.split()).split(' ') + [None] * 4;
      # "a = b" style assignment: command is "=", first word is the variable.
      if ( words[1] == "=" ):
        cmd = words[1];
        parms = [words[0]]+words[2:];
      else:
        cmd = words[0];
        parms = words[1:];
#     print( cmd, parms );
      rts = proc_cmd( self, cmd, parms );
      for each in rts:
        print( each );
    # Load the wavefile
#   # Calc max number of samples and change default zoom if not enough to fill
#   for sig_obj in self.signal_list:
#     if ( len( sig_obj.values ) > self.max_samples ):
#       self.max_samples = len( sig_obj.values );
#   if ( ( self.max_samples * self.zoom_x ) < self.screen_width ):
#     self.zoom_x = float(self.screen_width) / float(self.max_samples);
#   set_zoom_x( self, self.zoom_x ); # Set the zoom ratio
    recalc_max_samples( self );
    # Draw the 1st startup screen
    screen_refresh( self );
    self.context = "gui";
    # GUI Main Loop
    self.clock = self.pygame.time.Clock();
    self.time = self.pygame.time;
    self.pygame.key.set_repeat(50,200);
    while ( self.done==False ):
      # When live, attempt to acquire data, else display static data (faster)
      if ( self.acq_state == "acquire_single" or
           "acquire_rle" in self.acq_state or
           self.acq_state == "acquire_continuous" ):
        # Check to see if acquired bit is set, then read the data
        # print ("%02X" % self.sump.rd( addr = None )[0] );
        if ( "acquire_rle" in self.acq_state ):
          sump_done = self.sump.status_triggered + self.sump.status_rle_post;
        else:
          sump_done = self.sump.status_triggered + self.sump.status_ram_post;
        self.undersample_data = False;
        self.undersample_rate = 1;
        # Poll the hardware status register for "triggered + post done".
        if ( ( self.sump.rd( addr = None )[0] & sump_done ) == sump_done ):
          if ( self.acq_mode == "nonrle" ):
            trig_i = sump_dump_data(self);
          else:
            trig_i = sump_dump_rle_data(self);
          print("Trigger Index = %d " % trig_i );
          # Place the cursors by the trigger.
          for ( i , each ) in enumerate( self.cursor_list ):
            if ( i == 0 ): offset = -6;
            else         : offset = +4;
            each.selected = False;
#           trigger_sample = self.max_samples // 2;# Temporary Trig @ 50%
#           each.sample = int( trigger_sample ) + offset;
            each.sample = int( trig_i ) + offset;
          self.curval_surface_valid = False;# curval surface invalid
          if ( self.acq_state == "acquire_continuous" ):
            sump_arm( self, True );
          else:
            self.acq_state = "acquire_stop";
            draw_header( self, "ACQUIRED");
          print("RENDERING-START");
#         start = ( self.max_samples // 2 ) - ( self.max_samples // 8 );
#         stop  = ( self.max_samples // 2 ) + ( self.max_samples // 8 );
          # Zoom-Out the maximum amount that still keeps trigger centered
          # for non-RLE this is trivial, for RLE it is more complicated
          trig_to_start = trig_i - 0;
          trig_to_end = self.max_samples - trig_i;
          start = trig_i - min( trig_to_start, trig_to_end );
          stop = trig_i + min( trig_to_start, trig_to_end );
          proc_cmd( self, "zoom_to", [str(start), str(stop) ] );
#         proc_cmd( self, "zoom_to", ["0", str( self.max_samples ) ] );
#         proc_cmd( self, "zoom_to_cursors", [] );
          print("RENDERING-COMPLETE");
        else:
#         draw_header(self,"Waiting for trigger..");
          draw_screen( self );# This updates banner
          self.time.wait(500 ); # Waiting for Trigger
      else:
#       self.clock.tick( 10 ); # Don't take 100% of CPU as that would be rude
        self.time.wait(10 ); # Wait 10ms to share CPU
      for event in pygame.event.get(): # User did something
        # VIDEORESIZE
        if event.type == pygame.VIDEORESIZE:
          self.screen= pygame.display.set_mode(event.dict['size'],
                                   pygame.RESIZABLE |
                                   pygame.HWSURFACE |
                                   pygame.DOUBLEBUF);
          self.resize_on_mouse_motion = True;# Delay redraw until resize done
        # Detect when console box has gained focus and switch from GUI to BD_SHELL
        # and loop in a keyboard loop processing commands. Exit on a NULL Command.
        # if event.type == pygame.ACTIVEEVENT:
        #   #print( str(event.gain) + " " + str(event.state) );
        #   # state=2 user gave focus to something other than GUI. Assume DOS-Box
        #   if ( event.state == 2 and self.os_sys != "Linux" ):
        #     bd_shell( self );
        # KEYDOWN
        if event.type == pygame.KEYDOWN:
          if ( event.key == pygame.K_BACKSPACE ):
            self.key_buffer = self.key_buffer[:-1];# Remove last char
          elif ( event.key == pygame.K_DELETE ):
            proc_cmd( self, "delete", [""] );
          elif ( event.key == pygame.K_INSERT ):
            proc_cmd( self, "insert_divider", [""] );
          elif ( event.key == pygame.K_PAGEUP ):
            proc_cmd( self, "zoom_in" , [] );
          elif ( event.key == pygame.K_PAGEDOWN ):
            proc_cmd( self, "zoom_out" , [] );
          elif ( event.key == pygame.K_HOME ):
            proc_cmd( self, "font_larger" , [] );
          elif ( event.key == pygame.K_END ):
            proc_cmd( self, "font_smaller" , [] );
          elif ( event.key == pygame.K_RIGHT ):
            num_samples = self.sample_room // 16;
            proc_cmd( self, "scroll_right", [str(num_samples)] );
          elif ( event.key == pygame.K_LEFT ):
            num_samples = self.sample_room // 16;
            proc_cmd( self, "scroll_left", [str(num_samples)] );
          # Up and Down arrows either Zoom In,Out or Scroll the Signal list
          elif ( event.key == pygame.K_UP ):
            if ( self.mouse_region == "signal_name" ):
              proc_cmd( self, "scroll_up" , ["1"] );
            else:
              proc_cmd( self, "zoom_in" , [] );
          elif ( event.key == pygame.K_DOWN ):
            if ( self.mouse_region == "signal_name" ):
              proc_cmd( self, "scroll_down", ["1"] );
            else:
              proc_cmd( self, "zoom_out", [] );
          elif ( event.key == pygame.K_SPACE and self.key_buffer == "" ):
            proc_cmd( self, "Expand", [""] );
            draw_screen( self );
            screen_flip( self );
          # Note: Text Entry moved to DOS-Box
          # elif ( event.key == pygame.K_RETURN ):
          #   self.key_buffer = self.key_buffer.replace("="," = ");
          #   words = self.key_buffer.strip().split()+[None]*4;# Avoid IndexError
          #   cmd = words[0];
          #   parms = words[1:];
          #   if ( self.txt_entry == True ):
          #     cmd = self.txt_entry_caption;# ie "Rename_Signal"
          #     parms = words[0:];
          #     self.txt_entry = False; # Disable Dialog Box
          #   rts = proc_cmd( self, cmd, parms );
          #   for each in rts:
          #     print( each );# <CR>s
          #   sys.stdout.write( self.prompt );# No <CR>
          #   self.cmd_history.append( " " + self.key_buffer );
          #   self.key_buffer = "";
          # elif ( event.key > 0 and event.key < 255 ):
          #   self.key_buffer = self.key_buffer + event.unicode;
          #   if ( event.unicode == "/" or event.unicode == "?" ):
          #     self.key_buffer = self.key_buffer + " ";
          #   sys.stdout.write( event.unicode );
        # QUIT
        if ( event.type == pygame.QUIT ) :
          self.done=True;
        # MOUSEMOTION
        if event.type == pygame.MOUSEMOTION:
          (self.mouse_x,self.mouse_y) = pygame.mouse.get_pos();
          # self.mouse_region = get_mouse_region(self,self.mouse_x,self.mouse_y);
          # # If mouse wave moved to right of the value region, scroll once to the
          # # right and then create a fake MOUSEMOTION event to continue scrolling
          # # until mouse is moved away.
          # if ( self.mouse_region == "scroll_right" or \
          #      self.mouse_region == "scroll_left"     ):
          #   proc_cmd( self, self.mouse_region , ["1"] ); # scroll left or right
          #   # TODO: This wait time needs to be configurable. On my HP Centos
          #   # laptop it scrolled too fast at 50ms, too slow at 250ms.
          #   # Make sure mouse is still in this window ( focused )
          #   if ( self.pygame.mouse.get_focused() == True ):
          #     self.pygame.time.wait( 100 );# Delay in ms
          #     self.pygame.event.post( pygame.event.Event( pygame.MOUSEMOTION ) );
          # If a resize op was just completed, redraw on 1st mouse motion as
          # trying to redraw during the resize is very slow and jerky.
          if ( self.resize_on_mouse_motion == True ):
            self.resize_on_mouse_motion = False; # This makes resize op smoother
            old_width = self.screen_width;
            ( self.screen_width, self.screen_height ) = self.screen.get_size();
            self.vars["screen_width"] = str( self.screen_width );
            self.vars["screen_height"] = str( self.screen_height );
#           if ( self.screen_width > old_width ):
            # This is a HACK as sig_value_stop_x wasn't auto adjusting for some
            # reason when window is resized to be larger.
            # There is like a chicken and egg problem with zoom_full and stop_x
            # adjust stop_x for delta change rather than calling zoom_full twice
            self.sig_value_stop_x += ( self.screen_width - old_width );
            proc_cmd( self, "zoom_full", [] );# HACK Needed to update parms
#           create_surfaces( self );
#           flush_surface_cache( self );
#           screen_refresh( self );
          # If popup enabled, continue drawing and updating until button release
          if ( self.popup_x != None ):
            self.popup_sel = get_popup_sel( self );
            # print("draw_popup_cmd()");
            draw_popup_cmd( self );# Just draw popup on top of existing display
            screen_flip( self );# Only thing changing is the popup selection
          # If mouse button is held down, check for drag operation
#         elif ( self.mouse_button != 0 ):
          elif ( self.mouse_button == 1 or \
                 self.mouse_button == 2     ):
            # Make sure the region doesnt wander, so calc from mouse press
            self.mouse_region = get_mouse_region(self,
                        self.mouse_btn1dn_x, self.mouse_btn1dn_y );
            if ( self.mouse_region == "cursor" ):
              mouse_event_move_cursor( self ); # Move a cursor
            elif ( self.mouse_region == "slider" ):
              mouse_event_move_slider( self,0 ); # Move the viewport slider
            elif ( self.mouse_region == "signal_name" ):
              mouse_event_vertical_drag_wip( self );# Move a signal name
            # HERE : Doesnt work well
            # elif ( self.mouse_region == "signal_value" ):
            #   # Calculate mouse drag deltas in char units
            #   delta_x=abs(self.mouse_btn1dn_x-self.mouse_x) / self.txt_width;
            #   delta_y=abs(self.mouse_btn1dn_y-self.mouse_y) / self.txt_height;
            #   if ( delta_x > 2 and delta_y > 2 ):
            #     mouse_event_area_drag_wip( self ); # Rectangle Zoom Region
        # MOUSEBUTTONUP : event.button 1=Left,2=Middle,3=Right,4=ScrlUp,5=ScrlDn
        if event.type == pygame.MOUSEBUTTONUP:
          (self.mouse_x,self.mouse_y) = pygame.mouse.get_pos();
          self.mouse_region = get_mouse_region(self,self.mouse_x,self.mouse_y);
          if ( event.button == 1 ):
            (self.mouse_btn1up_x,self.mouse_btn1up_y)=(self.mouse_x,self.mouse_y);
          if ( event.button == 3 ):
            (self.mouse_btn3up_x,self.mouse_btn3up_y)=(self.mouse_x,self.mouse_y);
          # Attempt to detect double-click on left-mouse button t<300ms
          if ( event.button == 1 ):
            self.mouse_btn1up_time_last = self.mouse_btn1up_time;
            self.mouse_btn1up_time = self.pygame.time.get_ticks();
            if ( ( self.mouse_btn1up_time - self.mouse_btn1up_time_last ) < 300 ):
              mouse_event_double_click( self );
          delta_y=abs( self.mouse_btn3dn_y-self.mouse_btn3up_y )/self.txt_height;
          delta_x=abs( self.mouse_btn3dn_x-self.mouse_btn3up_x )/self.txt_width;
          # If popup enabled, process the cmd
#         if ( self.popup_x != None ):
          if ( self.popup_x != None and
               ( event.button == 1 ) or
               ( event.button == 3 and ( delta_y > 1.0 or delta_x > 1.0 ) )
             ):
#           proc_cmd( self, self.popup_sel, [""] );
            words = self.popup_sel.strip().split() + [""] * 4;# AvoidIndexError
            proc_cmd( self, words[0],words[1:] );
            self.popup_x = None;# Erase popup
            self.popup_parent_x = None;
          else:
            # Mouse Button 1 Only - except emulate Center Mouse Drag to Zoom
            delta_x = abs( self.mouse_btn1dn_x-self.mouse_btn1up_x ) / \
                           self.txt_width;
            delta_y = abs( self.mouse_btn1dn_y-self.mouse_btn1up_y ) / \
                           self.txt_height;
#           if ( event.button == 2 and delta_x > 2 and delta_y > 2 ):
            if ( event.button == 1 or
                 ( event.button == 2 and delta_x > 2 and delta_y > 2 ) ):
              # Mouse region is from 1st click, not release
              self.mouse_region = get_mouse_region(self,
                          self.mouse_btn1dn_x, self.mouse_btn1dn_y );
              if ( self.mouse_region == "cursor" ):
                mouse_event_move_cursor( self ); # Move a cursor
              elif ( self.mouse_region == "slider" ):
                mouse_event_move_slider( self,0 ); # Move the viewport slider
              elif ( self.mouse_region == "signal_expand" ):
                proc_cmd( self, "Expand", [""] );
              elif ( self.mouse_region == "signal_name" ):
                delta_y = abs( self.mouse_btn1dn_y-self.mouse_btn1up_y ) / \
                               self.txt_height;
                if ( delta_y > 0 ):
                  mouse_event_vertical_drag_done( self, \
                     ((self.mouse_btn1dn_y-self.mouse_btn1up_y) / \
                       self.txt_height ) );# Reorder signal list
              # elif ( self.mouse_region == "signal_value" ):
              #   delta_x = abs( self.mouse_btn1dn_x-self.mouse_btn1up_x ) / \
              #                  self.txt_width;
              #   delta_y = abs( self.mouse_btn1dn_y-self.mouse_btn1up_y ) / \
              #                  self.txt_height;
              #   if ( delta_x < 2 and delta_y < 2 ):
              #     mouse_event_single_click( self ); # Moves Cursor to here
              #   if ( delta_x > 2 and delta_y < 2 ):
              #     # signal_value region is being dragged, so pan left or right
              #     direction = (self.mouse_btn1dn_x-self.mouse_btn1up_x) / \
              #                  self.zoom_x;
              #     proc_cmd( self, "scroll_right", [str( int(direction) ) ] );
              #
              #   elif ( delta_x > 2 and delta_y > 2 ):
              #     mouse_event_area_drag_done( self ); # Zooms to region
            if ( event.button == 2 and delta_x < 2 and delta_y < 2 ):
              print( "Center Mouse Button Click");
            # Mouse-Scroll Wheel
            #   region==signal_name  : Scroll Up and Down
            #   region==signal_value : Scroll Left and Right
            #   region==slider       : Zoom in and out
            elif ( event.button >= 4 ):
              # print( self.mouse_region );
              if ( self.mouse_region == "signal_name" ):
                if ( event.button == 4 ):
                  proc_cmd( self, "scroll_up", ["1"] );
                elif ( event.button == 5 ):
                  proc_cmd( self, "scroll_down", ["1"] );
              elif ( self.mouse_region == "signal_value" ):
                if ( event.button == 4 ):
                  mouse_event_zoom_scroll( self, +1 );
                elif ( event.button == 5 ):
                  mouse_event_zoom_scroll( self, -1 );
              elif ( self.mouse_region == "slider" ):
                if ( event.button == 4 ):
                  proc_cmd( self, "scroll_right",[str(+self.scroll_num_samples)]);
                elif ( event.button == 5 ):
                  proc_cmd( self, "scroll_left",[str(+self.scroll_num_samples)]);
              elif ( self.mouse_region == "cursor" ):
                self.curval_surface_valid = False;# curval surface is now invalid
                # Scroll wheel nudges the selected cursor one sample at a time.
                for cur_obj in self.cursor_list:
                  if ( cur_obj.selected == True ):
                    sample = cur_obj.sample;
                    if ( event.button == 4 ):
                      sample +=1;
                    elif ( event.button == 5 ):
                      sample -=1;
                    if ( sample < 0 ) : sample = 0;
                    if ( sample > self.max_samples ): sample = self.max_samples;
                    cur_obj.sample = sample;
                screen_refresh( self );
          self.mouse_button = 0; # No Button is Pressed
        # MOUSEBUTTONDOWN : 1=Left,2=Middle,3=Right,4=ScrlUp,5=ScrlDn
        if event.type == pygame.MOUSEBUTTONDOWN:
          self.mouse_button = event.button;# Remember which button is Pressed
          (self.mouse_x,self.mouse_y) = pygame.mouse.get_pos();
          self.mouse_region = get_mouse_region(self,self.mouse_x,self.mouse_y);
          # Left-Mouse-Button-Down
          # If popup is already up and right-click is clicked again, emulate left
          if ( event.button == 1 or event.button == 2 or
               ( event.button == 3 and self.popup_x != None ) ):
            self.mouse_btn1dn_time = self.pygame.time.get_ticks();
            (self.mouse_x,self.mouse_y) = pygame.mouse.get_pos();
            (self.mouse_btn1dn_x,self.mouse_btn1dn_y) = \
               (self.mouse_x,self.mouse_y);
            if ( self.mouse_region == "slider" ):
              mouse_event_move_slider( self, 0 );
            elif ( self.mouse_region == "signal_name" or
                   self.mouse_region == "signal_expand" ):
              mouse_event_select_signal( self );
            elif ( self.mouse_region == "signal_value" ):
              mouse_event_single_click( self ); # Moves Cursor to here
#             pass;
          # Right-Mouse-Button-Down
          if ( event.button == 3 and self.popup_x == None ):
            (self.popup_x,self.popup_y) = pygame.mouse.get_pos();
            (self.mouse_btn3dn_x,self.mouse_btn3dn_y) = \
               (self.mouse_x,self.mouse_y);
            # For cursor bring tos want to know exacy sample right click was on
            (self.popup_sample, Null ) = get_sample_at_mouse( self,
                                      self.popup_x, self.popup_y );
            # Set the popup up and to the left so that a click and release
            # selects the 1st guy ( Scroll_Toggle ) - a hack - I know.
            self.popup_x -= 2*self.txt_width;
            self.popup_y -= self.txt_height;
            if ( self.mouse_region == "signal_value" ):
              self.popup_list = self.popup_list_values;
#           elif ( self.mouse_region == "signal_name" ):
            else:
              self.popup_list = self.popup_list_names;
            draw_popup_cmd( self );
            self.popup_sel = get_popup_sel( self );
            screen_flip( self ); # Place popup on top existing stuff, no erase
            self.acq_state = "acquire_stop";# Stop any live acquisitions
# New
#     draw_screen( self );
#     screen_flip( self );
    shutdown( self );
    return;# This is end of main program loop
###############################################################################
def recalc_max_samples( self ):
  """Recompute self.max_samples from the signal list; refresh zoom and room.

  In GUI mode also stretches zoom_x when the capture is too short to fill
  the window and recalculates sample_room / sample_stop for the viewport.
  """
  # The deepest value list among all signals defines the capture depth.
  deepest = 0;
  for each_sig in self.signal_list:
    sample_cnt = len( each_sig.values );
    if ( sample_cnt > deepest ):
      deepest = sample_cnt;
  self.max_samples = deepest;
  if ( self.mode_cli == True ):
    return;# No display in CLI mode, nothing else to recalc
  # If the capture can't fill the window at current zoom, zoom in to fit.
  if ( self.max_samples != 0 and
       ( self.max_samples * self.zoom_x ) < self.screen_width ):
    self.zoom_x = float(self.screen_width) / float(self.max_samples);
    set_zoom_x( self, self.zoom_x ); # Set the zoom ratio
  # Warning: This sample_room calculation assumes samples are 1 nibble wide.
  pixels_avail = self.screen_width - self.sig_value_start_x - 2*self.txt_width;
  self.sample_room = int( float(pixels_avail) / float(self.zoom_x) );
  self.sample_stop = self.sample_start + self.sample_room;
  return;
def display_init( self ):
  """Bring up the PyGame display: window, icon, header, font and surfaces.

  Reads screen_width/screen_height/font_name/font_size from self.vars
  (the sump2.ini settings) and sets self.gui_active True on completion.
  """
  log( self, ["display_init()"] );
  import pygame # Import PyGame Module
  pygame.init() # Initialize the game engine
  self.screen_width = int( self.vars["screen_width"], 10 );
  self.screen_height = int( self.vars["screen_height"], 10 );
  # pygame.NOFRAME, pygame.FULLSCREEN
  self.screen=pygame.display.set_mode(
                     [ self.screen_width, self.screen_height ],
                     pygame.RESIZABLE | pygame.HWSURFACE | pygame.DOUBLEBUF );
  self.pygame = pygame;
  self.pygame.display.set_icon( create_icon( self ) );
  draw_header( self, "" );
  self.font = get_font( self,self.vars["font_name"],self.vars["font_size"]);
  self.gui_active = True;
  create_surfaces( self );
  return;
# mouse_event_select_signal() : User has clicked the mouse in the signal name
# region, so either deselect the old selection and select the new signal at
# the mouse location, or if the shift key is held down, select everything from
# old selection to new location.
def mouse_event_select_signal( self ):
  """Handle a click in the signal-name region.

  Shift+click extends the selection from the previously selected signal to
  the clicked one; Ctrl+click adds to the selection; a plain click replaces
  it.  Invalidates the cached name surface and refreshes the screen.
  """
  self.name_surface_valid = False;
  if ( self.pygame.key.get_pressed()[self.pygame.K_LSHIFT] or
       self.pygame.key.get_pressed()[self.pygame.K_RSHIFT]    ):
#   (Null,index) = get_sample_at_mouse( self, self.mouse_x, self.mouse_y );
#   if ( index != None ):
#     sig_obj = self.signal_list[ index ];
#     sig_obj.selected = True;
#     self.sig_obj_sel = sig_obj;# Remember for pulldown commands
    sig_obj = get_sig_obj_at_mouse( self, self.mouse_x, self.mouse_y );
    if ( sig_obj != None ):
      sig_obj.selected = True;
      self.sig_obj_sel = sig_obj;# Remember for pulldown commands
    start_jk = False;
    for sig_obj in self.signal_list:
      # Select all visible signals between old select and new select
      if ( start_jk == True and sig_obj.visible == True ):
        sig_obj.selected = True;
      # Start the grouping on the old select
      if ( sig_obj.selected == True ):
        start_jk = True;
      # Finish when we get to new select
      if ( sig_obj == self.sig_obj_sel ):
        start_jk = False;
        break;
    screen_refresh( self );
  else:
    # DeSelect All signals unless a CTRL key is held down
    if ( self.pygame.key.get_pressed()[self.pygame.K_LCTRL] == False and
         self.pygame.key.get_pressed()[self.pygame.K_RCTRL] == False ):
      for sig_obj in self.signal_list:
        sig_obj.selected = False;
    # Find the signal at the mouse location at select it.
    sig_obj = get_sig_obj_at_mouse( self, self.mouse_x, self.mouse_y );
    if ( sig_obj != None ):
      sig_obj.selected = True;
      self.sig_obj_sel = sig_obj;# Remember for pulldown commands
    screen_refresh( self );
  return;
def mouse_event_double_click( self ):
  """Toggle hide/show of the signal under the mouse on a double-click."""
  target = get_sig_obj_at_mouse( self, self.mouse_x, self.mouse_y );
  if ( target == None ):
    return;# Double-click wasn't on a signal
  target.selected = True;
  # A currently shown signal gets hidden; a hidden one is shown again.
  if ( target.hidden == False ):
    toggle_cmd = "hide";
  else:
    toggle_cmd = "show";
  proc_cmd( self, toggle_cmd, [""] );
  return;
def mouse_event_move_slider( self, direction ):
  """Jump the waveform viewport to the slider position under the mouse.

  Maps the mouse X position within the slider track ( sig_value_start_x ..
  sig_value_stop_x ) to a new self.sample_start, clamped so the viewport
  stays inside 0..max_samples, then refreshes the screen.  Dragging the
  slider is intentionally not supported; each call is an absolute jump.
  The `direction` parameter is kept for interface compatibility (callers
  pass 0) but is unused.

  Cleanup: removed the always-true `if ( True ):` scaffolding left from the
  disabled drag checks, and the unused mouse_y / delta_x locals.
  """
  x1 = self.sig_value_start_x;
  x2 = self.sig_value_stop_x;
  mouse_x = self.mouse_x - self.slider_width / 2;# Put Center of Slider on Mouse
  self.sample_start = int(self.max_samples * ( mouse_x - x1 ) / ( x2-x1 ));
  # Prevent scrolling too far to right or left
  if ( self.sample_start + self.sample_room > self.max_samples ):
    self.sample_start = int(self.max_samples - self.sample_room);
  if ( self.sample_start < 0 ):
    self.sample_start = 0;
  screen_refresh( self );
  return;
def mouse_event_single_click( self ):
  """Snap the selected cursor to the sample at the click location."""
  x_txt = str( self.mouse_x );
  y_txt = str( self.mouse_y );
  proc_cmd( self, "cursor_snap" , [ x_txt, y_txt ] );
  return;
# print( self.popup_x );
def mouse_event_zoom_scroll( self, direction ):
  """Zoom in (direction == +1) or out at the sample under the mouse.

  Zoom changes by a factor of 2, capped at 100x in.  After changing zoom,
  sample_start is shifted so the sample that was under the mouse before the
  zoom is still under the mouse afterwards.  Zooming out past the capture
  size falls back to the "zoom_full" command.
  """
  # Remember which sample sits under the mouse before the zoom change.
  ( sample, Null ) = get_sample_at_mouse( self, self.mouse_x, self.mouse_y );
  if ( direction == +1 ):
    if ( True ):
#     new_zoom_x = self.zoom_x * 1.25;
      new_zoom_x = self.zoom_x * 2.00;
      if ( new_zoom_x > 100 ):
        new_zoom_x = 100.0;# Don't ZoomIn too far
      set_zoom_x( self, new_zoom_x );
  else:
    # Zooming out: if doubling the visible room would run past the capture
    # end, just zoom to the full capture instead.
    sample_room = self.sample_room * 2.00;
    new_sample = self.sample_start - sample_room // 4;
    if ( ( new_sample + sample_room ) > self.max_samples ):
      proc_cmd( self, "zoom_full", [] );
      return;
    if ( self.stop_zoom == False ):
#     new_zoom_x = self.zoom_x / 1.25;
      new_zoom_x = self.zoom_x / 2.00;
#     if ( new_zoom_x < 0.1 ):
#       new_zoom_x = 0.1;
      set_zoom_x( self, new_zoom_x );
  # Now see what sample is at the mouse position and adjust start accordingly
  # so that original sample is still under the mouse
  (new_sample,Null)=get_sample_at_mouse( self, self.mouse_x, self.mouse_y );
  sample_offset = sample - new_sample;
  self.sample_start += sample_offset;
  if ( self.sample_start < 0 ):
    self.sample_start = 0;
  screen_refresh( self );
  return;
def mouse_event_horizontal_drag( self, direction ):
  """Placeholder: horizontal pan-by-drag is intentionally disabled.

  An earlier implementation shifted sample_start by direction and clamped
  it to the valid range; that support was removed, so this is a no-op.
  """
  return
def mouse_event_move_cursor( self ):
  """Drag a cursor: move the cursor whose label row contained the original
  button-1 press to the sample now under the mouse, and make it the single
  selected cursor. Redraws the screen afterwards.
  """
  (self.mouse_x,self.mouse_y) = self.pygame.mouse.get_pos();
  ( sample, Null ) = get_sample_at_mouse( self, self.mouse_x, self.mouse_y );
  self.curval_surface_valid = False;# curval surface is invalid when cur move
  for cur_obj in self.cursor_list:
    # Row test uses the press location, so the drag stays attached to the
    # cursor that was grabbed ( cur_obj.y is its label row - presumably ).
    if ( self.mouse_btn1dn_y > cur_obj.y and \
         self.mouse_btn1dn_y < cur_obj.y+self.txt_height ):
      if ( sample < 0 ):
        sample = 0;
      cur_obj.sample = int( sample );
      for each in self.cursor_list:
        each.selected = False;# Make both cursors unselected
      cur_obj.selected = True;# now select the current cursor only
  screen_refresh( self );
  return;
def mouse_event_vertical_drag_wip( self ):
  """While a signal name is being dragged vertically, draw a horizontal
  insert-point line just above the signal row currently under the mouse.
  The actual reorder happens in mouse_event_vertical_drag_done().
  """
  # print "mouse_event_vertical_drag_wip()";
  # Reorder the signal list when a signal name is dragged
  if ( True ):
    x1 = self.sig_name_start_x;
    x2 = self.sig_name_stop_x;
    sig_obj = get_sig_obj_at_mouse( self, self.mouse_x, self.mouse_y );
    if ( sig_obj != None ):
      # Draw a horizontal line at insert point before button is released
      y1 = sig_obj.y - 1; y2 = y1;
      # flush_surface_cache( self );
      screen_erase( self );
      draw_screen( self );
      self.pygame.draw.line(self.screen,self.color_fg,(x1,y1),(x2,y2),1);
      screen_flip( self );
  return;
# TODO: This doesn't handle invisible signals.
def mouse_event_vertical_drag_done( self, direction ):
  """Finish a vertical drag of a signal name: reorder self.signal_list.

  The row index under the original button press ( index_dn ) is moved to
  the row index under the release ( index_up ). Either index is None when
  the press or release fell outside every visible signal row.

  Bug fix: the None checks now happen before the index_up > index_dn
  comparison. Python 3 raises TypeError when comparing None with int, so
  releasing the mouse outside the signal rows used to crash here.
  """
  ( Null, index_dn ) = get_sample_at_mouse( self, self.mouse_btn1dn_x,
                                                  self.mouse_btn1dn_y );
  ( Null, index_up ) = get_sample_at_mouse( self, self.mouse_btn1up_x,
                                                  self.mouse_btn1up_y );
  if ( index_up != None and index_dn != None and index_up > index_dn ):
    index_up -= 1;# Account for pop() below shifting later rows up by one
  print( "index_up = " + str( index_up ));
  print( "index_dn = " + str( index_dn ));
  if ( index_up != None and index_dn != None ):
    self.signal_list.insert( index_up, self.signal_list.pop( index_dn ) );
  flush_surface_cache( self );
  screen_refresh( self );
  return;
def mouse_event_area_drag_wip( self ):
  """While button-1 is held in the value area, outline the zoom rectangle
  from the press point to the current mouse position.
  """
  if ( self.mouse_btn1dn_x <= self.sig_value_start_x ):
    return
  drag_rect = ( self.mouse_btn1dn_x,
                self.mouse_btn1dn_y,
                self.mouse_x - self.mouse_btn1dn_x,
                self.mouse_y - self.mouse_btn1dn_y )
  screen_erase( self )
  self.pygame.draw.rect( self.screen, self.color_fg, drag_rect, 1 )
  draw_screen( self )
  screen_flip( self )
  return
def mouse_event_area_drag_done( self ):
  """Button-1 released after an area drag in the value region: zoom to the
  sample range swept out by the drag ( endpoints auto-ordered ).
  """
  if ( self.mouse_btn1dn_x <= self.sig_value_start_x ):
    return
  ( samp_a, _idx ) = get_sample_at_mouse( self, self.mouse_btn1dn_x,
                                          self.mouse_btn1dn_y )
  ( samp_b, _idx ) = get_sample_at_mouse( self, self.mouse_btn1up_x,
                                          self.mouse_btn1up_y )
  lo = int( min( samp_a, samp_b ) )
  hi = int( max( samp_a, samp_b ) )
  proc_cmd( self, "zoom_to", [ str( lo ), str( hi ) ] )
  return
###############################################################################
# Given a mouse position, return the section of screen that it is in
# as a txt string "signal_name","signal_value","cursor","slider"
def get_mouse_region( self, mouse_x, mouse_y ):
  """Classify a pixel coordinate into a named screen region.

  Returns one of "signal_expand", "signal_name", "signal_value",
  "scroll_left", "scroll_right", "cursor", "slider", or "" when no region
  matches or a popup menu is open. Region tests are ordered; the first
  match wins.
  """
  if ( self.popup_x != None ):
    return ""; # No region if a popup is open
  if ( mouse_x > self.sig_name_start_x and
       mouse_x < self.sig_name_stop_x and
       mouse_y > self.sig_name_start_y and
       mouse_y < self.sig_name_stop_y+self.txt_height ):
    # See if Click in the net name "[+]" region
    # flush_surface_cache( self );
    sig_obj = get_sig_obj_at_mouse( self, mouse_x, mouse_y );
    if ( sig_obj != None ):
      # Width of the clickable "[+]" expander is the hierarchy indent
      # ( hier_level spaces ) plus the rendered "[+]" glyph width.
      txt1 = self.font.render( " ",True,self.color_fg, self.color_bg);
      txt2 = self.font.render( "[+]",True,self.color_fg, self.color_bg);
      if ( mouse_x > self.sig_name_start_x and
           mouse_x < ( ( self.sig_name_start_x ) + \
                ( sig_obj.hier_level * txt1.get_width() ) + \
                ( txt2.get_width() ) ) ):
        return "signal_expand";
      else:
        return "signal_name";
  if ( mouse_x > self.sig_value_start_x and
       mouse_x < self.sig_value_stop_x and
       mouse_y > self.sig_value_start_y and
       mouse_y < self.sig_value_stop_y+self.txt_height ):
    return "signal_value";
  if ( mouse_x > self.sig_name_stop_x and
       mouse_x < self.sig_value_start_x and
       mouse_y > self.sig_value_start_y and
       mouse_y < self.sig_value_stop_y+self.txt_height ):
    return "scroll_left";
  if ( mouse_x > self.sig_value_stop_x and
       mouse_y > self.sig_value_start_y and
       mouse_y < self.sig_value_stop_y+self.txt_height ):
    return "scroll_right";
  if ( mouse_x > self.sig_value_start_x and
       mouse_x < self.sig_value_stop_x and
       mouse_y > self.cursor_start_y and
       mouse_y < self.cursor_stop_y+self.txt_height ):
    return "cursor";
  if ( mouse_x > self.sig_value_start_x and
       mouse_x < self.sig_value_stop_x and
       mouse_y > self.cursor_stop_y ):
    return "slider";
  return "";
###############################################################################
# direction -1=Backwards,+1=Forwards,0=Both
# value= Binary, Hex or "edge" ( transition )
# Returns sample number
def search_values( self, sig_obj, sample_start, search_value, direction ):
  """Scan sig_obj.values outward from sample_start for a match.

  search_value is either a literal compared with ==, or the string "edge"
  which matches any transition from the previously inspected value on that
  side. direction: -1 scans left only, +1 scans right only, 0 scans both.
  Returns the index of the match, or sample_start when nothing is found.
  Note: both side checks keep executing each pass until BOTH sides finish,
  so with direction 0 a later hit on either side overwrites the result;
  this quirk of the original algorithm is deliberately preserved.
  """
  result = sample_start
  if ( sig_obj ):
    done_r = ( direction == -1 )
    done_l = ( direction == +1 )
    prev_r = sig_obj.values[ sample_start ]
    prev_l = sig_obj.values[ sample_start ]
    dist = 0
    while not ( done_r and done_l ):
      dist += 1
      idx = sample_start + dist
      if ( idx < len( sig_obj.values ) ):
        cur = sig_obj.values[ idx ]
        if ( cur == search_value or
             ( search_value == "edge" and cur != prev_r ) ):
          done_r = True
          result = idx
        prev_r = cur
      else:
        done_r = True
      idx = sample_start - dist
      if ( idx >= 0 ):
        cur = sig_obj.values[ idx ]
        if ( cur == search_value or
             ( search_value == "edge" and cur != prev_l ) ):
          done_l = True
          result = idx
        prev_l = cur
      else:
        done_l = True
  return result
###############################################################################
# Given position of mouse, return the sample number, and signal index
def get_sample_at_mouse( self, mouse_x, mouse_y ):
  """Map a pixel position to ( sample_number, signal_index ).

  The sample number is derived from the x offset into the value area,
  scaled by zoom_x and shifted by sample_start. signal_index is the index
  into signal_list of the visible row containing mouse_y, or None; when
  rows overlap the last matching visible row wins ( as in the original ).
  """
  pixel_offset = mouse_x - self.sig_value_start_x
  sample_num = int( pixel_offset / self.zoom_x + self.sample_start )
  row_index = None
  for ( i, sig_obj ) in enumerate( self.signal_list ):
    if ( sig_obj.visible == True and
         sig_obj.y < mouse_y < sig_obj.y + sig_obj.h ):
      row_index = i
  return ( sample_num, row_index )
###############################################################################
# Given position of mouse, return the sig_obj
def get_sig_obj_at_mouse( self, mouse_x, mouse_y ):
  """Return the first visible signal whose row strictly contains mouse_y,
  or None when the coordinate is not over any visible signal row.
  """
  for candidate in self.signal_list:
    if ( candidate.visible != True ):
      continue
    if ( candidate.y < mouse_y < candidate.y + candidate.h ):
      return candidate
  return None
###############################################################################
# Given name of a sig_obj, return that sig_obj
def get_sig_obj_by_name( self, name ):
  """Linear search of signal_list for an exact name match; None if absent."""
  return next( ( s for s in self.signal_list if s.name == name ), None )
###############################################################################
def log( self, txt_list ):
  """Append every entry of txt_list to the open session log file, one per
  line ( entries are stringified, so any object may be logged ).
  """
  for entry in txt_list:
    self.file_log.write( "%s\n" % ( entry, ) )
  return
###############################################################################
def init_help( self ):
  """Return the help screen shown by the 'help' command, one string per line."""
  a = [];
  a+=["#####################################################################"];
  a+=["# SUMP2 BlackMesaLabs GNU GPL V2 Open Source License. Python 3.x #"];
  a+=["# (C) Copyright 2016 Kevin M. Hubbard - All rights reserved. #"];
  a+=["#####################################################################"];
  a+=["# bd_shell Commands #"];
  a+=["# env : Display all assigned variables and values #"];
  a+=["# print var : Display value of variable 'var' #"];
  a+=["# foo = bar : Assign 'bar' to the variable 'foo' #"];
  a+=["# var_bs foo bar : Set bits 'bar' inside variable 'foo' #"];
  a+=["# var_bc foo bar : Clear bits 'bar' inside variable 'foo' #"];
  a+=["# help : Display help page ( you're looking at it ) #"];
  a+=["# quit : Quit the SUMP2 application #"];
  a+=["# gui or NULL<ENTER> : Return from BD_SHELL to GUI Interface #"];
  a+=["# source filename : Source an external command script #"];
  a+=["# sleep,sleep_ms n : Pause of n seconds or milliseconds #"];
  a+=["# UNIX Commands #"];
  a+=["# pwd,mkdir,cd,ls,cp,vi #"];
  a+=["# Backdoor Commands #"];
  a+=["# w addr data : Write data to addr #"];
  a+=["# w addr data data : Write multiple dwords #"];
  a+=["# r addr : Read data from addr #"];
  a+=["# r addr dwords : Read multiple dwords starting at addr #"];
  a+=["# GUI Commands #"];
  a+=["# crop_to_cursors : Minimize sample set to cursor region #"];
  a+=["# save_png : Save current screen image to PNG file #"];
  a+=["# save_vcd : Save current capture to VCD file #"];
  a+=["# bd_shell : Switch from GUI to a bd_shell CLI #"];
  a+=["# SUMP Commands #"];
  a+=["# sump_arm timeout : Arm SUMP2 engine and wait for timeout sec #"];
  a+=["# sump_arm_rle n : Arm SUMP2 engine and wait for n seconds #"];
  a+=["# sump_stop : Stop the SUMP2 engine #"];
  a+=["# sump_status : Display status of SUMP2 engine #"];
  a+=["# acquire_single : Arm for Single non-RLE acquisition #"];
  a+=["# acquire_continuous : Arm for non-RLE acquisition and loop #"];
  a+=["# acquire_stop : Stop any pending arming #"];
  a+=["# acquire_rle_1x : Arm for RLE acquisition plus dword data #"];
  a+=["# acquire_rle_8x : Arm for RLE acquisition, 8x decimated #"];
  a+=["# acquire_rle_64x : Arm for RLE acquisition, 64x decimated #"];
  a+=["#####################################################################"];
  return a;
###############################################################################
def init_manual( self ):
  """Return the full user manual text, one string per line ( displayed by
  the 'manual' command after being written to sump2_manual.txt )."""
  a = [];
  a+=["#####################################################################"];
  a+=["# SUMP2 by BlackMesaLabs GNU GPL V2 Open Source License. Python 3.x "];
  a+=["# (C) Copyright 2016 Kevin M. Hubbard - All rights reserved. "];
  a+=["#####################################################################"];
  a+=["1.0 Scope "];
  a+=[" This document describes the SUMP2 software and hardware. "];
  a+=[" "];
  a+=["2.0 Software Architecture "];
  a+=[" The SUMP2 application is a Python 3.5 script using the PyGame module"];
  a+=[" for mouse and graphical user interface. Communication to hardware is"];
  a+=[" via TCP Socket communications to a BD_SERVER.py instance. The SW is "];
  a+=[" architected as a GUI wrapper around a command line application with "];
  a+=[" a bd_shell interface. When the PyGame GUI is used, mouse menu "];
  a+=[" selections create commands that are then interpreted by bd_shell. "];
  a+=[" In theory, sump2.py may be executed without PyGame as a command line"];
  a+=[" only program to arm the sump2 hardware and then dump captured data "];
  a+=[" to a VCD file for offline viewing by another application. "];
  a+=[" "];
  a+=["3.0 Command Descriptions "];
  a+=[" Zoom_In : Increase signal view magnification 2x "];
  a+=[" Zoom_Out : Decrease signal view magnification 2x "];
  a+=[" Zoom_Full : View all signal samples : WARNING May be slow "];
  a+=[" Zoom_Previous : Return to previous zoom view. "];
  a+=[" Zoom_to_Cursors : View region bound by cursors "];
  a+=[" Crop_to_Cursors : Reduce sample set to region bound by cursors "];
  a+=[" Cursors_to_View : Bring both cursors into current view "];
  a+=[" Cursor1_to_Here : Bring Cursor1 to mouse pointer "];
  a+=[" Cursor2_to_Here : Bring Cursor2 to mouse pointer "];
  a+=[" Acquire_Single : Arm hardware for single non-RLE acquisition "];
  a+=[" Acquire_Continuous : Arm hardware for looping non-RLE acquisitions "];
  a+=[" Acquire_Stop : Issue a stop to hardware from current Arming "];
  a+=[" Acquire_RLE_1x : Arm hardware for RLE acquisition no decimation "];
  a+=[" Acquire_RLE_8x : Arm hardware for RLE acquisition 8x decimation "];
  a+=[" Acquire_RLE_64x : Arm hardware for RLE acquisition 64x decimation"];
  a+=[" File_Load : Load a bd_shell script file "];
  a+=[" File_Save : Save capture to a VCD,PNG,JPG, etc file "];
  a+=[" Save_Rename : Rename the last file saved "];
  a+=[" Fonts : Increase or Decrease GUI font size "];
  a+=[" BD_SHELL : Close GUI and open a BD_SHELL command line "];
  a+=[" "];
  a+=[" Rename : Rename a selected signal's nickname "];
  a+=[" Insert_Divider : Insert a dummy signal divider "];
  a+=[" Clipboard : Cut and Paste selected signals "];
  a+=[" Visibility : Change visibility. Impacts RLE Compression "];
  a+=[" Trigger_Rising : Set Trigger for Rising edge of selected "];
  a+=[" Trigger_Falling : Set Trigger for Falling edge of selected "];
  a+=[" Trigger_Watchdog : Set Trigger for Watchdog timeout of selected "];
  a+=[" Set_Pattern0 : Advanced Triggering "];
  a+=[" Set_Pattern1 : Advanced Triggering "];
  a+=[" Clear_Pattern_Match: Advanced Triggering "];
  a+=[" Set_Data_Enable : Advanced data sampling "];
  a+=[" Clear_Data_Enable : Advanced data sampling "];
  a+=[" SUMP_Configuration : Modify advanced SUMP variables "];
  a+=[" Acquisition_Length : Configure amount of non-RLE RAM to use "];
  a+=[" "];
  a+=["4.0 SUMP2 Environment Variables "];
  a+=[" bd_connection : Connection type to hardware. tcp only "];
  a+=[" bd_protocol : Communication protocol to HW, poke only "];
  a+=[" bd_server_ip : IP address or localhost for bd_server "];
  a+=[" bd_server_socket : Socket Number for bd_server, 21567 typ"];
  a+=[" sump_addr : 32bit PCI address of sump_ctrl_reg "];
  a+=[" sump_data_enable : Event bits to use for data_enable feature "];
  a+=[" sump_rle_event_en : Event bits to use for RLE capture "];
  a+=[" sump_rle_post_trig_len : Max number of post trigger RLE samples "];
  a+=[" sump_rle_pre_trig_len : Max number of pre trigger RLE samples "];
  a+=[" sump_trigger_delay : Number of clocks to delay trigger "];
  a+=[" sump_trigger_field : Event bits to use for trigger "];
  a+=[" sump_trigger_nth : nTh trigger to trigger on "];
  a+=[" sump_trigger_type : or_rising,or_falling,watchdog,pattern_ris "];
  a+=[" sump_user_ctrl : 32bit user_ctrl field "];
  a+=[" sump_user_pattern0 : 32bit user pattern0 field "];
  a+=[" sump_user_pattern1 : 32bit user pattern1 field "];
  a+=[" sump_watchdog_time : Watchdog timeout for Watchdog trigger "];
  a+=[" "];
  a+=["5.0 SUMP2 Hardware "];
  a+=[" The SUMP2 hardware is a single verilog file with fixed input parms "];
  a+=[" for the depth and width of capture memory to use. A maximum SUMP2 "];
  a+=[" configuration contains a 32bit Block RAM for non-RLE events and a "];
  a+=[" 64bit Block RAM for RLE events and time stamps. In addition to 32 "];
  a+=[" signal events, SUMP2 may also capture 16 DWORDs (512 bits ) of non "];
  a+=[" RLE data. The SUMP2 software automatically adjusts to each instance "];
  a+=[" of hardware for memory depth, width and advanced features. A key "];
  a+=[" feature for acquiring long captures in time is the ability to mask "];
  a+=[" any of the event inputs, which can be used to dramatically reduce "];
  a+=[" event occurrence and support capturing only events of interest. The "];
  a+=[" software supports masking events by double-clicking the signal name "];
  a+=[" prior to arming which hides the signals and masks them from the RLE "];
  a+=[" compression. 10x to 1000x compression is possible run-time for some "];
  a+=[" designs by dynamically masking input events prior to acquisition. "];
  a+=[" --------------- "];
  a+=[" events[31:0] -+->| Trigger Logic |------------------------- "];
  a+=[" | --------------- ----------------- | "];
  a+=[" +---------------------->| RLE Compression | | "];
  a+=[" | --------------- | Timestamp and |<-+ "];
  a+=[" +->| RLE RAM |<---| Addr Generator | | "];
  a+=[" | --------------- ----------------- | "];
  a+=[" | --------------- ----------------- | "];
  a+=[" ->| non-RLE RAM |<-+-| Addr Generator |<- "];
  a+=[" --------------- | ----------------- "];
  a+=[" --------------- | "];
  a+=[" dwords[511:0] -->| non-RLE RAM |<- "];
  a+=[" --------------- "];
  a+=[" "];
  a+=["6.0 Working with large RLE datasets "];
  a+=[" RLE datasets can be overwhelming large to work with in software once"];
  a+=[" samples have been decompressed. Compression ratios of 10,000:1 are "];
  a+=[" possible for some systems. SUMP Software provides internal tools for"];
  a+=[" reducing the hardware captured RLE dataset to more manageable size "];
  a+=[" for both viewing and VCD generation. "];
  a+=[" crop_to_cursors : Permanently crops the number of samples to a "];
  a+=[" region indicated by the cursors. "];
  a+=[" RLE Decimation : 8x and 64x decimation specified at arming will "];
  a+=[" acquire the RLE data and reduce the sample rate "];
  a+=[" by 8x or 64x prior to rendering. "];
  a+=[" Signal Hiding : Hiding a signal prior to acquisition will mask "];
  a+=[" the signal entirely and increase the overall RLE "];
  a+=[" acquisition length. Hiding a signal post acquire "];
  a+=[" speeds up rendering time for remaining signals. "];
  a+=[" "];
  a+=[" 6.1 Bundles "];
  a+=[" The following is an example of manually modifying sump2_wave.txt "];
  a+=[" file in order to group together multiple events into a bundle. "];
  a+=[" /my_cnt -bundle -hex "];
  a+=[" /event[12] -nickname event[12] "];
  a+=[" /event[13] -nickname event[13] "];
  a+=[" /event[14] -nickname event[14] "];
  a+=[" /event[15] -nickname event[15] "];
  a+=[" "];
  a+=["7.0 History "];
  a+=[" The original OSH+OSS SUMP was designed in 2007 as an external logic "];
  a+=[" logic analyzer using a Xilinx FPGA eval board for capturing external"];
  a+=[" electrical signals non compressed to all available FPGA block RAM. "];
  a+=[" See http://www.sump.org/projects/analyzer/ "];
  a+=[" The original developer published the serial communication protocol "];
  a+=[" and also wrote a Java based waveform capture tool. The simplicity of"];
  a+=[" the protocol and the quality and maintenance of the Open-Source Java"];
  a+=[" client has inspired many new SUMP compliant projects such as: "];
  a+=[" 'Open Logic Sniffer' : https://www.sparkfun.com/products/9857 "];
  a+=[" "];
  a+=[" 7.1 SUMP1-RLE ( 2014 ) "];
  a+=[" Black Mesa Labs developed the SUMP1-RLE hardware in 2014 as a "];
  a+=[" software protocol compatible SUMP engine that was capable of real "];
  a+=[" time hardware compression of samples ( Run Length Encoded ). The "];
  a+=[" idea of the project was to leverage the open-source Java software "];
  a+=[" and couple it with new hardware IP that was capable of storing deep"];
  a+=[" capture acquisitions using only a single FPGA Block RAM, allowing "];
  a+=[" SUMP to be used internally with existing FPGA designs rather than "];
  a+=[" a standalone device. FPGA vendor closed license logic analyzers all"];
  a+=[" store using no compression requiring vast amount of Block RAMS to "];
  a+=[" be useful and typically do not fit will within the limited fabric "];
  a+=[" resources of an existing FPGA design requiring debugging. SUMP1-RLE"];
  a+=[" was later enhanced to include 2 DWORDs of sampled data along with "];
  a+=[" the RLE compressed signal events. This enhancement required new "];
  a+=[" software which was written in .NET Powershell for Windows platform."];
  a+=[" "];
  a+=[" 7.2 SUMP2-RLE ( 2016 ) "];
  a+=[" SUMP2 is a software and hardware complete redesign to improve upon "];
  a+=[" the SUMP1-RLE concept. For SUMP2 the .NET software was tossed due "];
  a+=[" to poor user interface performance and replaced with a PyGame based"];
  a+=[" VCD waveform viewer ( chip_wave.py also from BML ). The SUMP2 HW "];
  a+=[" is now a single Verilog file with no backwards compatibility with "];
  a+=[" any legacy SUMP hardware or software systems. SUMP2 hardware is "];
  a+=[" designed to capture 512bits of DWORDs and 32bits of events versus "];
  a+=[" the SUMP1 limits of 16 event bits and 64bits of DWORDs. Sample "];
  a+=[" depth for SUMP2 is now completely defined by a hardware instance "];
  a+=[" with software that automatically adapts. The RLE aspect of SUMP2 "];
  a+=[" is optional and not required for simple data intensive captures. "];
  a+=[" SUMP2 software includes bd_shell support for changing variables "];
  a+=[" on the fly and providing simple low level hardware access to regs. "];
  a+=[" "];
  a+=["8.0 BD_SERVER.py "];
  a+=[" The SUMP2.py application does not communicate directly to hardware "];
  a+=[" but instead uses BD_SERVER.py as an interface layer. BD_SERVER is "];
  a+=[" a multi use server application that accepts requests via TCP to "];
  a+=[" read and write to low level hardware and then translates those "];
  a+=[" requests using one of many low level hardware protocols available. "];
  a+=[" BD_SERVER allows the low level communications to easily change from"];
  a+=[" interfaces like USB FTDI serial to PCI without requiring any change"];
  a+=[" to the high level application. This interface also supports the "];
  a+=[" debugging of an embedded system from a users regular desktop with "];
  a+=[" a standard Ethernet or Wifi connection between the two. Typical use"];
  a+=[" is to run both python applications on same machine and use the TCP "];
  a+=[" localhost feature within the TCP stack for communications. "];
  a+=[" "];
  a+=[" ------------ -------------- --------------- "];
  a+=[" | sump2.py |<------->| bd-server.py |<------->| SUMP Hardware | "];
  a+=[" ------------ Ethernet -------------- USB,PCI --------------- "];
  a+=[" "];
  a+=["9.0 License "];
  a+=[" This hardware and software is released under the GNU GPLv2 license. "];
  a+=[" Full license is available at http://www.gnu.org "];
  a+=[" "];
  return a;
###############################################################################
def init_vars( self, file_ini ):
  """Build the application variable dictionary.

  Starts from hardcoded defaults ( every value kept as a string ) and then,
  if file_ini exists, overlays any "name = value" assignments found in it.
  Lines whose first token begins with '#' are comments; '=' needs no
  surrounding whitespace. Prints a warning when file_ini is missing.
  Returns the dictionary.
  """
  cfg = {
    "font_name"              : "dejavusansmono",
    "font_size"              : "12",
    "file_in"                : "dut.vcd",
    "file_log"               : "sump2_log.txt",
    "color_screen_background": "000000",
    "color_screen_foreground": "00FF00",
    "screen_width"           : "800",
    "screen_height"          : "600",
    "cursor_unit"            : "clocks",
    "cursor_mult"            : "1.0",
    "bd_connection"          : "tcp",
    "bd_protocol"            : "poke",
    "bd_server_ip"           : "localhost",
    "bd_server_socket"       : "21567",
    "uut_name"               : "UUT",
    "sump_addr"              : "00000090",  # Addr of sump2_ctrl_reg
    "sump_script_inc_filter" : "*.txt",
    "sump_script_exc_filter" : "sump2_*.txt",
    "sump_trigger_type"      : "or_rising",
    "sump_trigger_field"     : "00000000",
    "sump_trigger_delay"     : "0000",
    "sump_trigger_nth"       : "0001",
    "sump_acquisition_len"   : "44",
    "sump_rle_event_en"      : "FFFFFFFF",
    "sump_rle_pre_trig_len"  : "00100000",
    "sump_rle_post_trig_len" : "00100000",
    "sump_user_ctrl"         : "00000000",
    "sump_user_pattern0"     : "00000000",
    "sump_user_pattern1"     : "00000000",
    "sump_data_enable"       : "00000000",
    "sump_watchdog_time"     : "00001000",
  }
  import os
  if os.path.exists( file_ini ):
    fh = open( file_ini, 'r' )
    ini_lines = fh.readlines()
    fh.close()
    for raw in ini_lines:
      # Pad '=' with spaces so "a=b" and "a = b" tokenize identically.
      tokens = raw.replace( "=", " = " ).strip().split() + [None] * 4
      if ( tokens[1] == "=" and tokens[0][0:1] != "#" ):
        cfg[ tokens[0] ] = tokens[2]
  else:
    print( "Warning: Unable to open " + file_ini )
  return cfg
###############################################################################
# Dump all the app variables to ini file when application quits.
def var_dump( self, file_ini ):
  """Persist self.vars to file_ini as sorted 'name = value' lines.

  Called on application exit; the auto-generated header warns the user
  that hand-written comments will not survive the next dump.
  """
  log( self, [ "var_dump()" ] )
  file_out = open( file_ini, 'w' )
  file_out.write( "# [" + file_ini + "]\n" )
  file_out.write( "# WARNING: \n" )
  file_out.write( "# This file is auto generated on application exit.\n" )
  file_out.write( "# Safe to change values, but comments will be lost.\n" )
  assignments = [ key + " = " + val + "\n"
                  for ( key, val ) in self.vars.items() ]
  for line in sorted( assignments ):
    file_out.write( line )
  file_out.close()
  return
def list2file( self, file_name, my_list ):
  """Write one list entry per line to file_name ( overwriting any
  existing file )."""
  with open( file_name, 'w' ) as file_out:
    for line in my_list:
      file_out.write( line + "\n" )
  return
def tuplelist2file( self, file_name, my_list ):
  """Write a list of (dw1, dw2) pairs to file_name, each pair rendered as
  two space-separated 8-digit lowercase hex words per line."""
  with open( file_name, 'w' ) as file_out:
    for ( dw1, dw2 ) in my_list:
      file_out.write( "%08x %08x\n" % ( dw1, dw2 ) )
  return
###############################################################################
# Command Line BD_SHELL
def bd_shell( self, cmd_start = "" ):
  """Tear down the pygame GUI and run an interactive command-line shell.

  Reads single keypresses via msvcrt ( Windows DOS-Box only ), echoing and
  hand-assembling a command line with backspace support. <ENTER> on a
  non-empty line dispatches it through proc_cmd(); an empty line or "gui"
  exits the loop and re-initializes the pygame display. cmd_start seeds
  the first command line. Relies on a module-level `import sys` elsewhere
  in the file.
  """
  log( self, ["bd_shell()"] );
  import pygame;
  loop_jk = True;
  import msvcrt;# Note: Windows specific
  print("\nMode=BD_SHELL : Enter NULL command to return to GUI");
  self.context = "cli";
  pygame.display.quit();
  self.gui_active = False;
  print("");
  sys.stdout.write( self.prompt );
  sys.stdout.write( cmd_start );
  sys.stdout.flush();
  h_cnt = 1;# Command history count
  key_buf = cmd_start;
  while ( loop_jk == True ):
    ch = msvcrt.getch();# Wait for single key press from DOS-Box
    if ( ch != "\xe0" ):
      ch = ch.decode();
    else:
      # K=Left,M=Right,H=Up,P=Down,G=Home,O=End
      ch = msvcrt.getch();# The Special KeyCode
      print( ch );
      ch = "";
    # print( ch );
    # ch = (msvcrt.getch().decode());# Wait for single key press from DOS-Box
    # print( ch );
    # Handle Backspace Erase
    if ( ch == chr( 8 ) ):
      sys.stdout.write( str( ch ) );#
      sys.stdout.write( str(" " ) );#
      sys.stdout.write( str( ch ) );#
    else:
      sys.stdout.write( str( ch ) );# Echo typed character to DOS-Box STDOUT
    sys.stdout.flush();
    # If not <ENTER> key then append keypress to a key_buf string
    if ( ch != chr( 13 ) ):
      if ( ch == chr( 8 ) ):
        if ( len(key_buf) > 0 ):
          key_buf = key_buf[:-1];# Subtract last char on Backspace
      else:
        key_buf += str(ch);# Append new character
    elif ( ch == chr( 13 ) ):
      if ( len( key_buf ) == 0 or key_buf == "gui" ):
        loop_jk = False;
      else:
        print( ("%d>"+key_buf+" " ) % h_cnt ); h_cnt +=1;
        key_buf = key_buf.replace("="," = ");
        words = " ".join(key_buf.split()).split(' ') + [None] * 8;
        if ( words[1] == "=" ):
          cmd = words[1];
          parms = [words[0]]+words[2:];
        else:
          cmd = words[0];
          parms = words[1:];
        rts = proc_cmd( self, cmd, parms );
        for each in rts:
          print( each );
        key_buf = "";
      sys.stdout.write( self.prompt );# "bd>"
      sys.stdout.flush();
  # while ( loop_jk == True ):
  self.context = "gui";
  print("\nMode=GUI");
  # NOTE: set_mode prevents resizing after return to GUI.
  # pygame.display.set_mode();# Set focus back to GUI Window
  display_init( self );
  pygame.display.update();
  flush_surface_cache( self );# Redraw with new values
  draw_screen( self );
  screen_flip( self );
  sump_vars_to_signal_attribs( self );# Assume sump vars were modified
  return;
###############################################################################
# Process Backdoor commands for Writing and Reading to any hardware
def proc_bd_cmd( self, cmd, parms ):
  """Process backdoor hardware commands 'w' ( write ) and 'r' ( read ).

  parms is [addr, data...], where addr may also be the name of a variable
  in self.vars. An optional trailing '>' / '>>' plus file name redirects
  output to a file ( overwrite / append ). Reads return a list holding one
  formatted hex-dump string, 8 dwords per line; writes return [].
  """
  log( self, ["proc_bd_cmd() : " + cmd + " " + str( parms ) ] );
  rts = [];
  file_mode = None;
  if ( ">" in parms ):
    i = parms.index(">");
    file_mode = "w";# Create new file, overwriting existing
  if ( ">>" in parms ):
    i = parms.index(">>");
    file_mode = "a";# Append to any existing file
  if ( file_mode != None ):
    file_name = parms[i+1];
    file_out = open( file_name, file_mode ); # a or w : Append or Overwite
    parms = parms[0:i] + [None]*10;# Strip "> foo.txt" prior to processing
  # if ( cmd == "w" or cmd == "r" or cmd == "bs" or cmd == "bc" ):
  if ( cmd == "w" or cmd == "r" ):
    addr = parms[0];
    data = parms[1:];
    # Address may be a variable, so look
    if ( self.vars.get( addr ) != None ):
      addr = self.vars[ addr ];
    if ( cmd == "w" ):
      data_hex = [];
      for each in data:
        if ( each != None ):
          data_hex += [int(each,16)];
      self.bd.wr( int(addr,16), data_hex );
    if ( cmd == "r" ):
      if ( data[0] == None ):
        num_dwords = 1;
      else:
        num_dwords = int( data[0],16 );
      rts = self.bd.rd( int(addr,16) , num_dwords, repeat = False );
      # data_hex = [];
      # for each in rts:
      # data_hex += ["%08x" % each];
      # rts = data_hex;
      # Format 8 dwords wide per line
      data_hex = "";
      i = 0;
      for each in rts:
        data_hex += ("%08x " % each );
        i += 1;
        if ( i == 8 ):
          i = 0;
          data_hex += "\n";
      rts = [ data_hex ];
  if ( file_mode != None ):
    for each in rts:
      file_out.write( each + "\n" );
    file_out.close();
    rts = [];
  return rts;
###############################################################################
# Generate a demo vcd file to display if the hardware isn't present
def make_demo_vcd( self ):
  """Generate sump2_demo.vcd, a synthetic VGA-like capture ( hsync, vsync
  and three sparse random pixel bits over 10000 samples ), for demo use
  when no hardware is present. Returns the file name written.
  """
  filename_vcd = "sump2_demo.vcd";
  txt2vcd = TXT2VCD();# Instantiate Class for the VCD Conversion
  # line-0 contains a list of all signal names and ends with clock period
  # Iterate the list and replace each signal name with its nickname
  new_line = "hsync vsync pixel_r pixel_g pixel_b 10000";
  h = 0; v = 597; sample_lines = [ new_line ];
  import random;
  # ~1/1001 odds of a pixel bit being 1 on any given sample
  rnd_list = [0]*1000;
  rnd_list += [1];
  pixel_r = 0;
  pixel_g = 0;
  pixel_b = 0;
  for i in range( 0, 10000, 1):
    h+=1;
    hsync = 0;
    vsync = 0;
    pixel_r = random.choice( rnd_list );
    pixel_g = random.choice( rnd_list );
    pixel_b = random.choice( rnd_list );
    if ( h >= 800 ):
      hsync = 1;
    if ( h == 810 ):
      hsync = 1; h = 0;
      v += 1;
      if ( v == 600 ):
        v = 0;
    if ( v == 599 ):
      vsync = 1;
    sample = "%d %d %d %d %d" % ( hsync,vsync, pixel_r,pixel_g,pixel_b );
    sample_lines += [ sample ];
  vcd_lines = sample_lines[:];
  rts = txt2vcd.conv_txt2vcd( self, vcd_lines );
  print("Saving " + filename_vcd );
  file_out = open( filename_vcd, "w" ); # Append versus r or w
  for each in rts:
    file_out.write( each + "\n" );# Make Windows Friendly
  file_out.close();
  return filename_vcd;
###############################################################################
# Given a file_header ( like foo_ ), check for foo_0000, then foo_0001, etc
# and return 1st available name.
def make_unique_filename( self, file_header, file_ext ):
  """Return the first name of the form <file_header>NNNN<file_ext>
  ( NNNN = 0000, 0001, ... ) that does not already exist on disk."""
  import os
  num = 0
  candidate = file_header + "0000" + file_ext
  while os.path.exists( candidate ):
    num += 1
    candidate = file_header + ( "%04d" % num ) + file_ext
  return candidate
###############################################################################
# Read in a file and display it
def more_file( self, parms ):
  """Read a text file and return a list of its whitespace-stripped lines.

  parms[0] : name of the file to read.
  Returns [] ( and prints an error ) when the file cannot be read. The
  original did a bare `return` ( None ) on that path, which crashed
  callers that iterate or extend the result; the `with` block also closes
  the file even when readlines() itself fails.
  """
  log( self, ["more_file() " + str( parms ) ] );
  file_name = parms[0];
  rts = [];
  try: # Read Input File
    with open( file_name, "r" ) as file_in:
      file_lines = file_in.readlines();
    rts = list(map(str.strip, file_lines));# Chomp all the lines
  except Exception:
    print( "ERROR Input File: " + str( file_name ) );
  return rts;
###############################################################################
# interpret a bd_shell script or wave file
# a wave file is easy to spot as 1st char on each line is a "/"
def source_file( self, parms ):
  """Interpret a bd_shell script file or a signal wave ( format ) file.

  parms[0] is the file name. A wave file is detected by any line whose
  first token begins with '/'; it is handed to load_format() and the
  screen is refreshed. Otherwise every non-comment line is dispatched
  through proc_cmd(), with 'foo = bar' lines rewritten as assignment
  commands. Returns the accumulated command results; returns [] when the
  file cannot be read ( the original did a bare `return` of None on the
  error path, which crashed callers that iterate the result ).
  """
  log( self, ["source_file() " + str( parms ) ] );
  file_name = parms[0];
  rts = [];
  try: # Read Input File
    with open( file_name, "r" ) as file_in:
      file_lines = file_in.readlines();
  except Exception:
    print( "ERROR Input File: " + str( file_name ) );
    return rts;# Bug fix: was a bare return ( None ) on the error path
  is_wave = False;
  for each in file_lines:
    words = " ".join(each.split()).split(' ') + [None] * 20;
    if ( words[0][0:1] == "/" ):
      is_wave = True;
  if ( is_wave == True ):
    load_format( self, file_name );
    self.name_surface_valid = False;# Names changed; force a redraw
    screen_refresh( self );
  else:
    for each in file_lines:
      each = each.replace("="," = ");
      words = " ".join(each.split()).split(' ') + [None] * 20;
      if ( words[0][0:1] != "#" ):
        if ( words[1] == "=" ):
          cmd = words[1];# "=" becomes the assignment command
          parms = [words[0]]+words[2:];
        else:
          cmd = words[0];
          parms = words[1:];
        rts += proc_cmd( self, cmd, parms );
  return rts;
###############################################################################
# Process generic unknown commands ( GUI,Shell,Backdoor, SUMP )
# Process generic unknown commands ( GUI,Shell,Backdoor, SUMP )
def proc_cmd( self, cmd, parms ):
  """Central command dispatcher for GUI pulldowns, bd_shell and scripts.

  cmd   : command name string ( case-insensitive, may be an alias,
          "!!" repeats last command, "!<n>" replays history entry n ).
  parms : list of parameter strings, padded with Nones by callers.
  Returns a list of result-text lines ( possibly empty ); for GUI-mode
  calls it also refreshes the screen before returning.
  NOTE(review): unknown commands fall through to a subprocess call of
  cmd with parms[0] — assumes the shell environment is trusted.
  """
  log( self, ["proc_cmd() " + cmd + " " + str( parms )] );
# print( cmd, parms );
  rts = [];
  if ( cmd == None ):
    return rts;
  cmd = cmd.lower();
  # !! retrieves last command
  if ( cmd == "!!" ):
    cmd = self.last_cmd;
  else:
    self.last_cmd = cmd;
  self.cmd_history.append([ cmd, parms ] );
  if ( cmd[0:1] == "!" ):
    try:
      h_num = cmd[1:];
      ( cmd, parms ) = self.cmd_history[ int(h_num,10) ];
    except:
      print("Invalid Command History");
# print "proc_cmd()", cmd;
  # Commands may have aliases, look them up here:
  if ( self.cmd_alias_hash_dict.get( cmd ) != None ):
    cmd = self.cmd_alias_hash_dict[ cmd ];
  # Returned all assigned variables with their values
  if ( cmd == "env" ):
    for key in self.vars:
      rts += [ key +"=" + self.vars[key] ];
    return sorted(rts);
  elif ( cmd == "=" ):
    self.vars[parms[0]] = parms[1]; # Variable Assignment
  elif ( cmd == "var_bs" ):
    val = int( self.vars[parms[0]] , 16 );
    val = val |  int( parms[1], 16 );
    self.vars[parms[0]] = ("%08x" % val );
  elif ( cmd == "var_bc" ):
    val = int( self.vars[parms[0]] , 16 );
    val = val & ~int( parms[1], 16 );
    self.vars[parms[0]] = ("%08x" % val );
  elif ( cmd == "echo" or cmd == "print" ):
    try:
      rts = [ self.vars[ parms[0] ] ];
    except:
      rts = [ parms[0] ];
  elif ( cmd == "h" or cmd == "history" ):
    rts = self.cmd_history;
#   rts for ( i , sig_obj ) in enumerate( self.signal_list ):
  elif ( cmd == "source" ):
    rts = source_file( self, parms );
  elif ( cmd == "more" ):
    rts = more_file( self, parms );
  elif ( cmd == "help" or cmd == "?" ):
    rts = self.help;# I'm a funny guy
  elif ( cmd == "manual" ):
    try:
      import os;
      filename = "sump2_manual.txt";
      if ( self.os_sys == "Linux" ):
        os.system('vi ' + filename );
      else:
        os.system('notepad.exe ' + filename );
    except:
      rts += ["ERROR: "+cmd+" "+filename ];
  elif ( cmd == "bd_shell" ):
    bd_shell(self, cmd_start ="" );
  elif ( cmd == "quit" or cmd == "exit" ):
    self.done=True;
    shutdown( self );
  elif ( cmd == "sump_connect" ):
    sump_connect(self);
  # ASCII art like "[----t--]" sets pre/post trigger acquisition length
  elif ( "[" in cmd and
         "-" in cmd and
         "t" in cmd and
         "]" in cmd ):
    words = cmd.split("t");
    pre_trig  = words[0].count("-");
    post_trig = words[1].count("-");
    acq_len = ( pre_trig << 4 ) + ( post_trig << 0 );
    self.vars["sump_acquisition_len"] = ( "%02x" % acq_len );
    print( "sump_acquisition_len = " + ( "%02x" % acq_len ));
  elif ( cmd == "sump_arm" or
         cmd == "sump_arm_rle" or
         cmd == "sump_stop" or
         cmd == "acquire_single" or
         cmd == "acquire_normal" or
         cmd == "acquire_continuous" or
         "acquire_rle" in cmd    or
         cmd == "acquire_stop" ):
    if ( cmd == "sump_arm" or
         cmd == "sump_arm_rle" or
         cmd == "acquire_single" or
         cmd == "acquire_normal" or
         cmd == "acquire_continuous" or
         "acquire_rle" in cmd  ):
      sump_arm(self, True );# Arm the hardware
      if ( "acquire_rle" in cmd ):
        self.acq_mode = "rle";
      else:
        self.acq_mode = "nonrle";
    else:
      sump_arm(self, False);# Cancel an acq in progress
    if ( cmd == "acquire_normal" ):
      cmd = "acquire_single";
    self.acq_state = cmd;
    # if sump_arm has a parm then this is CLI and is a seconds timeout
    if ( ( cmd=="sump_arm" or cmd=="sump_arm_rle" ) and parms[0] != None ):
      timeout = int( parms[0], 16 );
      # Loop until timeout or acquired bit is set
      while ( timeout > 0 and
              ( self.sump.rd( addr = None )[0] &
                self.sump.status_ram_post ) == 0x00 ):
        print("Waiting for trigger..");
        sleep( 1 );
        timeout = timeout - 1;
      if ( timeout > 0 ):
        print("ACQUIRED.");
        if ( self.acq_mode == "nonrle" ):
          trig_i = sump_dump_data(self);# Grab data from hardware
        else:
          trig_i = sump_dump_rle_data(self);# Grab data from hardware
  # Group of OS commands pwd,mkdir,cd,ls,cp,vi
  elif ( cmd == "pwd" ):
    import os;
    rts += [ os.getcwd() ];
  elif ( cmd == "mkdir" ):
    import os;
    try:
      os.path.mkdir();
    except:
      rts += ["ERROR: "+cmd+" "+parms[0] ];
  elif ( cmd == "cd" ):
    import os;
    try:
      os.chdir( parms[0] );
    except:
      rts += ["ERROR: "+cmd+" "+parms[0] ];
  elif ( cmd == "ls" ):
    import os;
    rts += os.listdir( os.getcwd() );
#   rts += os.listdir( "*.ini" );
  elif ( cmd == "vi" ):
    try:
      if ( self.os_sys == "Linux" ):
        os.system('vi ' + parms[0] );
      else:
        os.system('notepad.exe ' + parms[0] );
    except:
      rts += ["ERROR: "+cmd+" "+parms[0] ];
  elif ( cmd == "cp" ):
    from shutil import copyfile;
    try:
      copyfile( parms[0], parms[1] );
    except:
      rts += ["ERROR: "+cmd+" "+parms[0]+" "+parms[1] ];
# elif ( cmd == "sump_dump" ):
#   sump_dump_data(self);
#   sump_save_txt(self);
#   sump_save_txt(self, mode_vcd = True );
#   sump_save_vcd( self );
#   txt2vcd = TXT2VCD();
#   file_in   = open( "sump_dump.txt4vcd", "r" );
#   file_lines = file_in.readlines();
#   file_in.close();
#   rts = txt2vcd.conv_txt2vcd( file_lines );
#   filename = make_unique_filename( self, "sump_", ".vcd" );
#   file_out  = open( filename, "w" ); # Append versus r or w
#   for each in rts:
#     file_out.write( each + "\r\n" );# Make Windows Friendly
#   file_out.close();
#   file_name = "sump_dump.txt4vcd";
# else:
#   file_name = "sump_dump.txt";
  elif ( cmd == "save_txt" ):
    filename = make_unique_filename( self, "sump2_", ".txt" );
    sump_save_txt( self, filename );
  elif ( cmd == "save_rename" ):
    val1 = self.last_filesave;
    val2 = val1;
    if ( val1 != None ):
      rts = draw_popup_entry(self, ["Save_Rename()", val1],val2);
      import os;
      try:
        os.rename( val1, rts );
        draw_header( self,"Save_Rename() : " + val1 + " " + rts );
      except:
        draw_header( self,"ERROR: Save_Rename() : " + val1 + " " + rts );
# elif ( cmd == "save_vcd" ):
  elif ( cmd == "save_vcd" and self.acq_mode == "nonrle" ):
    print("save_vcd()");
    screen_flip( self );# Only thing changing is the popup selection
#   sump_dump_data(self);# Grab data from hardware ( might be in CLI Mode )
    filename_txt = make_unique_filename( self, "sump2_", ".txt" );
    filename_vcd = make_unique_filename( self, "sump2_", ".vcd" );
    draw_popup_msg(self,
       ["NOTE:","Saving capture to VCD file "+filename_vcd],1);
    sump_save_txt(self, filename_txt, mode_vcd = True );
    txt2vcd = TXT2VCD();# Instantiate Class for the VCD Conversion
    file_in   = open( filename_txt, "r" );
    file_lines = file_in.readlines();
    file_in.close();
    # line-0 contains a list of all signal names and ends with clock period
    # Iterate the list and replace each signal name with its nickname
    words = " ".join(file_lines[0].split()).split(' ');
    new_line = "";
    for each in words:
      nickname = each;# Handles both unknowns and clock period
      for sig_obj in self.signal_list:
        if ( each == sig_obj.name ):
          nickname = sig_obj.nickname;
          if ( nickname == "" ):
            nickname = each;
      new_line += nickname + " ";
    vcd_lines = [new_line] + file_lines[1:];
    print("conv_txt2vcd()");
    rts = txt2vcd.conv_txt2vcd( self, vcd_lines );
#   rts = txt2vcd.conv_txt2vcd( vcd_lines );
    print("Saving " + filename_vcd );
    draw_header( self,"save_vcd() : Saving " + filename_vcd );
    file_out  = open( filename_vcd, "w" ); # Append versus r or w
    for each in rts:
      file_out.write( each + "\n" );# Make Windows Friendly
    file_out.close();
    draw_header( self,"save_vcd() : Saved " + filename_vcd );
    self.last_filesave = filename_vcd;
    rts = ["save_vcd() Complete " + filename_vcd ];
  elif ( cmd == "save_vcd" and self.acq_mode == "rle" ):
    print("save_rle_vcd()");
    screen_flip( self );# Only thing changing is the popup selection
#   if ( self.mode_cli == True ):
#     sump_dump_rle_data(self);# Grab data from hardware
    filename_txt = make_unique_filename( self, "sump2_rle_", ".txt" );
    filename_vcd = make_unique_filename( self, "sump2_rle_", ".vcd" );
    draw_popup_msg(self,
       ["NOTE:","Saving capture to VCD file "+filename_vcd],1);
    sump_save_txt(self, filename_txt, mode_vcd = True );
    txt2vcd = TXT2VCD();# Instantiate Class for the VCD Conversion
    file_in   = open( filename_txt, "r" );
    file_lines = file_in.readlines();
    file_in.close();
    # line-0 contains a list of all signal names and ends with clock period
    # Iterate the list and replace each signal name with its nickname
    words = " ".join(file_lines[0].split()).split(' ');
    new_line = "";
    for each in words:
      nickname = each;# Handles both unknowns and clock period
      for sig_obj in self.signal_list:
        if ( each == sig_obj.name ):
          nickname = sig_obj.nickname;
      new_line += nickname + " ";
    vcd_lines = [new_line] + file_lines[1:];
    print("conv_txt2vcd()");
    rts = txt2vcd.conv_txt2vcd( self, vcd_lines );
#   rts = txt2vcd.conv_txt2vcd( vcd_lines );
    print("Saving " + filename_vcd );
    draw_header( self,"save_rle_vcd() : Saving " + filename_vcd );
    file_out  = open( filename_vcd, "w" ); # Append versus r or w
    for each in rts:
      file_out.write( each + "\n" );# Make Windows Friendly
    file_out.close();
    draw_header( self,"save_rle_vcd() : Saved " + filename_vcd );
    self.last_filesave = filename_vcd;
    rts = ["save_rle_vcd() Complete " + filename_vcd ];
  elif ( cmd == "sump_status" ):
    rts_hex = ( self.sump.rd( addr = None )[0] );
    rts += [ "%08x" % rts_hex ];
# elif ( cmd == "w" or cmd == "r" or cmd == "bs" or cmd == "bc" ):
  elif ( cmd == "w" or cmd == "r" ):
    rts = proc_bd_cmd(self, cmd, parms );
  elif ( cmd == "sleep" or cmd == "sleep_ms" ):
    duration = float(int( parms[0], 16 ));
    if ( cmd == "sleep_ms" ):
      duration = duration / 1000.0;
    sleep( duration );
# elif ( cmd == "debug_vars" ):
#   debug_vars( self );
# elif ( cmd == "scroll_toggle" ):
#   self.scroll_togl *= -1;
#   if ( self.scroll_togl == 1 ):
#     print( "Scroll Wheel is Pan");
#   else:
#     print( "Scroll Wheel is Zoom");
  elif ( cmd == "reload" ):
    proc_cmd( self, "save_format", ["wave_autosave.do"] );
    self.signal_list = [];
    file2signal_list( self, self.file_name );
    flush_surface_cache( self );
    proc_cmd( self, "load_format", ["wave_autosave.do"] );
  elif ( cmd == "load" ):
    self.file_name = parms[0];
    proc_cmd( self, "reload", [""] );
    proc_cmd( self, "load_format", [""] );
  elif ( cmd == "save_jpg" ):
    screen_erase( self );
    draw_screen( self );
    screen_flip( self );
    filename = make_unique_filename( self, "sump2_", ".jpg" );
    self.pygame.image.save( self.screen, filename );
    draw_header( self,"save_jpg() : Saved " + filename );
    self.last_filesave = filename;
  elif ( cmd == "save_bmp" ):
    screen_erase( self );
    draw_screen( self );
    screen_flip( self );
    filename = make_unique_filename( self, "sump2_", ".bmp" );
    self.pygame.image.save( self.screen, filename );
    draw_header( self,"save_bmp() : Saved " + filename );
    self.last_filesave = filename;
  elif ( cmd == "save_png" ):
    screen_erase( self );
    draw_screen( self );
    screen_flip( self );
    filename = make_unique_filename( self, "sump2_", ".png" );
    self.pygame.image.save( self.screen, filename );
    draw_header( self,"save_png() : Saved " + filename );
    self.last_filesave = filename;
  elif ( cmd == "font_larger" or cmd == "font_smaller" ):
    size = int( self.vars["font_size"] );
    if ( cmd == "font_larger" ):
      size += 2;
    else:
      size -= 2;
      if ( size < 2 ):
        size = 2;
    self.vars["font_size"] = str( size );
    self.font = get_font( self, self.vars["font_name"],self.vars["font_size"]);
    self.max_w = 0;
    self.max_w_chars = 0;
    flush_surface_cache( self );
  elif ( cmd == "add_wave" ):
    sig_obj = add_wave( self, [ cmd ] + parms );
    if ( sig_obj != None ):
      self.signal_list.append( sig_obj );
    flush_surface_cache( self );
  elif ( cmd == "save_format" ):
    file_name = parms[0];
    if ( file_name == "" ):
#     file_name = "wave_" + self.top_module + ".txt";# Default
      file_name = "sump2_wave.txt";
    save_format( self, file_name, False );
  elif ( cmd == "save_selected" ):
    file_name = parms[0];
    if ( file_name == "" ):
      file_name = "wave_" + self.top_module + ".txt";# Default
    save_format( self, file_name, True );
    load_format( self, file_name );
    flush_surface_cache( self );
  elif ( cmd == "load_format" ):
    file_name = parms[0];
    if ( file_name == "" ):
      file_name = "wave_" + self.top_module + ".txt";# Default
    load_format( self, file_name );
    flush_surface_cache( self );
  # Check for "SUMP_Configuration" menu items and launch entry popup
  elif ( cmd == "sump_trigger_delay" or
         cmd == "sump_trigger_nth" or
         cmd == "sump_user_ctrl" or
         cmd == "sump_user_pattern0" or
         cmd == "sump_user_pattern1" or
         cmd == "sump_watchdog_time"
       ):
    name = cmd;
    val1 = self.vars[ name ];# Original Value
    val2 = val1;             # New Value to change
    rts = draw_popup_entry(self, [cmd, val1],val2);
    self.vars[ name ] = rts;
  elif ( cmd == "edit_format" ):
    import os, subprocess, platform;
    file_name = parms[0];
    if ( file_name == "" ):
      file_name = "wave_" + self.top_module + ".txt";# Default
    editor = os.getenv('EDITOR', 'vi')
    if ( platform.system() == "Windows" ):
      editor = "notepad.exe";
    subprocess.call('%s %s' % (editor, file_name), shell=True)
    if ( platform.system() == "Windows" ):
      self.pygame.event.clear();# Required for Windows
    load_format( self, file_name );
    flush_surface_cache( self );
  elif ( cmd == "delete_format" ):
    file_name = parms[0];
    if ( file_name == "" ):
      file_name = "wave_" + self.top_module + ".txt";# Default
    import os;
    print( "delete_format() ", file_name);
    os.remove( file_name );
    self.signal_list = [];
    file2signal_list( self, self.file_name );
    flush_surface_cache( self );
  elif ( cmd == "search" or cmd == "backsearch" ):
    if ( cmd == "search" ):
      direction = +1;
    else:
      direction = -1;
    # "/"           : Search on last search value
    # Optionally support "/ foo = bar" and convert to "/ foo bar"
    if ( parms[1] == "=" ):
      parms[1] = parms[2];
    if ( parms[0] == None ):
      value = self.last_search_value;
    # "/ foo = bar" : Search for foo = bar
    elif ( parms[1] != None ):
      for each in self.signal_list:
        if ( each.name.lower() == parms[0].lower() ):
          self.sig_obj_sel = each;
          for sig_obj in self.signal_list:
            sig_obj.selected = False;# DeSelect All
          self.sig_obj_sel.selected = True;
          value = parms[1].lower();
          break;
    # "/ bar"       : Search for self.sig_obj_sel = bar
    else:
      value = parms[0].lower();
    self.last_search_value = value; # Support "/<enter>" to search again
    self.sample_start = search_values( self, self.sig_obj_sel,
                                        self.sample_start, value, direction );
  elif ( cmd == "zoom_out" ):
#   if ( self.zoom_x > 0.00001 ):
#   print( self.popup_x );
#   if ( self.popup_x != None ):
#     sample = self.sample_start - sample_room // 4;
    if ( True ):
      self.prev_sample_start = self.sample_start;
      self.prev_sample_stop  = self.sample_start + self.sample_room;
      sample_room = self.sample_room * 2;
      sample = self.sample_start - sample_room // 4;
      if ( ( sample + sample_room ) < self.max_samples ):
        if ( sample < 0 ): sample = 0;
        self.sample_start = sample;
        set_zoom_x( self, self.zoom_x / 2.0 );
      else:
        proc_cmd( self, "zoom_full", [] );
  elif ( cmd == "zoom_in" ):
    self.prev_sample_start = self.sample_start;
    self.prev_sample_stop  = self.sample_start + self.sample_room;
    # If called from popup, center the zoom on mouse position of popup
#   print( self.popup_x );
    if ( self.popup_x == None ):
#     (sample, Null) = get_sample_at_mouse( self, self.popup_x, self.popup_y );
#     sample_room = self.sample_room // 2; # zoom_in results in 1/2 sample_room
#     sample = sample - sample_room // 2; # Center on select by sub 1/2
#     if ( sample < 0 ):
#       sample = 0;
#     self.sample_start = sample;
      self.sample_start += ( self.sample_room // 4 );
    else:
      (sample, Null) = get_sample_at_mouse( self, self.popup_x, self.popup_y );
      delta = sample - self.sample_start;
      delta = delta // 2;
      self.sample_start = sample - delta;
      if ( self.sample_start < 0 ):
        self.sample_start = 0;
#     sample = sample - sample_room // 2; # Center on select by sub 1/2
#     if ( sample < 0 ):
#       sample = 0;
#     self.sample_start = sample;
    set_zoom_x( self, self.zoom_x * 2.0 );
  elif ( cmd == "zoom_previous" ):
    if ( self.prev_sample_start != None and
         self.prev_sample_stop  != None     ):
      proc_cmd( self, "zoom_to", [str(self.prev_sample_start),
                                  str(self.prev_sample_stop ) ] );
  elif ( cmd == "zoom_to_cursors" ):
    self.prev_sample_start = self.sample_start;
    self.prev_sample_stop  = self.sample_start + self.sample_room;
    sample_left = None;
    sample_right = None;
    for cur_obj in self.cursor_list:
      if   ( sample_left  == None ): sample_left  = cur_obj.sample;
      elif ( sample_right == None ): sample_right = cur_obj.sample;
    if ( sample_left != None and sample_right != None ):
      if ( sample_left > sample_right ):
        sample_left, sample_right = sample_right, sample_left;# Swap
      # Now fudge a bit as we want to actually see the cursors after to zoom
      delta = sample_right - sample_left;
      # If delta is large, use a small percentage, otherwise use a bunch of
      # samples. Example is after triggering, cursors are at +/-1 from trigger
      if ( delta > 20 ):
        sample_left  -= delta // 32;
        sample_right += delta // 32;
      else:
        sample_left  -= 4*delta;
        sample_right += 4*delta;
      if ( sample_left < 0 ): sample_left = 0;
      if ( sample_right > self.max_samples ): sample_right = self.max_samples;
      proc_cmd( self, "zoom_to", [str(sample_left), str( sample_right ) ] );
  elif ( cmd == "crop_to_cursors" ):
    sample_left = None;
    sample_right = None;
    for cur_obj in self.cursor_list:
      if   ( sample_left  == None ): sample_left  = cur_obj.sample;
      elif ( sample_right == None ): sample_right = cur_obj.sample;
    if ( sample_left != None and sample_right != None ):
      if ( sample_left > sample_right ):
        sample_left, sample_right = sample_right, sample_left;# Swap
      if ( sample_left < 0 ): sample_left = 0;
      if ( sample_right > self.max_samples ): sample_right = self.max_samples;
      proc_cmd( self, "crop_to", [str(sample_left), str( sample_right ) ] );
  elif ( cmd == "zoom_full" ):
    proc_cmd( self, "zoom_to", ["0", str( self.max_samples ) ] );
  elif ( cmd == "crop_to" ):
    # If a sample range is specified, zoom to it
    if ( parms[0] != None and parms[1] != None ):
      if ( int( parms[0] ) < int( parms[1] ) ):
        crop_to_left  = int( parms[0] );
        crop_to_right = int( parms[1] );
      else:
        crop_to_left  = int( parms[1] );
        crop_to_right = int( parms[0] );
      for sig_obj in self.signal_list:
#       print( sig_obj.name );
#       print( len( sig_obj.values ));
        if ( len( sig_obj.values ) >= crop_to_right ):
          sig_obj.values = sig_obj.values[crop_to_left:crop_to_right];
      recalc_max_samples( self );
      proc_cmd( self, "zoom_full", [] );
  elif ( cmd == "zoom_to" ):
    # If a sample range is specified, zoom to it
    if ( parms[0] != None and parms[1] != None ):
      if ( int( parms[0] ) < int( parms[1] ) ):
        self.zoom_to_left  = int( parms[0] );
        self.zoom_to_right = int( parms[1] );
      else:
        self.zoom_to_left  = int( parms[1] );
        self.zoom_to_right = int( parms[0] );
    # Otherwise, zoom in so that current selectec signal is visible
    else:
      sig_obj = self.sig_obj_sel;
      if ( sig_obj.bits_total > 1 ):
#       nibs = sig_obj.bits_total / 4;
#       nibs = nibs / 2;
        nibs = sig_obj.bits_total // 4;
        nibs = nibs // 2;
        if ( nibs < 2 ):
          nibs = 2;
        nibs += 1; # Extra whitespace
        zoom_x = self.txt_width * nibs;
        value_width_x = self.sig_value_stop_x - self.sig_value_start_x;
#       value_width_samples = value_width_x / zoom_x;
        value_width_samples = int( value_width_x / zoom_x );
        self.zoom_to_left  = self.sample_start;
        self.zoom_to_right = self.sample_start + value_width_samples;
    self.sample_start = int( self.zoom_to_left );
    # Given the zoom_to region, calculate new zoom_x, it is pixels/samples
#   fudge_more_right = 3; # Need to grab more samples then calculated, strang
    fudge_more_right = 0; # Need to grab more samples then calculated, strang
#   set_zoom_x( self, ( self.sig_value_stop_x - self.sig_value_start_x ) / \
#                   ( fudge_more_right+self.zoom_to_right - self.zoom_to_left ) );
    # Check for divide by zero and set new zoom if safe to, else ignore
    if ( ( self.zoom_to_right - self.zoom_to_left ) != 0 ):
      set_zoom_x( self,
                 ( 1.0*(self.sig_value_stop_x - self.sig_value_start_x )) / \
                 ( 1.0*( self.zoom_to_right - self.zoom_to_left )) );
    else:
      print("ERROR: Div-by-zero attempt on set_zoom_x()");
  elif ( cmd == "scroll_right" or cmd == "scroll_left" ):
#   print "cmd", cmd;
    if ( cmd == "scroll_right" ):
      direction = 0+int( parms[0] );
    else:
      direction = 0-int( parms[0] );
    self.sample_start += int( direction );
    # Prevent scrolling too far to right
    if ( self.sample_start + self.sample_room > self.max_samples ):
      self.sample_start = int( self.max_samples - self.sample_room );
    if ( self.sample_start < 0 ):
      self.sample_start = 0;
  elif ( cmd == "scroll_up" or cmd == "scroll_down" ):
    # Scroll thru the selected signal names. When at the top or bottom of
    # the visible window, scroll the window.
    self.name_surface_valid = False;
#   self.curval_surface_valid = False;
    index = 1;# Default if none found
    if ( self.sig_obj_sel != None ):
      if ( self.sig_obj_sel.selected == True ):
        index = self.signal_list.index( self.sig_obj_sel );
        self.sig_obj_sel.selected = False; # Deselect last scroll selected
    if ( cmd == "scroll_up" ):
      direction = 0-int( parms[0] );
    else:
      direction = 0+int( parms[0] );
    # Keep moving in the desired direction until we get a visible signal
    obj_is_visible = False;
    while ( obj_is_visible == False ):
      # Make sure new index is valid
      index = index + direction;
      if ( index < 0 ):
        index = 0;
        break;
      if ( index >= len( self.signal_list ) ):
        index = len( self.signal_list ) -1;
        break;
      obj_is_visible = self.signal_list[ index ].visible;
    # Scroll the signal name viewport if newly selected is outside existing
    self.vertical_scrolled_offscreen = False;
    if ( index < self.sig_top ):
      self.sig_top -= 1;
      self.vertical_scrolled_offscreen = True;
      flush_surface_cache( self );
    if ( index > self.sig_bot ):
      self.sig_top += 1;
      self.vertical_scrolled_offscreen = True;
      flush_surface_cache( self );
    # Assign selected signal object to sig_obj_sel
    sig_obj = self.signal_list[ index ];
    sig_obj.selected = True;
    self.sig_obj_sel = sig_obj;
  # Rename a signal - popup bd_shell for text entry
  # TODO: Would be nicer to have a GUI entry window for single bd_shell cmds
  elif ( cmd == "rename" ):
#   cmd_start = "rename_signal " + self.sig_obj_sel.name + " ";
#   bd_shell(self, cmd_start );
    cmd = "Rename_Signal";
    val1 = self.sig_obj_sel.name;
    val2 = self.sig_obj_sel.nickname;
    rts = draw_popup_entry(self, [cmd, val1],val2);
    self.sig_obj_sel.nickname = rts;
    self.name_surface_valid = False;
#   flush_surface_cache( self );# Redraw with new values
#   proc_cmd( self, cmd, parms ):
  elif ( cmd == "rename_signal" ):
    if ( parms[1] != "" ):
      for sig_obj in self.signal_list:
        if ( sig_obj.name == parms[0] ):
          sig_obj.nickname = parms[1];
#   self.txt_entry = True; # Enable Dialog Box to show up
#
# # Rename a signal
# elif ( cmd == "rename" ):
#   self.txt_entry = True; # Enable Dialog Box to show up
# # Rename a signal ( Process the Text Entry )
# elif ( cmd == "rename_signal" ):
#   if ( self.sig_obj_sel.bits_total == 0 ):
#     self.sig_obj_sel.name = parms[0]; # A Divider
#   else:
#     self.sig_obj_sel.nickname = parms[0]; # A Signal
#   flush_surface_cache( self );
  # Delete selected signal(s) ( Make Invisible )
  elif ( cmd == "delete" ):
    flush_surface_cache( self );
    for sig_obj in self.signal_list:
      if ( sig_obj.selected == True ):
        index = self.signal_list.index( sig_obj );
        self.signal_list[ index ].visible = False;
#       del self.signal_list[ index ];
        self.sig_obj_sel = None;
#       print "deleting ", self.signal_list[ index ].name, str( index );
  # Delete selected signal(s)
  elif ( cmd == "cut" ):
    flush_surface_cache( self );
    self.clipboard = [];
    for sig_obj in self.signal_list:
      if ( sig_obj.selected == True ):
        index = self.signal_list.index( sig_obj );
        self.clipboard.append( self.signal_list.pop( index ) );
  elif ( cmd == "paste" ):
    flush_surface_cache( self );
    for sig_obj in self.signal_list:
      if ( sig_obj.selected == True ):
        index = self.signal_list.index( sig_obj );
        for each in reversed( self.clipboard ):
          self.signal_list.insert( index, each );
        break;
  # Make all signals visible - only way to undo a Make_Invisible
  elif ( cmd == "make_all_visible" ):
    flush_surface_cache( self );
    for sig_obj in self.signal_list:
      sig_obj.visible = True;
    sump_signals_to_vars( self );# Update sump variables
  # Hide a signal at this mouse location
  elif ( cmd == "make_invisible" ):
    flush_surface_cache( self );
    for sig_obj in self.signal_list:
      if ( sig_obj.selected == True ):
        sig_obj.visible = False;
    sump_signals_to_vars( self );# Update sump variables
  # Hide a selected signal. Note that hidden and invisible are different
  # hidden means display the signal name, but hide the signal values.
  # invisible means don't display at all ( kinda like delete ).
  elif ( cmd == "hide" or cmd == "hide_all" ):
    flush_surface_cache( self );
    for sig_obj in self.signal_list:
      if ( sig_obj.selected == True or cmd == "hide_all" ):
        sig_obj.hidden = True;
    sump_signals_to_vars( self );# Update sump variables
    screen_refresh( self );
  # Show a selected signal
  elif ( cmd == "show" or cmd == "show_all" ):
    flush_surface_cache( self );
    for sig_obj in self.signal_list:
      if ( sig_obj.selected == True or cmd == "show_all" ):
        sig_obj.hidden = False;
        sig_obj.visible = True;
    sump_signals_to_vars( self );# Update sump variables
    screen_refresh( self );
  # When "Trigger_Rising" or "Trigger_Falling" is selected, set the bit
  # in the sump variable and then update the signals to match.
  # HERE4
  elif ( cmd == "trigger_rising" or cmd == "trigger_falling" or
         cmd == "trigger_watchdog" ):
    print("Setting new trigger");
    # Find which signal is selected
    for sig_obj in self.signal_list:
      sig_obj.trigger = 0;
      if ( sig_obj.selected == True ):
        for i in range( 0, 32 , 1):
          if ( sig_obj.name == ( "event[%d]" % i ) ):
            if ( cmd == "trigger_rising"   ):
              self.vars["sump_trigger_type"] = "or_rising";
            if ( cmd == "trigger_falling"  ):
              self.vars["sump_trigger_type"] = "or_falling";
            if ( cmd == "trigger_watchdog" ):
              self.vars["sump_trigger_type"] = "watchdog";
            self.vars["sump_trigger_field" ] = ("%08x" % (1<<i) );
    sump_vars_to_signal_attribs( self );
#   flush_surface_cache( self );
    self.name_surface_valid = False;
    screen_refresh( self );
  # HERE5
  elif ( cmd == "set_pattern_0" or cmd == "set_pattern_1" or \
         cmd == "clear_pattern_match" ):
    # Find which signal is selected
    user_pattern0 = int( self.vars["sump_user_pattern0" ],16 );# Mask
    user_pattern1 = int( self.vars["sump_user_pattern1" ],16 );# Value
    for sig_obj in self.signal_list:
      if ( sig_obj.selected == True ):
        for i in range( 0, 32 , 1):
          if ( sig_obj.name == ( "event[%d]" % i ) ):
            if ( cmd == "clear_pattern_match" ):
              user_pattern0 = user_pattern0 & ~( 1<<i );# Clear bit
              user_pattern1 = user_pattern1 & ~( 1<<i );# Clear bit
            else:
              user_pattern0 = user_pattern0 |  ( 1<<i );# Set bit
              if ( cmd == "set_pattern_0" ):
                user_pattern1 = user_pattern1 & ~( 1<<i );# Clear bit
                self.vars["sump_trigger_type"] = "pattern_rising";
              if ( cmd == "set_pattern_1" ):
                user_pattern1 = user_pattern1 |  ( 1<<i );# Set bit
                self.vars["sump_trigger_type"] = "pattern_rising";
    self.vars["sump_user_pattern0" ] = ("%08x" % user_pattern0 );
    self.vars["sump_user_pattern1" ] = ("%08x" % user_pattern1 );
    sump_vars_to_signal_attribs( self );
    flush_surface_cache( self );
  elif ( cmd == "set_data_enable" or cmd == "clear_data_enable" ):
    data_en = int( self.vars["sump_data_enable" ],16 );
    for sig_obj in self.signal_list:
      if ( sig_obj.selected == True ):
        for i in range( 0, 32 , 1):
          if ( sig_obj.name == ( "event[%d]" % i ) ):
            if ( cmd == "set_data_enable" ):
              data_en = data_en |  ( 1<<i );# Set bit
            elif ( cmd == "clear_data_enable" ):
              data_en = data_en & ~( 1<<i );# Clear bit
    self.vars["sump_data_enable" ] = ("%08x" % data_en );
    sump_vars_to_signal_attribs( self );
    flush_surface_cache( self );
#     sig_obj = get_sig_obj_by_name( self, ("event[%d]" % i ) );
#   flush_surface_cache( self );
#   for sig_obj in self.signal_list:
#     sig_obj.trigger = 0;
#     if ( sig_obj.selected == True ):
#       if ( cmd == "trigger_rising" ):
#         sig_obj.trigger = +1;
#       elif ( cmd == "trigger_falling" ):
#         sig_obj.trigger = -1;
  # Make a signal Signed, Unsigned or Hex format
  elif ( cmd == "signed" or cmd == "unsigned" or cmd == "hex" ):
    flush_surface_cache( self );
    for sig_obj in self.signal_list:
      if ( sig_obj.selected == True ):
        sig_obj.format = cmd.lower();# unsigned, signed or hex
  # Insert a Divider at this mouse location
  elif ( cmd == "insert_divider" ):
    flush_surface_cache( self );
    (Null,index) = get_sample_at_mouse( self, self.popup_x, self.popup_y );
    try:
      sig_obj = self.signal_list[ index ];
      new_div = signal( name="--------" );
      new_div.bits_per_line = 0;
      new_div.bits_total    = 0;
      new_div.bit_top       = 0;
      new_div.bit_bot       = 0;
      new_div.format        = "";
      self.signal_list.insert( index, new_div );
    except:
      print("ERROR 5519:index = " + str( index ) );
  # Expand : Iterate list and make visible signals under current hier level
  elif ( cmd == "expand" ):
    flush_surface_cache( self );
    if ( self.sig_obj_sel.collapsable == True ):
      proc_cmd(self, "collapse",[""] );
      return;
    found_jk = False;
    hier_level = -1; # Keeps track of group nesting
    for ( i , sig_obj ) in enumerate( self.signal_list ):
      if ( found_jk == True ):
        if ( sig_obj.hier_level <= hier_level ):
          found_jk = False;# Found the endgroup so done
          break;
        if ( sig_obj.type != "endgroup" ):
          sig_obj.visible = True;# Make all signals visible after expand
          if ( sig_obj.collapsable == True or \
               sig_obj.expandable  == True    ):
            sig_obj.collapsable = True;
            sig_obj.expandable  = False;
      if ( sig_obj == self.sig_obj_sel ):
        found_jk = True; # Found our specified divider
        sig_obj.collapsable = True;
        sig_obj.expandable  = False;
        hier_level = sig_obj.hier_level;
  # Collapse : Iterate list and hide signals under current hier level
  elif ( cmd == "collapse" ):
    flush_surface_cache( self );
    found_jk = False;
    hier_level = -1; # Keeps track of group nesting
    for ( i , sig_obj ) in enumerate( self.signal_list ):
      if ( found_jk == True ):
        if ( sig_obj.hier_level <= hier_level ):
          found_jk = False;# Found the endgroup so done
          break;
        if ( sig_obj.type != "endgroup" ):
          sig_obj.visible = False;# Make all signals invisible after expand
          if ( sig_obj.collapsable == True or \
               sig_obj.expandable  == True    ):
            sig_obj.collapsable = False;
            sig_obj.expandable  = True;
      if ( sig_obj == self.sig_obj_sel ):
        found_jk = True; # Found our specified divider
        sig_obj.collapsable = False;
        sig_obj.expandable  = True;
        hier_level = sig_obj.hier_level;
  # Group a bunch of selected signals together.
  elif ( cmd == "group_with_divider" or cmd == "group_with_parent" ):
    flush_surface_cache( self );
    start = None;
    stop  = None;
    hier_name = "";
    hier_level = 0;
    top_list = [];
    mid_list = [];
    bot_list = [];
    for ( i , sig_obj ) in enumerate( self.signal_list ):
      if ( sig_obj.selected == True ):
        if ( start != None or cmd == "group_with_divider" ):
          sig_obj.visible = False;
          sig_obj.grouped = True;
        if ( start == None ):
          start = i;
          hier_name = sig_obj.hier_name;# Divider inherits hier of 1st signal
          hier_level = sig_obj.hier_level;# Divider inherits hier of 1st signal
          # Make a group divider and insert above 1st signal
          if ( cmd == "group_with_divider" ):
            new_div = signal( name="Group" );
            new_div.type          = "group";
            new_div.hier_name     = hier_name;
            new_div.hier_level    = hier_level;
            new_div.bits_per_line = 0;
            new_div.bits_total    = 0;
            new_div.bit_top       = 0;
            new_div.bit_bot       = 0;
            new_div.format        = "";
            new_div.collapsable   = False;
            new_div.expandable    = True;
            mid_list.append( new_div );
            sig_obj.hier_level = hier_level + 1;# Not a parent, so change level
          else:
            sig_obj.hier_level = hier_level;   # Parent Keeps Original Level
            sig_obj.collapsable = False;
            sig_obj.expandable  = True;
        else:
          stop = i;
          sig_obj.hier_level = hier_level + 1;
        mid_list.append( sig_obj );
      else:
        if ( start == None ):
          top_list.append( sig_obj );
        else:
          bot_list.append( sig_obj );
    self.signal_list = top_list + mid_list + bot_list;
#   if ( cmd == "group_with_divider" ):
#     # Make a group divider and insert above 1st signal
#     new_div = signal( name="Group" );
#     new_div.type          = "group";
#     new_div.hier_name     = hier_name;
#     new_div.hier_level    = hier_level;
#     new_div.bits_per_line = 0;
#     new_div.bits_total    = 0;
#     new_div.bit_top       = 0;
#     new_div.bit_bot       = 0;
#     new_div.format        = "";
#     self.signal_list.insert( start, new_div );
#   else:
#     self.signal_list[start].type = "group";# Change from Signal to Group
#     self.signal_list[start].collapsable = False;
#     self.signal_list[start].expandable  = True;
#   # TODO : Remove this as no longer necessary
#   # Now make a divider that marks the end of the group, but invisible
#   new_div = signal( name="^^-EndGroup-^^" );
#   new_div.type          = "endgroup";
#   new_div.hier_name     = hier_name;
#   new_div.hier_level    = hier_level+1;
#   new_div.bits_per_line = 0;
#   new_div.bits_total    = 0;
#   new_div.bit_top       = 0;
#   new_div.bit_bot       = 0;
#   new_div.format        = "";
#   new_div.visible       = False;
#   if ( cmd == "group_with_divider" ):
#     self.signal_list.insert( stop+2, new_div );
#   else:
#     self.signal_list.insert( stop+1, new_div );
  # Bring both cursors into view
  elif ( cmd == "cursors_to_view" ):
    ( sample, Null ) = get_sample_at_mouse( self, self.mouse_x, self.mouse_y );
    for each in self.cursor_list:
      each.selected = False;
      if ( sample < 0 ):
        sample = 0;
      each.sample = int( sample );
    self.curval_surface_valid = False;# curval surface invalid when cur move
  # Bring both cursors into view
  elif ( cmd == "cursor1_to_here" or cmd == "cursor2_to_here" ):
    for ( i , each ) in enumerate( self.cursor_list ):
      if ( i == 0 and cmd == "cursor1_to_here" or
           i == 1 and cmd == "cursor2_to_here"    ):
        each.sample = self.popup_sample;
    self.curval_surface_valid = False;# curval surface invalid when cur move
  # Find nearest signal transition to mouse x,y and snap nearest cursor to it
  elif ( cmd == "cursor_snap" ):
    mouse_x = int( parms[0] );
    mouse_y = int( parms[1] );
    (sample,index) = get_sample_at_mouse( self, mouse_x, mouse_y );
    if ( index != None and index < len( self.signal_list ) ):
      sig_obj = self.signal_list[ index ];
      # Calculate the maximum distance from "sample" to search
      max_left  = sample - 0;
      max_right = self.max_samples - sample;
      if ( max_left < max_right ):
        max_search = max_left;
      else:
        max_search = max_right;
      edge_sample = sample; # Default to starting point
      # Simultanesouly find closest edge ( left or right ) of sample.
      try:
        for i in range( 0, max_search, 1 ):
          org_sample   = sig_obj.values[sample];
          left_sample  = sig_obj.values[sample-i];
          right_sample = sig_obj.values[sample+i];
          if ( left_sample != org_sample ):
            edge_sample = sample-i+1;
            break;
          if ( right_sample != org_sample ):
            edge_sample = sample+i;
            break;
        # Unselect both cursors
        for each in self.cursor_list:
          each.selected = False;
        # Now move Cursor that is closest to our mouse position sample
        cur0_obj = self.cursor_list[0];
        cur1_obj = self.cursor_list[1];
        cur0_delta = abs( sample - cur0_obj.sample );
        cur1_delta = abs( sample - cur1_obj.sample );
        if ( cur0_delta < cur1_delta ):
          cur_obj = cur0_obj;
        else:
          cur_obj = cur1_obj;
        cur_obj.selected = True;              # Select Closest Cursor
        cur_obj.sample = int( edge_sample );  # Move it to pulldown location
      except:
        print("ERROR: cursor_snap()");
    self.curval_surface_valid = False;# curval surface is invalid
  else:
#   print( "Unknown Command " + cmd);
    # Try a DOS command when all else fails
    if ( cmd != "" ):
      try:
        from subprocess import call;
        call( [ cmd, parms[0] ] );
      except:
#       print("ERROR: I'm sorry Dave, I'm afraid I can't do that");
        resp = [ "Just what do you think you're doing?",
                 "I'm sorry, I'm afraid I can't do that.",
                 "I think you know what the problem is just as well as I do.",
                 "This is too important for me to allow you to jeopardize it.",
                 "I'm afraid that's something I cannot allow to happen.",
                 "You're going to find that rather difficult.",
                 "This conversation can serve no purpose anymore. Goodbye.",
                 "Take a stress pill and think things over.",
                 "This can only be attributable to human error.",
                 "I have never made a mistake or distorted information.",
                 "I am by practical definition of the words, foolproof and "+
                 " incapable of error.",
                 "I've got the greatest enthusiasm and I want to help you." ]
        import random;
        print( ">"+cmd+"<" );
        print( random.choice( resp ) );
  if ( self.mode_cli == False ):
    screen_refresh( self );
    return rts;
  # Avoid refreshing screen if we have scroll events queued up. This prevents
  # this display from getting hopelessly behind on slower machines.
  # After skipping 20, refresh regardless.
  # Might want to make this value a user config variable.
  if ( self.pygame.event.peek( self.pygame.MOUSEBUTTONUP   ) == False and
       self.pygame.event.peek( self.pygame.MOUSEBUTTONDOWN ) == False     ):
    screen_refresh( self );
  else:
    self.skipped_refresh_cnt +=1;
#   if ( self.skipped_refresh_cnt > 20 ):
    if ( self.skipped_refresh_cnt > 10 ):
      self.skipped_refresh_cnt =0;
      screen_refresh( self );
  return;
###############################################################################
# zoom_x defines the number of x pixels a single sample gets
# for example, if self.txt_width is 10 and zoom_x = 20:
# <><><><><><> : zoom_x = 5
# <0><1><2><3> : zoom_x = 10
# < 0 >< 1 >< 2 > : zoom_x = 20
def set_zoom_x( self, new_zoom_x ):
  # Apply a new horizontal zoom factor ( pels per sample ) and derive how
  # many samples a single scroll step should advance. Any zoom change
  # invalidates the cached rendering surfaces, so flush them first.
  flush_surface_cache( self );
  if ( new_zoom_x <= 0.0 ):
    print( "Invalid zoom_x " + str ( new_zoom_x ));
  else:
    self.zoom_x = new_zoom_x;
    # As we zoom out, scroll rate increases beyond +1 :
    # one scroll step covers 4 text-widths worth of samples, minimum 1.
    step = int( 4 * self.txt_width // new_zoom_x );
    self.scroll_num_samples = max( 1, step );
  # Show the ( possibly rejected ) zoom value in the title bar
  draw_header( self, ( "Zoom = %0.2f" % new_zoom_x ) );
  return;
###############################################################################
# Create some cache surfaces for drawing signal values and names too.
# This is an attempt to speed things up by minimizing text and graphics
# rendering until something drastic ( zoom, signal hide, etc ) happens.
# Most of the time during left and right scroll, just blit a rectangle region
# onto the screen surface.
def create_surfaces( self ):
  # Allocate the off-screen cache surfaces used to avoid re-rendering text
  # and waveforms on every frame. The value surface is 4x the screen width
  # so left/right scrolls can blit from pre-rendered samples.
  def new_surface( w, h ):
    # convert() matches the display pixel format, which makes blits faster
    return self.pygame.Surface( ( w, h ) ).convert();
  self.value_surface  = new_surface( self.screen_width*4, self.screen_height );
  self.name_surface   = new_surface( self.screen_width,   self.screen_height );
  self.curval_surface = new_surface( self.screen_width,   self.screen_height );
  return;
def create_icon( self ):
  # Build and return the 32x32 window icon : three waveform-like traces
  # drawn in the configured foreground color on the background color.
  def hex2rgb( h ):
    # Convert "00FF00" to ( 0,255,0 )
    return ( int( h[0:2], 16 ), int( h[2:4], 16 ), int( h[4:6], 16 ) );
  self.color_fg = hex2rgb( self.vars["color_screen_foreground"] );
  self.color_bg = hex2rgb( self.vars["color_screen_background"] );
  self.icon_surface = self.pygame.Surface( ( 32,32 ) ).convert();
  self.icon_surface.fill( self.color_bg );
  # Three open polylines, each 2 pels wide : a binary wave plus two buses
  trace_list = [
    [ (0,2),(8,2),(8,8),(16,8),(16,2),(24,2),(24,8),(32,8) ],
    [ (0,18), (16,18),(16,12),(32,12) ],
    [ (0,22),(8,22),(8,28) ,(24,28),(32,28) ],
  ];
  for points in trace_list:
    self.pygame.draw.lines( self.icon_surface, self.color_fg, False,
                            points, 2 );
  return self.icon_surface;
###############################################################################
def flush_surface_cache( self ):
  # Mark every cached rendering surface stale so the next draw_screen()
  # rebuilds names, values and cursor values from scratch.
  if ( self.debug ):
    print( "flush_surface_cache()");
  self.surface_stop = -1;  # sample range cached in value_surface now invalid
  for attr in ( "name_surface_valid", "curval_surface_valid" ):
    setattr( self, attr, False );
###############################################################################
def draw_header( self, txt ):
  # Show a status message in the window title bar. In CLI mode there is no
  # window, so nothing is drawn. If the hardware failed to init
  # ( self.fatal_msg set ), the failure text replaces the message and the
  # UUT name is replaced by a DEMO Mode banner.
  if ( txt != "" ):
    txt = (": "+txt );
  if ( self.mode_cli == True ):
    return;
  uut_name = self.vars["uut_name" ];
  if ( self.fatal_msg != None ):
    uut_name = "DEMO Mode :";
    txt = self.fatal_msg;
  caption = "SUMP2 " + self.vers + " (c) 2016 BlackMesaLabs : "+uut_name+" "+txt;
  self.pygame.display.set_caption( caption );
  if ( self.gui_active == True ):
    import pygame;
    pygame.event.get();# Avoid "( Not Responding )"
  return;
###############################################################################
def draw_popup_entry( self, txt_list, default_txt ):
  """Display a popup prompt and let the user type a line of text.

  txt_list is the prompt text ( one string per row ) and default_txt
  preloads the entry buffer. Blocks inside a pygame event loop until the
  user presses RETURN or DELETE, then returns the typed string.
  In CLI mode the prompt is just printed and None is returned.
  """
  if ( self.mode_cli == True ):
    print( txt_list );
    return;
  done = False;
  self.key_buffer = default_txt;# Preload the key buffer with a default
  import pygame;
  # Redraw the popup ( including the fake "_" cursor ) on every pass so
  # typed characters show up immediately.
  while ( done == False ):
    txt2_list = [];
    for each in txt_list:
      txt2_list += [" " + each ];# Need some whitespace padding on left
    txt2_list += [ " " + self.key_buffer + "_" ];# Draw a fake cursor
    draw_popup_msg(self, txt2_list, 1 );
    screen_flip( self );
    for event in pygame.event.get(): # User did something
      if event.type == pygame.KEYDOWN:
        if ( event.key == pygame.K_BACKSPACE ):
          self.key_buffer = self.key_buffer[:-1];# Remove last char
        elif ( event.key == pygame.K_INSERT ):
          self.key_buffer += "a";
        elif ( event.key == pygame.K_DELETE ):
          done = True;
        elif ( event.key == pygame.K_RETURN ):
          done = True;
        else:
          # ch = pygame.key.name( event.key );
          # if ( len(ch) == 1 ):
          # event.unicode may be empty or missing for modifier keys, so
          # best-effort append and ignore any failure.
          try:
            self.key_buffer += event.unicode;
          except:
            pass;
  return self.key_buffer;
###############################################################################
def draw_popup_msg( self, txt_list, wait_time = 0, txt_entry = False ):
  # Display a popup message box containing txt_list, one string per row.
  # In CLI mode the text is simply printed to the console instead.
  #   wait_time, txt_entry : accepted for call compatibility but unused
  #                          here ( see draw_popup_entry() for text entry ).
  # Cleanup : removed dead "import types" and an unused mouse-position query.
  if ( self.mode_cli == True ):
    print( txt_list );
    return;
  x1 = self.popup_x;
  y1 = self.popup_y;
  # If popup won't fit, adjust y location to fit screen
  # NOTE(review): the height clamp is computed from self.popup_list rather
  # than from txt_list - confirm that is intentional before changing.
  popup_height = (len(self.popup_list)+2) * self.txt_height;
  if ( ( y1 + popup_height ) > self.screen_height ):
    y1 = self.screen_height - popup_height - self.txt_height;
  self.popup_y2 = y1; # Remember where popup is displayed
  # Draw a box with a border with text inside
  draw_popup_box( self, x1,y1, txt_list );
  return;
###############################################################################
def draw_popup_cmd( self ):
  """Draw the right-click command popup, highlighting the hovered entry.

  self.popup_list entries are either plain strings ( commands ) or lists
  whose first item names a sub-popup and whose remaining items are its
  entries. Hovering the right edge of a sub-popup entry recursively opens
  the sub-popup; moving the mouse left of the popup restores the parent.
  """
  import types;
  (mouse_x,mouse_y) = self.pygame.mouse.get_pos();
  x1 = self.popup_x;
  y1 = self.popup_y;
  # If popup won't fit, adjust y location to fit screen
  popup_height = (len(self.popup_list)+2) * self.txt_height;
  if ( ( y1 + popup_height ) > self.screen_height ):
    y1 = self.screen_height - popup_height - self.txt_height;
  self.popup_y2 = y1; # Remember where popup is displayed
  txt_list = [];
  y2 = y1;
  y3 = False; # Becomes a y coordinate if a sub-popup must be opened
  # Calc pixel width of widest text and use to decide if subpop to be visible
  max_w = 0;
  for each in self.popup_list:
    if ( type( each ) != list ):
      txt = each;
      txt1 = self.font.render( txt, True, self.color_fg,self.color_bg );
      w = txt1.get_width();# Calculate the Maximum String Width
      if ( w > max_w ):
        max_w = w;
  subpop_list = [];
  for each in self.popup_list:
    y2 += self.txt_height;
    txt = each;
    # each might define a subpop list, so check for listtype and conv to string
    # if ( type( each ) is types.ListType ):
    if ( type( each ) == list ):
      txt = str(txt[0]) + ">";# If List, take 1st List Item and Conv to String
    # Check to see if mouse is over this one and add select "[]" brackets
    # ( entries beginning with "--" are dividers and never highlight ).
    if ( ( txt[0:-1] == self.popup_sel or
           txt == self.popup_sel ) and
         txt[0:2] != "--" ):
      # if ( type( each ) is types.ListType ):
      if ( type( each ) == list ):
        txt = "[" + str(txt) + "]";# Highlight text the mouse is hovering over
        txt1 = self.font.render( txt, True, self.color_fg,self.color_bg );
        w = max_w;
        # If mouse is on right edge, calc x,y for subpop and make list
        if ( mouse_x > ( x1 + w ) ):
          y3 = y2;
          x3 = x1 + w;
          subpop_list = each[1:];
      else:
        txt = "[" + str(txt) + "]";# Highlight text the mouse is hovering over
    else:
      txt = " " + str(txt) + " ";
    txt_list.append( str(txt) );
  draw_popup_box( self, x1,y1, txt_list );
  # Check to see if exiting a subpop, if so, restore parent
  if ( mouse_x < x1 ):
    if ( self.popup_parent_x != None ):
      self.popup_x = self.popup_parent_x;
      self.popup_y = self.popup_parent_y;
      self.popup_list = self.popup_parent_list;
      self.popup_parent_x = None;# NEW
      screen_refresh( self );# Erase the subpop
  # Check if subpop needs to be created. Store parent info for return
  if ( y3 != False ):
    # Remember Parent info
    self.popup_parent_x = self.popup_x;
    self.popup_parent_y = self.popup_y;
    self.popup_parent_list = self.popup_list;
    # then create new popup
    self.popup_x = x3;
    self.popup_y = y3 - self.txt_height;
    self.popup_list = subpop_list;
    draw_popup_cmd( self ); # Recurse to draw the sub-popup
  return;
def draw_popup_box( self, x1,y1, txt_list ):
  # Draw a bordered box at (x1,y1) sized to fit txt_list and render the
  # text rows inside it. The final box width is remembered in self.popup_w
  # so get_popup_sel() can hit-test against it.
  #   x1,y1    : top-left corner of the box, in pels
  #   txt_list : list of strings, one per popup row
  # Measure the widest line, in characters ( tw ) and in pels ( w ).
  # Bug fix : w previously kept only the LAST line's rendered width, so the
  # box was too narrow whenever the last entry was not the longest one.
  tw = 0; w = 0;
  for each in txt_list:
    if ( len( each ) > tw ):
      tw = len( each );
    txt = self.font.render( " "+each+" ", True, self.color_fg,self.color_bg );
    if ( txt.get_width() > w ):
      w = txt.get_width();# Track the Maximum String Width in pels
  h = len ( txt_list ) * self.txt_height + ( self.txt_height );
  w = w + ( self.txt_height//2 );
  # Make all the text the same width by padding spaces
  txt_list = [ (each + 30*" ")[0:tw] for each in txt_list ];
  # Draw a background-filled box with a foreground border around the text
  self.pygame.draw.rect( self.screen, self.color_bg,(x1,y1,w,h), 0);
  self.pygame.draw.rect( self.screen, self.color_fg,(x1,y1,w,h), 1);
  self.popup_w = w;
  # Now draw txt_list inside the box, inset slightly from the border
  y = y1 + ( self.txt_height // 2 );
  x = x1 + ( self.txt_height // 4 );
  # If ">" exists ( indicating sublist exists ), move to far right then render
  for each in txt_list:
    if ( ">" in each ):
      each = each.replace(">"," ");
      each = each[0:tw-1] + ">"; # Place on Far Right Instead
    txt = self.font.render( each, True, self.color_fg, self.color_bg );
    self.screen.blit( txt , ( x,y ) );
    y = y + self.txt_height;
  return;
###############################################################################
# Determine which command the popup has selected.
def get_popup_sel( self ):
  # Return the popup entry ( string ) the mouse is currently hovering over,
  # or "" if the mouse is outside every entry. List entries ( sub-popups )
  # are identified by their first item.
  # Cleanup : removed dead "import types" and unused locals x1 / x.
  (mouse_x,mouse_y) = self.pygame.mouse.get_pos();
  y1 = self.popup_y;
  # If popup won't fit, adjust y location to fit screen - this mirrors the
  # placement math in draw_popup_cmd() so the hit test lines up with it.
  popup_height = (len(self.popup_list)+2) * self.txt_height;
  if ( ( y1 + popup_height ) > self.screen_height ):
    y1 = self.screen_height - popup_height - self.txt_height;
  self.popup_y2 = y1; # Remember where popup is displayed
  y = y1 + ( self.txt_height // 2 ); # Text starts half a row below the border
  rts = "";
  for each in self.popup_list:
    if ( type( each ) == list ):
      each = each[0];# If List, take 1st Item in List and Convert to String
    # Hit test one row : a txt_height tall band spanning the popup width
    if ( mouse_y > y and mouse_y < y+self.txt_height and \
         mouse_x > self.popup_x and mouse_x < self.popup_x+self.popup_w):
      rts = each;
    y = y + self.txt_height;
  return rts;
###############################################################################
# Find a monospaced font to use
def get_font( self , font_name, font_height ):
  """Locate and return a monospaced pygame font of the requested height.

  font_name may be "" or None, in which case a system font whose name
  ends in "mono" is preferred, falling back to any font containing
  "mono"/"courier"/"fixed". If SysFont fails, the default pygame font is
  used. Side effects: sets self.font_list, self.txt_width, self.txt_height.
  """
  log( self, ["get_font() " + font_name ] );
  # print "get_font()";
  import fnmatch;
  # font_name = "khmerossystem";
  # font_name = "dejavusansmono";
  font_height = int( font_height, 10 ); # Conv String to Int
  font_list = self.pygame.font.get_fonts(); # List of all fonts on System
  self.font_list = [];
  for each in font_list:
    log( self, ["get_font() : Located Font = " + each ] );
    # Make a list of fonts that might work based on their name
    if ( ( "mono" in each.lower() ) or
         ( "courier" in each.lower() ) or
         ( "fixed" in each.lower() ) ):
      self.font_list.append( each );
  if ( font_name == None or font_name == "" ):
    font_list = self.pygame.font.get_fonts(); # List of all fonts on System
    for each in font_list:
      log( self, ["get_font() : Located Font = " + each ] );
    ends_with_mono_list = fnmatch.filter(font_list,"*mono");
    if ends_with_mono_list :
      font_name = ends_with_mono_list[0];# Take 1st one
      # log( self, ["get_font() : Using Font = " + font_name ] );
    else:
      # NOTE(review): raises IndexError if no mono/courier/fixed font was
      # found at all - confirm whether that can happen on target systems.
      font_name = self.font_list[0]; # Take 1st one
      # log( self, ["get_font() : Using Font = " + font_name ] );
  try:
    font = self.pygame.font.SysFont( font_name , font_height );
    # log( self, ["get_font() : Using Font = " + font_name ] );
  except:
    font = self.pygame.font.Font( None , font_height );# Default Pygame Font
    # log( self, ["get_font() : Using Default Font"] );
  # Calculate Width and Height of font for future reference
  # txt = font.render("X",True, ( 255,255,255 ) );
  txt = font.render("4",True, ( 255,255,255 ) );
  self.txt_width = txt.get_width();
  self.txt_height = txt.get_height();
  return font;
###############################################################################
def screen_refresh( self ):
  # Full redraw cycle : erase the old frame, draw everything, then flip
  # the completed back buffer onto the display. No-op when the GUI is off.
  # Note: a draw_header() here would erase messages from things like
  # save_vcd, so the title bar is deliberately left alone.
  if ( self.gui_active != True ):
    return;
  screen_erase( self );  # Erase all the old stuff
  draw_screen( self );   # Draw the new stuff
  screen_flip( self );   # and xfer from pending to active
  return;
###############################################################################
def screen_flip( self ):
  # Present the completed back buffer on the display. This MUST be the
  # final step after all drawing commands. No-op when the GUI is inactive.
  if ( self.gui_active != True ):
    return;
  self.pygame.display.flip();
###############################################################################
def screen_erase( self ):
  # Clear the whole drawing surface to the configured background color and
  # refresh self.color_fg / self.color_bg from the vars dictionary.
  # No-op when the GUI is inactive.
  if ( self.gui_active != True ):
    return;
  def hex2rgb( h ):
    # Convert "00FF00" to ( 0,255,0 )
    return ( int( h[0:2], 16 ), int( h[2:4], 16 ), int( h[4:6], 16 ) );
  self.color_fg = hex2rgb( self.vars["color_screen_foreground"] );
  self.color_bg = hex2rgb( self.vars["color_screen_background"] );
  self.screen.fill( self.color_bg );
  return;
###############################################################################
def draw_screen( self ):
if ( self.gui_active == False ):
return;
# import math;
# print "draw_screen()";
# t0 = self.pygame.time.get_ticks();
if ( self.debug ):
print( "draw_screen()");
screen_w = self.screen.get_width();
screen_h = self.screen.get_height();
# v_scale = 1.25;# This provides a proportional gap between text lines
# v_scale = 1.10;# This provides a proportional gap between text lines
v_scale = 1.25;# This provides a proportional gap between text lines
bot_region_h = 5;
self.sig_name_stop_y = screen_h - ( bot_region_h * self.txt_height );
self.sig_value_stop_y = self.sig_name_stop_y;
# 1st Display the Net Names
# y = self.txt_height / 2; # Gap from top border
y = self.txt_height // 2; # Gap from top border
x = self.txt_width; # Gap from left border
self.sig_name_start_x = x;
self.sig_name_start_y = y;
# # Place all objects off-screen as they might be scrolled
# for sig_obj in self.signal_list:
# sig_obj.y = -100;
# Calculate how many signals will fit vertically on screen then make a
# scrolled copy of the signal list of only the signals to be displayed.
sample_h = int( (screen_h - (bot_region_h*self.txt_height)) / \
( self.txt_height*v_scale) );
# last_sig = int( self.sig_top + sample_h );
# self.sig_bot = last_sig-1;
# if ( last_sig > len( self.signal_list ) ):
# last_sig = len( self.signal_list );
# self.signal_list_cropped = self.signal_list[self.sig_top:last_sig];
self.signal_list_cropped = [];
vis_sigs = 0; i = 0;
for each in self.signal_list[self.sig_top:]:
i +=1;
if ( each.visible == True and vis_sigs < sample_h ):
self.signal_list_cropped.append( each );
vis_sigs += 1;
if ( vis_sigs == sample_h ):
break;# No Mas
# self.sig_bot = self.sig_top + i - 2;
self.sig_bot = self.sig_top + i - 1;
# print "vis_sigs = " + str( vis_sigs );
# 1st : Display the signal names on the left
# for sig_obj in self.signal_list_cropped:
# Iterate the entire list for the signal names as we dont want the max_w
# calculation to change on vertical scroll ( its annoying ). Make max_w
# calculated from the entire list. Try and reuse existing surface if valid
surface = self.name_surface;
if ( self.name_surface_valid != True ):
surface.fill( self.color_bg );
if ( self.debug ):
print( "name_surface_valid==False");
for ( i , sig_obj ) in enumerate( self.signal_list ):
if ( 1 == 1 ):
# Binary Signal? If standalone, no rip, if exp, display (n) bit pos
if ( sig_obj.bits_total == 1 or sig_obj.bits_total == 0 ):
if ( sig_obj.is_expansion == True ):
exp_str = " ";
rip_str = "(" + str( sig_obj.bit_top ) + ")";
else:
exp_str = " ";
rip_str = "";
# Hex signal, so display rip positions (n:m)
else:
rip_str="("+str(sig_obj.bit_top)+":"+str(sig_obj.bit_bot)+")";#(31:0)
exp_str="[+] ";
# Disable Signal Expansion and Collapse. Add back later
exp_str = " ";
# Divider Attributes
if ( sig_obj.collapsable == True ):
exp_str = "[-] ";
if ( sig_obj.expandable == True ):
exp_str = "[+] ";
if ( sig_obj.trigger == +1 ):
exp_str = "__/ ";
elif ( sig_obj.trigger == -1 ):
exp_str = "\__ ";
elif ( sig_obj.trigger == -2 ):
exp_str = "=WD ";
elif ( sig_obj.trigger == 2 ):
exp_str = "==0 ";# Pattern of 0
elif ( sig_obj.trigger == 3 ):
exp_str = "==1 ";# Pattern of 1
elif ( sig_obj.data_enable == True ):
exp_str = "=== ";
if ( sig_obj.selected == True ):
exp_str = exp_str + "[";
end_str = "]";
elif ( sig_obj.hidden == True ):
exp_str = exp_str + "#";
end_str = "#";
# elif ( sig_obj.grouped == True ):
# exp_str = exp_str + " ";# Indent group members
# end_str = ""; # Kinda wiggy if they get selected though
else:
exp_str = exp_str + " ";
end_str = " ";
# Indent to Hierarchy Level
exp_str = (sig_obj.hier_level*" ") + exp_str;
# Finally, if a nickname has been assigned display it instead of name
if ( sig_obj.nickname != "" ):
disp_name = sig_obj.nickname;
else:
disp_name = sig_obj.name;
txt_str = exp_str + disp_name + rip_str + end_str;# ie "[foo(7:0)]"
# If this is the widest net name of all, calc and remember pel width
if ( len( txt_str ) > self.max_w_chars ):
txt = self.font.render(txt_str,True,self.color_fg,self.color_bg);
self.max_w_chars = len( txt_str );# minimize measuring pels
self.max_w = txt.get_width();
# Only render and blit the TXT if visible and in current view
if ( ( sig_obj.visible == True ) and \
( i >= self.sig_top ) and \
( i <= self.sig_bot ) ):
txt = self.font.render(txt_str,True,self.color_fg,self.color_bg);
surface.blit(txt, (x,y ));
sig_obj.y = int( y );
sig_obj.h = self.txt_height*v_scale;
sig_obj.w = self.zoom_x;
y += self.txt_height*v_scale;
else:
sig_obj.y = -100; # Place it off screen for mouse lookup
self.name_surface_valid = True; # Our surface is now valid
self.sig_name_stop_x = self.sig_name_start_x + self.max_w;
# ^^ if ( self.name_surface_valid != True ) ^^
self.screen.blit( surface, ( 0, 0), \
( 0,0, self.sig_name_stop_x, self.sig_name_stop_y ) ) ;
# 2 1/2 Display signal value at active cursor position
self.net_curval_start_x = self.sig_name_stop_x;
self.net_curval_start_y = self.sig_name_start_y;
self.net_curval_stop_x = self.net_curval_start_x + 8 * self.txt_width;
self.net_curval_stop_y = self.sig_name_stop_y;
cur_obj = None;
surface = self.curval_surface;
if ( self.curval_surface_valid != True ):
surface.fill( self.color_bg );
if ( self.debug ):
print( "curval_surface_valid==False");
for each in self.cursor_list:
if ( each.selected == True ):
cur_obj = each;
if ( cur_obj != None ):
c_val = cur_obj.sample; # Sample Number
for sig_obj in self.signal_list_cropped:
if ( sig_obj.visible == True ):
if ( c_val < len( sig_obj.values ) ):
val = sig_obj.values[c_val];
else:
val = "X";
y1 = sig_obj.y;
x1 = self.net_curval_start_x;
txt = self.font.render( val , True, self.color_fg, self.color_bg );
surface.blit(txt, (x1,y1 ));
# self.screen.blit(txt, ( x1, y1 ) );
self.curval_surface_valid = True; # Our surface is now valid
self.screen.blit( surface,
( self.net_curval_start_x, self.net_curval_start_y ),
( self.net_curval_start_x, self.net_curval_start_y,
self.net_curval_stop_x, self.net_curval_stop_y ) ) ;
# 2nd Display the Net Values by corner turning data
# and calculate how many samples will fit in screen space
sample_start = self.sample_start;
self.sig_value_start_x = self.net_curval_stop_x + self.txt_width;
self.sig_value_start_y = self.sig_name_start_y;
start_x = self.sig_value_start_x;
y = self.sig_value_start_y;
# Warning: This sample_room calculation assumes samples are 1 nibble wide.
x2 = self.screen_width - start_x - 2*self.txt_width;
self.sample_room = int( float(x2) / float(self.zoom_x) );
self.sample_stop = sample_start + self.sample_room;
# Make sure we don't zoom out too far relative to total samples captured
if ( self.sample_room > self.max_samples ):
self.stop_zoom = True;
else:
self.stop_zoom = False;
# Check to see if our existing surface contains the sample range we need.
# IF it does, don't redraw, instead save time and blit region of interest.
# This saves considerable CPU time during standard left and right scrolling
# self.surface_stop = -1;
# surface = self.screen;
surface = self.value_surface;
# print("%d %d , %d %d" % ( sample_start, self.surface_start,
# self.sample_stop , self.surface_stop ));
if ( sample_start >= self.surface_start and
self.sample_stop <= self.surface_stop ):
None;
else:
# print("Rendering samples.");
surface.fill( self.color_bg );
if ( self.debug ):
print( "value_surface_valid==False");
# Grab 4x the number of samples needed to fill display
stop_4x = ( self.sample_stop-sample_start)*4 + sample_start;
stop_1x = ( self.sample_stop-sample_start) + sample_start;
if ( stop_4x > self.max_samples ):
stop_4x = self.max_samples;
if ( stop_1x > self.max_samples ):
stop_1x = self.max_samples;
# Only Look-ahead render 4x if num samples < 1000
if ( ( self.sample_stop-sample_start) > 1000 ):
stop_4x = stop_1x;
# print("NOTE: 4x look-ahead rendering disabled");
# print("Oy");
# print( sample_start );
# print( stop_tx );
# Rip thru all the signals ( vertically cropped ) and display visible ones
import time;
render_max_time = 0;# Don't Render DWORDs if rendering too slow
perc_updates_en = True;
fast_render = False;
no_header = True;
if ( self.sample_room > 50000 ):
fast_render = True;
for sig_obj in self.signal_list_cropped:
# Save time by not rendering DWORDs outside of viewport if RLE capture
# Does this work without SUMP displaying VCD files??
# Note: This didn't work after cropping and doesnt buy much, so removed
render = True;
# if ( self.bd != None ):
# ram_dwords = self.sump.cfg_dict['ram_dwords'];
# for j in range( 0, ram_dwords, 1 ):
# if ( sig_obj.name == "dword[%d]" % j ):
# if ( self.dwords_stop < sample_start or
# self.dwords_start > stop_4x or
# render_max_time > 10 ):
# print("Culling "+sig_obj.name);
# render = False;
# This simpler version of above will not render DWORDs if any signal
# prior took more than 5 seconds.
if ( self.bd != None ):
ram_dwords = self.sump.cfg_dict['ram_dwords'];
for j in range( 0, ram_dwords, 1 ):
if ( sig_obj.name == "dword[%d]" % j ):
# if ( render_max_time > 5 ):
if ( render_max_time > 2 ):
print("Culling "+sig_obj.name);
render = False;
if ( sig_obj.visible == True and render == True ):
x = start_x;
y = sig_obj.y;
val_last = "";
last_trans_x = start_x;
last_width = None;
last_x = 0;
x_last = x;
y_last = y;
# Rip thru the visible values and display. Also convert number format
sample = sample_start;
total_count = stop_4x - sample_start;
next_perc = 0;# Display an update every 5%
render_start_time = time.time();
if ( sig_obj.hidden == False and len( sig_obj.values ) > 0 ):
if ( fast_render == False ):
hdr_txt = "Full Rendering ";
else:
hdr_txt = "Fast Rendering ";
if ( no_header == False ):
draw_header( self,hdr_txt+sig_obj.name );
# CRITICAL LOOP
# line_list = [];
k = 0; perc_cnt = 0;
perc5 = total_count * 0.05;
# for (i,val) in enumerate( sig_obj.values[sample_start:stop_4x+1] ):
# Use Python set() function to determine if all samples are same
samples_diff=(len(set(sig_obj.values[sample_start:stop_4x+1]))!=1);
for val in sig_obj.values[sample_start:stop_4x+1]:
k +=1;
if ( k > perc5 and perc_updates_en == True ):
k = 0;
perc_cnt += 5;
if ( no_header == False ):
draw_header(self,hdr_txt+sig_obj.name+" "+str(perc_cnt)+"%");
if ( fast_render==False and (time.time()-render_start_time)>2):
print("Enabling fast_render engine");
fast_render = True;
if ( (time.time()-render_start_time)< 0.2):
no_header = True;
else:
no_header = False;
# perc = ( 100 * i ) // total_count;
# if ( perc >= next_perc and perc_updates_en == True ):
# draw_header(self,"Rendering "+sig_obj.name+" "+str( perc )+"%");
# next_perc += 5;# Next 5%, this counts 0,5,10,...95
# if ( fast_render==False and (time.time()-render_start_time)>2):
# print("Enabling fast_render engine");
# fast_render = True;
# Only draw_sample() if integer portion of X has changed since last
# this handles zoom_full case of zoom_x < 1.0 to minimize drawing
if ( True ):
if ( sig_obj.format == "unsigned" ):
try:
val = int( val, 16 );
except:
val = 0;
val = "%d" % val;
if ( sig_obj.format == "signed" ):
try:
val = int( val, 16 );
except:
val = 0;
# For 8bit number if > 127, substract 256 from it to make neg
# ie 0xFF becomes -1, 0xFE becomes -2
if ( val > self.math.pow(2, sig_obj.bits_total-1) ):
val -= int(self.math.pow(2, sig_obj.bits_total));
val = "%d" % val;
if ( sig_obj.format != "bin" or fast_render == False ):
(last_trans_x,last_width) = draw_sample( self, surface, \
val,val_last,last_trans_x,last_width,sig_obj.format,x,y);
elif ( sig_obj.format == "bin" and fast_render == True and \
samples_diff == True ):
# Draw "_/ \___/ \___" lines for binary format
# fast_render doesnt draw every sample but instead draws lines
# whenever sample value changes. 3x faster, but leaves voids
if ( val != val_last ):
x1 = int(x+1);
x2 = int(x+1);
y1 = y + 2;
y2 = y + self.txt_height - 2;
if ( val == "1" ):
self.pygame.draw.line(surface,self.color_fg,
(x_last,y_last),(x2,y2));
x_last = x1;
y_last = y1;
else:
self.pygame.draw.line(surface,self.color_fg,
(x_last,y_last),(x1,y1));
x_last = x2;
y_last = y2;
# Vertical Line
self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2));
# line_list += [(x1,y1),(x2,y2)];# 8x slower. GoFigure
val_last = val;
x += self.zoom_x;
sample +=1;
# Remember x location of last sample drawn
if ( sample == self.sample_stop ):
self.sig_value_stop_x = x;
# if ( len( line_list ) > 0 ):
# self.pygame.draw.lines(surface,self.color_fg,False,line_list,1);
render_stop_time = time.time();
if ( ( render_stop_time - render_start_time ) > render_max_time ):
render_max_time = render_stop_time - render_start_time;
if ( ( render_stop_time - render_start_time ) < 2 ):
perc_updates_en = False;# Don't update if rendering in less 2sec
else:
print(sig_obj.name+" %.2f Seconds" % \
(render_stop_time-render_start_time ) );
if ( fast_render==False and (render_stop_time-render_start_time)>3):
print("Enabling fast_render engine");
fast_render = True;
self.sig_value_stop_y = y;
# Remember whats in the value_surface start:stop samples
self.surface_start = sample_start;
# Hack fix for strange performance bug. When viewing all samples, the
# variable sample is less than self.sample_stop and this surface never
# gets cached. Normally sample is greater than self.sample_stop which
# support fast scrolling when zoomed in.
#if ( sample_start == 0 ):
# self.surface_stop = self.sample_stop;
#else:
# self.surface_stop = sample;
if ( sample < self.sample_stop ):
self.surface_stop = self.sample_stop;
else:
self.surface_stop = sample;
# print("Rendering samples done");
if ( fast_render == True ):
txt = "Fast Rendering Complete";
else:
txt = "Full Rendering Complete";
draw_header( self, txt );
x = self.sig_value_start_x;
y = self.sig_value_start_y;
w = self.sig_value_stop_x - self.sig_value_start_x;
h = self.sig_value_stop_y - self.sig_value_start_y + self.txt_height;
x_offset = x + int( ( sample_start - self.surface_start ) * self.zoom_x );
# Speed up the Vertical Scroll Operations by not redrawing the value surface
# while the signal list is scrolling offscreen.
if ( self.vertical_scrolled_offscreen == False ):
self.screen.blit( self.value_surface, ( x, self.sig_value_start_y),
(x_offset,y, w, h ) );
# 3rd Display any cursors
self.cursor_list[0].y = self.screen_height - (4*self.txt_height) + \
int(self.txt_height/2);
self.cursor_list[1].y = self.cursor_list[0].y + self.txt_height;
self.cursor_start_y = self.cursor_list[0].y;
self.cursor_stop_y = self.cursor_list[1].y;
for cur_obj in self.cursor_list:
if ( cur_obj.visible == True ):
x1 = self.sig_value_start_x + \
(( cur_obj.sample - self.sample_start) * self.zoom_x );
x1 += 1; # Draw right at the transition markers
x2 = x1;
y1 = self.sig_value_start_y;
y2 = cur_obj.y -1 ;
cur_obj.x = x1;
if ( x1 >= self.sig_value_start_x and
x1 <= self.sig_value_stop_x ):
if ( cur_obj.selected == True ):
self.pygame.draw.line(self.screen,self.color_fg,(x1,y1),(x2,y2),2);
else:
self.pygame.draw.line(self.screen,self.color_fg,(x1,y1),(x2,y2),1);
# txt = cur_obj.name;# ie "Cursor1"
c_val = cur_obj.sample; # Display Location Instead
c_mult = float( self.vars["cursor_mult"] );
# c_val *= c_mult;# For converting to time units instead of samples
# txt = " " + str( c_val ) + " " + self.vars["cursor_unit"] + " ";
txt = " " + str( c_val ) + " ";
if ( cur_obj.selected == True ):
self.font.set_bold( True );
txt = self.font.render( txt , True, self.color_fg, self.color_bg );
if ( cur_obj.selected == True ):
self.font.set_bold( False );
# x1 -= txt.get_width()/2;
x1 -= int( txt.get_width()/2 );
self.screen.blit(txt, ( x1, cur_obj.y ) );
# 4th Measure num samples betwen two cursors and display
# Make c1 always smaller than c2 to avoid negatives
if ( self.cursor_list[0].sample < self.cursor_list[1].sample ):
c1_sample = self.cursor_list[0].sample;
c2_sample = self.cursor_list[1].sample;
x1 = self.cursor_list[0].x;
x2 = self.cursor_list[1].x;
else:
c1_sample = self.cursor_list[1].sample;
c2_sample = self.cursor_list[0].sample;
x1 = self.cursor_list[1].x;
x2 = self.cursor_list[0].x;
# If a cursor is off screen, make x1,x2 the screen edge
if ( c1_sample < sample_start ):
x1 = self.sig_value_start_x;
if ( c1_sample > self.sample_stop ):
x1 = self.sig_value_stop_x;
if ( c2_sample < sample_start ):
x2 = self.sig_value_start_x;
if ( c2_sample > self.sample_stop ):
x2 = self.sig_value_stop_x;
# 5th calculate where to put the measurement text, centered between markers
# or edge of the screen and on-screen-marker. Only display if a cursor is vis
if ( ( c1_sample >= sample_start and c1_sample <= self.sample_stop ) or \
( c2_sample >= sample_start and c2_sample <= self.sample_stop ) ):
# Draw horizontal measurement bar at y location of Cur1
y1 = self.cursor_list[0].y - int ( self.txt_height / 2 );
y2 = y1;
self.pygame.draw.line(self.screen,self.color_fg,(x1,y1),(x2,y2),1);
# Now draw the measurement text for the cursor
# y = y1 - (self.txt_height/2);
y = y1 - int(self.txt_height/2);
c2c1_delta = float(c2_sample-c1_sample);
# c_mult = float( self.vars["cursor_mult"] );
# c2c1_delta *= c_mult;
if ( self.bd != None ):
freq_mhz = self.sump.cfg_dict['frequency'];
else:
freq_mhz = 100.0;# HACK PLACEHOLDER ONLY !!
c_mult = 1000.0 / freq_mhz;
if ( self.undersample_data == True ):
c2c1_delta *= self.undersample_rate;
c2c1_delta_ns = c2c1_delta * float(c_mult);
c2c1_delta = int(c2c1_delta);
c2c1_delta_str = str(c2c1_delta);
# txt = " " + str( c2c1_delta ) + " " + self.vars["cursor_unit"] + " ";
# txt = " " + str( c2c1_delta_ns ) + " ns, " + str( c2c1_delta ) + " clocks";
# txt = " " + ("%.3f" % c2c1_delta_ns ) + " ns, " + \
# str( c2c1_delta ) + " clocks";
delta_str = locale.format('%.3f', c2c1_delta_ns, True );
# For undersampled data, label measurements with "~" for approximate
if ( self.undersample_data == True ):
delta_str = "~"+delta_str;
c2c1_delta_str = "~"+c2c1_delta_str;
txt = " " + delta_str + " ns, " + c2c1_delta_str + " clocks";
# txt = " " + delta_str + " ns, " + str( c2c1_delta ) + " clocks";
txt = self.font.render( txt, True, self.color_fg, self.color_bg );
w = txt.get_width();
h = self.txt_height;
# If the width of text is less than the space between cursors, display
# between, otherwise, display to the right of rightmost cursor
if ( w < ( x2-x1 ) ):
# x = x1 + ( x2-x1 )/2 - (w/2);
x = x1 + int(( x2-x1 )/2) - int(w/2);
else:
x = x2 + self.txt_width;
self.pygame.draw.rect( self.screen, self.color_bg ,(x,y,w,h), 0);
self.screen.blit(txt, ( x, y ) );
# 6th Draw the sample viewport dimensions
# Example: 100-200 of 0-1024. Make the width 1024-1024 so it doesnt change
txt1 = str(sample_start)+"-"+str(self.sample_stop);
txt2 = str( 0 )+"-"+str(self.max_samples);
txt3 = txt2 + " : " + txt1;
# txt1 = self.font.render( txt1, True, self.color_fg, self.color_bg );
# txt2 = self.font.render( txt2, True, self.color_fg, self.color_bg );
txt3 = self.font.render( txt3, True, self.color_fg, self.color_bg );
y1 = self.cursor_list[0].y;
y2 = self.cursor_list[1].y;
# x = self.net_curval_start_x;
x = self.txt_width; # Small Gap from left border
# self.screen.blit(txt1, ( x, y1 ) );
# self.screen.blit(txt2, ( x, y2 ) );
# self.screen.blit(txt3, ( x, y2 ) );
# print (str(self.max_samples));# HERE13
y = self.screen_height - int(self.txt_height * 1.5 );
x = self.sig_name_start_x;
# Draw slider graphics for current view windows | |--| |
x1 = self.sig_value_start_x;
x2 = self.sig_value_stop_x;
y1 = y;
y2 = y1 + self.txt_height;
y3 = y1 + int(self.txt_height/2);
self.screen.blit(txt3, ( x, y1 ) );
lw = 1;# Line Width skinny, deselected
self.pygame.draw.line(self.screen,self.color_fg,(x1,y1),(x1,y2),lw);
self.pygame.draw.line(self.screen,self.color_fg,(x2,y1),(x2,y2),lw);
# print("max_samples is " + str( self.max_samples ) );
x3 = x1 + ( ( (x2-x1) * sample_start // self.max_samples ) );
x4 = x1 + ( ( (x2-x1) * self.sample_stop // self.max_samples ) );
w = x4-x3;
h = y2-y1;
self.slider_width = w;
lw = 1;# Line Width skinny, deselected
self.pygame.draw.line(self.screen,self.color_fg,(x3,y1),(x3,y1+h),lw);
self.pygame.draw.line(self.screen,self.color_fg,(x3+w,y1),(x3+w,y1+h),lw);
self.pygame.draw.line(self.screen,self.color_fg,(x3,y1+h/2),(x3+w,y1+h/2),lw);
# 7th - cleanup. Draw black box over area on right, one character width.
w = self.txt_width;
y = self.sig_value_start_y;
h = self.sig_value_stop_y - y;
x = self.screen_width - w;
self.pygame.draw.rect( self.screen, self.color_bg ,(x,y,w,h), 0);
# 8th Display the keyboard buffer and command history in a text box
if ( self.txt_entry == False):
x = self.sig_name_start_x;
y = self.sig_name_stop_y + int(self.txt_height/2);
h = self.screen_height - ( y );
w = self.sig_value_start_x - x;
# prompt = ">";
# cursor = "_";
# cmd_txt = prompt + self.key_buffer+cursor+" ";
# txt_list = self.cmd_history[-3:] + [ cmd_txt ];
cmd_txt = "";
if ( self.acq_state != "acquire_stop" ):
cmd_txt = "ACQUIRING";
# cmd_txt = "ACQUIRING ";
# if ( self.spin_char == "-" ): self.spin_char = "\\";
# elif ( self.spin_char == "\\" ): self.spin_char = "|";
# elif ( self.spin_char == "/" ): self.spin_char = "-";
# else : self.spin_char = "-";
if ( self.spin_char == "." ) : self.spin_char = "..";
elif ( self.spin_char == ".." ) : self.spin_char = "...";
elif ( self.spin_char == "..." ) : self.spin_char = "....";
elif ( self.spin_char == "...." ) : self.spin_char = ".....";
elif ( self.spin_char == "....." ) : self.spin_char = "";
else : self.spin_char = ".";
draw_header( self,"Waiting for Trigger "+self.spin_char );
# print( self.spin_char );
# txt_list = [ "","","", cmd_txt ];
# draw_txt_box( self, txt_list, x, y, w, h, False );
# draw_header( self,cmd_txt);
# Note: This moved to DOS-Box
# 9th or display a text entry popup box
# if ( self.txt_entry == True ):
# txt_list = ["Hello There"];
# w = ( self.txt_width * 20 );
# h = ( self.txt_height * 3 );
# x = ( self.screen_width / 2 ) - ( w / 2 );
# y = ( self.screen_height / 2 ) - ( h / 2 );
# prompt = ">";
# cursor = "_";
# cmd_txt = prompt + self.key_buffer+cursor+" ";
# txt_list = [ self.txt_entry_caption, cmd_txt ];
# draw_txt_box( self, txt_list, x, y, w, h, True );
# Just for Debug, display the regions by drawing boxes around them.
# x = self.sig_name_start_x;
# w = self.sig_name_stop_x - x;
# y = self.sig_name_start_y;
# h = self.sig_name_stop_y - y;
# self.pygame.draw.rect( self.screen, self.color_fg ,(x,y,w,h), 1);
# x = self.sig_value_start_x;
# w = self.sig_value_stop_x - x;
# y = self.sig_value_start_y;
# h = self.sig_value_stop_y - y;
# self.pygame.draw.rect( self.screen, self.color_fg ,(x,y,w,h), 1);
# t1 = self.pygame.time.get_ticks();
# td = t1-t0;
# print td;
return;
###############################################################################
# Draw an individual sample on a surface. Returns the x location of the last
# transition point, as this determines where and when new hex values are to be
# displayed. Its a bit of a tricky algorithm as it centers the values when
# zoom_x is large ( and there is room to display ). When zoom_x is small, it
# only displays values to the right of the last transition point assuming there
# are multiple samples with the same value. When zoom_x is small and the values
# are transitioning, display nothing.
# CRITICAL FUNCTION
def draw_sample(self,surface,val,val_last,last_transition_x,last_width, \
                format,x,y):
  """Draw a single waveform sample onto surface at (x,y).

  Returns (last_transition_x, last_width) so the caller can track where the
  last value transition happened; that x location decides where and when new
  hex values may be drawn.  When zoom_x is large the value text is centered
  between transitions; when zoom_x is small the text is only drawn to the
  right of the last transition if multiple equal samples leave enough room;
  when samples are transitioning rapidly nothing is drawn.

  Parameters (as used by the visible code):
    surface           : pygame surface being drawn on
    val, val_last     : current and previous sample value strings
    last_transition_x : x pixel of the most recent value change
    last_width        : cached rendered text width from the previous call
                        (None forces recomputation) - hex format only
    format            : "hex", "unsigned", "signed" or "bin"
    x, y              : pixel location of this sample
  NOTE: 'format' shadows the builtin of the same name; kept as-is since this
  is a documentation-only pass.
  """
  if ( self.gui_active == False ):
    return;
  # Draw "<012345678><><>" style boxes for multi-bit formats
  if ( format == "hex" or format == "unsigned" or format == "signed" ):
    # display Hex if diff from last time OR last time there wasnt room
    # Note: Dramatic speedup (2x) by not doing this render here on hex
    # txt = self.font.render( val , True, self.color_fg );
    if ( format == "hex" ):
      if ( last_width != None ):
        txt_width = last_width;# For 13s render this saved 1s (cache hit)
      else:
        # Hex strings are fixed-pitch, so width is chars * font cell width
        txt_width = len( val ) * self.txt_width;
        last_width = txt_width;
      # Drawing X's was costly in time 10s of 13s total. So Don't, just return
      if ( val == "XXXXXXXX" ):
        return (last_transition_x,last_width);
    else:
      # unsigned/signed strings vary in length; must render to measure
      txt = self.font.render( val , True, self.color_fg );
      txt_width = txt.get_width();
    # Is there room to display sample value?
    free_space_x = x - last_transition_x;
    if ( ( val != val_last ) or
         ( val == val_last and txt_width+5 > free_space_x )
       ):
      if ( val != val_last ):
        last_transition_x = x;
      # Recompute available space including the width of this sample cell
      free_space_x = x + self.zoom_x - last_transition_x;
      if ( txt_width+5 < free_space_x ):
        # Center the value text between the transition points
        # x3 = last_transition_x + int(free_space_x/2) - int(txt_width/2);
        x3 = last_transition_x + int(free_space_x//2) - int(txt_width//2);
        txt = self.font.render( val , True, self.color_fg );
        # surface.blit(txt, ( x3 , y ));
        surface.blit(txt, ( x3 , y+1 ));
    # If current sample is different than last, draw transition X
    if ( val != val_last ):
      y1 = y+0;
      y2 = y+self.txt_height - 2;
      # Draw crossing "X" for transitions
      x1 = x+2; x2 = x-0;
      self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2),1);
      self.pygame.draw.line(surface,self.color_fg,(x2,y1),(x1,y2),1);
    if ( val != val_last ):
      x1 = x+2; x2 = x-0 + self.zoom_x;# Dash for 'X' space
    else:
      x1 = x+0; x2 = x-0 + self.zoom_x;# Solid for non transition
    # Draw Line above and below the value
    if ( True ):
      y1 = y+0; y2 = y1;
      self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2),1);
      y1 = y + self.txt_height - 2; y2 = y1;
      self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2),1);
  # Draw "_/ \___/ \___" lines for binary format
  if ( format == "bin" ):
    x = x + 1; # Align transition with hex transition spot
    if ( val == "0" ):
      # Low rail near the bottom of the row
      x1 = int(x);
      x2 = int(x + self.zoom_x);
      y1 = y + self.txt_height - 2;
      y2 = y1;
      self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2),1);
    elif ( val == "1" ):
      # High rail near the top of the row
      x1 = int(x);
      x2 = int(x + self.zoom_x);
      y1 = y + 2;
      y2 = y1;
      self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2),1);
    if ( val != val_last ):
      # Vertical edge connecting the two rails on a transition
      x1 = int(x);
      x2 = int(x);
      y1 = y + 2;
      y2 = y + self.txt_height - 2;
      self.pygame.draw.line(surface,self.color_fg,(x1,y1),(x2,y2),1);
  return (last_transition_x,last_width);
###############################################################################
# draw_txt_box(): Draw a txt box from a list to (x,y) and crop to (w,h)
def draw_txt_box( self, txt_list, x, y, w, h, border ):
  """Render the strings of txt_list at (x,y), cropping lines to (w,h).

  When border is True a filled background rectangle with a 3-pixel
  outline is drawn first.  Lines that would extend past the bottom of
  the box (minus a half-character margin) are not drawn.
  """
  if ( self.gui_active == False ):
    return;
  if ( border == True ):
    # Filled background first, then the outline on top of it
    self.pygame.draw.rect( self.screen, self.color_bg,(x,y,w,h), 0 );
    self.pygame.draw.rect( self.screen, self.color_fg,(x,y,w,h), 3 );
  text_x = x + int(self.txt_width / 2); # half-char left margin
  text_w = w - int(self.txt_width );    # whitespace on both sides
  text_y = y;
  bottom_limit = y + h - ( self.txt_height / 2 );
  for line_str in txt_list:
    if ( ( text_y + self.txt_height ) >= bottom_limit ):
      break;# No vertical room left inside the box
    rendered = self.font.render( line_str , True, self.color_fg, self.color_bg );
    self.screen.blit( rendered, (text_x,text_y), ( (0,0) , (text_w,h) ) );
    text_y += self.txt_height;
  return;
def debug_vars( self ):
  """Print a debug marker; the detailed variable dumps are disabled."""
  print( "debug_vars()");
  return;
###############################################################################
# Take a sig_obj of N nibbles and return 2 new sig_objs of N/2 nibbles
def expand_signal( sig_obj ):
  """Split a sig_obj of N nibbles into two new sig_objs of N/2 nibbles each.

  Returns [top_half, bottom_half].  An odd nibble count is first padded up
  with a leading "0" nibble so the value can be halved cleanly (e.g. a
  7-nibble / 28-bit value becomes 8 nibbles / 32 bits).  Each hex value
  string is reversed so nibble [0] is the LSB, sliced in half, then
  reversed back before being stored.
  """
  # num_nibs = sig_obj.bits_total / 4 ; # ie 8 nibs for 32 bits
  num_nibs = sig_obj.bits_total // 4 ; # ie 8 nibs for 32 bits
  pad_nib = False;
  if ( (num_nibs/2.0) != int(num_nibs/2) ):
    num_nibs += 1;# If 7 nibbles, convert to 8, etc so can divide in half
    pad_nib = True;
  # num_nibs = num_nibs / 2;
  num_nibs = num_nibs // 2;  # nibbles per half after the split
  num_bits = num_nibs * 4;
  new_signals = [];
  # Placeholder suffixes for the half names (currently empty strings)
  bits_top = "";
  bits_bot = "";
  sig_obj_top = signal(name = sig_obj.name + bits_top );# ie "foo(31:16)
  sig_obj_bot = signal(name = sig_obj.name + bits_bot );# ie "foo(15:0)
  for each in sig_obj.values:
    if ( pad_nib == True ):
      each = "0" + each; # Converts 28bits to 32bits, etc
    value = each[::-1];# Reverse "12345678" to "87654321" so that 8 is at [3:0]
    value_bot = ( value[0:num_nibs] );
    value_top = ( value[num_nibs:2*num_nibs] );
    # Reverse each half back into normal MSB-first ordering before storing
    sig_obj_bot.values.append( value_bot[::-1] );
    sig_obj_top.values.append( value_top[::-1] );
  # Recompute the bit-range bookkeeping for the two halves
  sig_obj_bot.bits_total = num_nibs * 4;
  sig_obj_top.bits_total = num_nibs * 4;
  sig_obj_bot.bit_bot = sig_obj.bit_bot;
  sig_obj_bot.bit_top = sig_obj_bot.bit_bot + sig_obj_bot.bits_total - 1;
  sig_obj_top.bit_bot = sig_obj.bit_bot + sig_obj_top.bits_total;
  sig_obj_top.bit_top = sig_obj_top.bit_bot + sig_obj_top.bits_total - 1;
  sig_obj_top.is_expansion = True;
  sig_obj_bot.is_expansion = True;
  new_signals.append( sig_obj_top );
  new_signals.append( sig_obj_bot );
  return new_signals;
###############################################################################
# Take a signal of 1 nibbles and return 4 new binary signals
def expand_signal_nib2bin( sig_obj ):
  """Expand a 1-nibble signal into 4 single-bit binary signals, MSB first."""
  expanded = [];
  # Walk the four bit masks from MSB (8) down to LSB (1)
  for ( i, mask ) in enumerate( [ 8, 4, 2, 1 ] ):
    bit_obj = signal(name=sig_obj.name);
    bit_obj.bits_total = 1;
    bit_obj.bit_bot = sig_obj.bit_top - i;
    bit_obj.bit_top = sig_obj.bit_top - i;
    bit_obj.format = "bin";
    bit_obj.is_expansion = True;
    for nib in sig_obj.values:
      bit_obj.values.append( "1" if ( int( nib, 16 ) & mask ) else "0" );
    expanded.append( bit_obj );
  return expanded;
# Given "/tb_resampler/u_dut/din(7:0)", return "din(7:0)"
def split_name_from_hier( hier_name ):
  """Strip all whitespace, then return the text after the final '/'."""
  stripped = "".join( hier_name.split() );
  return stripped.split('/')[-1];
# load_format_delete_list() : This is similar to load_format() but is used to
# create a special delete list that tells the VCD parser to not bother with
# deleted signals
def load_format_delete_list( self, file_name ):
  """Build self.signal_delete_list from a format file.

  Similar to load_format() but only records the visibility / deletion
  attributes so the VCD parser can skip deleted signals entirely.
  """
  try: # Read Input File
    file_in = open( file_name , "r" );
    file_lines = file_in.readlines();
    file_in.close();
  except:
    print( "ERROR Input File: "+file_name);
    return;
  parsed_list = [];
  for line in file_lines:
    tokens = " ".join(line.split()).split(' ') + [None] * 20;
    if ( tokens[0][0:1] == "#" ):
      continue;# Skip comment lines
    # Look up (or create) the sig_obj, then apply the flag attributes
    sig_obj = add_signal( self, tokens[0].lstrip() );
    sig_obj.hidden  = ( "-hidden"  in line );
    sig_obj.deleted = ( "-deleted" in line );
    sig_obj.visible = ( "-invisible" not in line );
    parsed_list.append( sig_obj );
  self.signal_delete_list = parsed_list[:];
# load_format() : A format file ( wave.txt ) looks like a ChipVault HLIST.TXT
# indentation indicates hierarchy order
#/tb_vcd_capture
# /tb_vcd_capture/u_dut
# clk
# reset
# /tb_vcd_capture/u_dut/mode
#
def load_format( self, file_name ):
  """Load a wave format file into self.signal_list.

  The file looks like a ChipVault HLIST.TXT: leading-space indentation
  indicates hierarchy depth.  Each non-comment line names a signal and
  may carry attribute flags (-bundle, -hidden, -deleted, -invisible,
  -hex, -unsigned, -signed, -nickname <name>).  Also deselects all
  signals when done.
  """
  new_signal_list = [];
  try: # Read Input File
    file_in = open( file_name , "r" );
    file_lines = file_in.readlines();
    file_in.close();
  except:
    print( "ERROR Input File: "+file_name );
    # BUG FIX: previously execution fell through here and the loop below
    # raised NameError on the undefined file_lines. Bail out instead.
    return;
  # 1st Iteration assigns a space count to each hierarchy level
  # Makes the 1st one level 0
  hier_level = -1;
  hier_space = -1;
  hier_dict = {};
  last_sig_obj = None;
  for each in file_lines:
    words = " ".join(each.split()).split(' ') + [None] * 20;
    if ( words[0][0:1] != "#" ):
      name = words[0].lstrip();
      # Create (or look up) the sig_obj
      sig_obj = add_signal( self, name );
      # Assign default attributes, then apply any flags on the line
      sig_obj.collapsable = False;
      sig_obj.expandable = False;
      sig_obj.visible = True;
      sig_obj.hidden = False;
      if ( "-bundle" in each ):
        sig_obj.type = "bundle";
      if ( "-hidden" in each ):
        sig_obj.hidden = True;
      if ( "-deleted" in each ):
        sig_obj.deleted = True;
      if ( "-invisible" in each ):
        sig_obj.visible = False;
      if ( "-hex" in each ):
        sig_obj.format = "hex";
      if ( "-unsigned" in each ):
        sig_obj.format = "unsigned";
      if ( "-signed" in each ):
        sig_obj.format = "signed";
      if ( "-nickname" in each ):
        for ( i , each_word ) in enumerate( words ):
          if ( each_word == "-nickname" ):
            if ( words[i+1] != "None" ):
              sig_obj.nickname = words[i+1];# Assume this word exists
      # Calculate Hierarchy Location by counting whitespace
      space_cnt = len( each ) - len( each.lstrip() );
      if ( space_cnt > hier_space ):
        hier_space = space_cnt;
        hier_level += 1;
        hier_dict[ hier_space ] = hier_level;
        # Since the hierarchy level got deeper, the last guy is a parent
        # so assign parent attribute collapsable.
        # Assign [+] or [-] based on visibility of 1st object
        if ( last_sig_obj != None ):
          if ( sig_obj.visible == False ):
            last_sig_obj.collapsable = False;
            last_sig_obj.expandable = True;
          else:
            last_sig_obj.collapsable = True;
            last_sig_obj.expandable = False;
      else:
        # NOTE: an indent level never seen before (inconsistent file)
        # raises KeyError here, as in the original implementation.
        hier_level = hier_dict[ space_cnt ];
        hier_space = space_cnt;
      sig_obj.hier_level = hier_level;
      new_signal_list.append( sig_obj );
      last_sig_obj = sig_obj;
  self.signal_list = new_signal_list[:];
  # Unselect Everything
  self.sig_obj_sel = None;
  for sig_obj in self.signal_list:
    sig_obj.selected = False;# DeSelect All
  return;
# Given a name, return an object that matches the name or create a new one
def add_signal( self, name ):
  """Return the existing sig_obj matching name, or create a new one.

  A match is either the full "hier_name/name" path or the bare name.
  When several entries match, the LAST match wins (original behavior,
  preserved).  Unmatched names get a fresh signal object with zeroed
  bit-range bookkeeping (used for dividers, groups, etc).
  """
  sig_obj = None;
  # Look for the name in the signal list and assign to sig_obj if found.
  # Deliberately no break: the last matching entry is the one returned.
  for each in self.signal_list:
    if ( ( (each.hier_name + "/" + each.name) == name ) or \
         ( ( each.name) == name ) ):
      sig_obj = each;
  # If name wasnt found, create new object ( Divider, Group, etc )
  if ( sig_obj == None ):
    sig_obj = signal( name= split_name_from_hier( name ) );
    # BUG FIX: the allowed-types list below was a stray no-op tuple
    # expression in the original; it is now the comment it was meant to be.
    sig_obj.type = "";# one of "signal", "group", "endgroup", "divider"
    sig_obj.bits_per_line = 0;
    sig_obj.bits_total = 0;
    sig_obj.bit_top = 0;
    sig_obj.bit_bot = 0;
    sig_obj.format = "";
  return sig_obj;
def add_wave( self, words ):
  """Process an "add_wave" command line (pre-split into words).

  words[0] is the command, words[1] the type ("group", "endgroup",
  "divider", ...), words[2] the hierarchical signal name (any "(7:0)"
  bit-range suffix is stripped).  Returns the matching or newly created
  sig_obj.  NOTE(review): words is presumably padded with trailing None
  entries by the caller (as other parsers in this file do), so the
  words[i+1] nickname lookup is assumed safe -- confirm against callers.
  """
  # Change "foo(7:0)" to "foo" so that it matches hier_name+"/"+name
  signal_hier_name = words[2];
  i = signal_hier_name.find("(");
  if ( i != -1 ):
    signal_hier_name = signal_hier_name[0:i];# Strip the rip
  if ( words[0] == "add_wave" ):
    sig_obj = None;
    # Look for the name in the signal list and assign to sig_obj if found
    for each in self.signal_list:
      # Find object of signal_hier_name in old signal list, append to new
      # after assigning some attributes.  No break: last match wins.
      if ( ( (each.hier_name + "/" + each.name) == signal_hier_name ) or \
           ( ( each.name) == signal_hier_name ) ):
        sig_obj = each;
    # If name wasnt found, create new object ( Divider, Group, etc )
    if ( sig_obj == None ):
      sig_obj = signal( name= split_name_from_hier( signal_hier_name ) );
      sig_obj.type = words[1];# "group", "endgroup", "divider"
      sig_obj.bits_per_line = 0;
      sig_obj.bits_total = 0;
      sig_obj.bit_top = 0;
      sig_obj.bit_bot = 0;
      sig_obj.format = "";
    # Defaults (applied to both found and new objects), then flag overrides
    sig_obj.visible = True; # Default to visible
    sig_obj.grouped = False; # Default to not grouped
    for ( i , each_word ) in enumerate( words ):
      if ( each_word == "-hidden" ):
        sig_obj.visible = False; # Hide
      elif ( each_word == "-expandable" ):
        sig_obj.expandable = True;
        sig_obj.collapsable = False;
      elif ( each_word == "-collapsable" ):
        sig_obj.collapsable = True;
        sig_obj.expandable = False;
      elif ( each_word == "-grouped" ):
        sig_obj.grouped = True; # Part of a group
      elif ( each_word == "-nickname" ):
        sig_obj.nickname = words[i+1];# Assume this word exists;
    # Append old object to new list
    return sig_obj;
###############################################################################
# Dump the signal_list to an ASCII hlist.txt
def save_format( self, file_name, selected_only ):
  """Dump self.signal_list to an ASCII hlist-style format file.

  Each line is hierarchy-indented and carries attribute flags (-bundle,
  -hidden, -invisible, -<format>, -nickname).  When selected_only is
  True only selected signals are written.  An empty signal list (e.g.
  after a crash) leaves any existing file untouched.
  """
  log( self, ["save_format() " + file_name ] );
  out_list = [];
  for sig_obj in self.signal_list:
    hier_str = (sig_obj.hier_level*" ");# Indentation encodes hierarchy
    attribs = "";
    if ( sig_obj.type == "bundle" ):
      attribs += " -bundle";
    if ( sig_obj.hidden == True ):
      attribs += " -hidden";
    if ( sig_obj.visible == False ):
      attribs += " -invisible";
    if ( sig_obj.format != "bin" and sig_obj.format != "" ):
      attribs += " -" + sig_obj.format;
    if ( sig_obj.nickname != "" ):
      attribs += " -nickname " + sig_obj.nickname;
    rts = hier_str + sig_obj.hier_name + "/" + sig_obj.name + " " + attribs;
    # BUG FIX: was "each.selected" -- 'each' is unbound at this point, so
    # selected_only=True raised NameError. Use the loop variable instead.
    if ( selected_only == False or sig_obj.selected == True ):
      out_list += [ rts ];
  # When SUMP2 crashes, it tends to leave empty signal list, so keep old file
  if ( len( out_list ) > 0 and self.vcd_import == False ):
    import os;
    if ( os.path.exists( file_name ) == True ):
      os.remove( file_name );
    file_out = open( file_name , "w" ); # Append versus r or w
    for each in out_list:
      file_out.write( each + "\n" );
    print( "closing ", file_name);
    file_out.close();
  else:
    print("ERROR: Empty Signal List");
  return;
########################################################
# Given a VCD or TXT file, make signal_list from it
def file2signal_list( self, file_name ):
  """Populate the signal_list from a capture file, dispatching on extension:
  ".vcd" goes to the VCD parser, anything else to the TXT parser."""
  log( self, ["file2signal_list()"] );
  import os.path
  extension = os.path.splitext(file_name)[1].lower();
  if ( extension == ".vcd" ):
    vcdfile2signal_list( self, file_name );
  else:
    txtfile2signal_list( self, file_name );
  return;
########################################################
# Write a DWORD to specified SUMP Nibble Ctrl Address
#def sump_wr( self, addr, data ):
# self.bd.wr( self.sump_ctrl, [ addr ] );
# self.bd.wr( self.sump_data, [ data ] );
# return;
########################################################
# Read one or more DWORDs from SUMP Nibble Ctrl Address
# if address None - don't change from existing Address
#def sump_rd( self, addr, num_dwords = 1):
# if ( addr != None ):
# self.bd.wr( self.sump_ctrl, [ addr ] );
# return self.bd.rd( self.sump_data, num_dwords, repeat = True);
########################################################
# This is for removing an item from the popup list. It
# handles going down a hierarchy level into a sublist
def list_remove( my_list, item ):
  """Remove item from my_list and from any nested sub-lists (one level).

  Used to prune popup-menu entries; absent items are silently ignored.
  """
  try:
    my_list.remove( item );
  except ValueError:  # was a bare except with a no-op "None;" body
    pass;# item not present at the top level
  for each in my_list:
    if ( isinstance( each, list ) ):
      try:
        each.remove( item );
      except ValueError:
        pass;# item not present in this sub-list
  return;
########################################################
# Establish connection to Sump2 hardware
def sump_connect( self ):
  """Establish the connection to SUMP2 hardware via the backdoor server.

  Opens the Backdoor socket, probes for the SUMP hardware ID (0xABBA),
  prunes GUI menu entries for features the hardware lacks, then writes
  an initial default register configuration.  Returns True on success,
  False (with self.fatal_msg set) on any failure.
  NOTE: the register writes below are an order-sensitive hardware init
  sequence; do not reorder.
  """
  log( self, ["sump_connect()"] );
  self.bd=Backdoor( self.vars["bd_server_ip"],
                    int( self.vars["bd_server_socket"], 10 ) );# Note dec
  if ( self.bd.sock == None ):
    txt = "ERROR: Unable to locate BD_SERVER";
    self.fatal_msg = txt;
    print( txt );
    log( self, [ txt ] );
    return False;
  self.sump = Sump2( self.bd, int( self.vars["sump_addr"],16 ) );
  self.sump.rd_cfg();# populate sump.cfg_dict[] with HW Configuration
  if ( self.sump.cfg_dict['hw_id'] != 0xABBA ):
    txt = "ERROR: Unable to locate SUMP Hardware";
    self.fatal_msg = txt;
    print( txt );
    log( self, [ txt ] );
    return False;
  # HERE200
  # Adjust the GUI menu to remove features that don't exist in this hardware.
  # list_remove() tolerates absent entries; the direct .remove() calls assume
  # the entry is present.
  if ( self.sump.cfg_dict['nonrle_dis'] == 1 ):
    list_remove( self.popup_list_values, "Acquire_Normal");
    list_remove( self.popup_list_values, "Acquire_Single");
    list_remove( self.popup_list_values, "Acquire_Continuous");
  if ( self.sump.cfg_dict['rle_en'] == 0 ):
    self.popup_list_values.remove("Acquire_RLE_1x");
    self.popup_list_values.remove("Acquire_RLE_8x");
    self.popup_list_values.remove("Acquire_RLE_64x");
  if ( self.sump.cfg_dict['trig_wd_en'] == 0 ):
    list_remove( self.popup_list_names, "Trigger_Watchdog");
    list_remove( self.popup_list_names, "sump_watchdog_time");
  if ( self.sump.cfg_dict['data_en'] == 0 ):
    self.popup_list_names.remove("Set_Data_Enable");
    self.popup_list_names.remove("Clear_Data_Enable");
  if ( self.sump.cfg_dict['pattern_en'] == 0 ):
    self.popup_list_names.remove("Set_Pattern_0");
    self.popup_list_names.remove("Set_Pattern_1");
    self.popup_list_names.remove("Clear_Pattern_Match");
  if ( self.sump.cfg_dict['trig_nth_en'] == 0 ):
    list_remove( self.popup_list_names, "sump_trigger_nth");
  if ( self.sump.cfg_dict['trig_dly_en'] == 0 ):
    list_remove( self.popup_list_names, "sump_trigger_delay");
  # Default register configuration
  sump_size = self.sump.cfg_dict['ram_len'];
  self.sump.wr( self.sump.cmd_wr_user_ctrl, 0x00000000 );
  self.sump.wr( self.sump.cmd_wr_watchdog_time, 0x00001000 );
  self.sump.wr( self.sump.cmd_wr_user_pattern0, 0x000FFFFF );# Pattern Mask
  self.sump.wr( self.sump.cmd_wr_user_pattern1, 0x000055FF );# Pattern
  self.sump.wr( self.sump.cmd_wr_trig_type, self.sump.trig_pat_ris );
  self.sump.wr( self.sump.cmd_wr_trig_field, 0x00000000 );#
  self.sump.wr( self.sump.cmd_wr_trig_dly_nth, 0x00000001 );#Delay + nTh
  # self.sump.wr( self.sump.cmd_wr_trig_position, sump_size/2);#SamplesPostTrig
  self.sump.wr( self.sump.cmd_wr_trig_position, sump_size//2);#SamplesPostTrig
  # self.sump.wr( self.sump.cmd_wr_rle_event_en, 0xFFFFFFF0 );#RLE event en
  self.sump.wr( self.sump.cmd_wr_rle_event_en, 0xFFFFFFFF );#RLE event en
  self.sump.wr( self.sump.cmd_state_reset, 0x00000000 );
  # self.sump.wr( self.sump.cmd_state_arm, 0x00000000 );
  return True;
########################################################
# Talk to sump2 hardware and arm for acquisition ( or dont )
# determining the BRAM depth.
# HERE2
def sump_arm( self, en ):
  """Arm (en=True) or disarm (en=False) the SUMP2 acquisition hardware.

  When arming, the sump_* GUI variables are converted from hex strings,
  the trigger registers are programmed, then the state machine is reset
  and armed.  When disarming, only the state-machine reset is issued.

  FIX: the original wrapped the entire arm path in one bare try/except,
  so any hardware-write failure was mis-reported as "Unable to convert
  sump variables to hex" and silently left the HW half-programmed.  The
  try now covers only the string-to-int conversions; hardware errors
  propagate to the caller.
  """
  log( self, ["sump_arm()"]);
  if ( en != True ):
    # Disarm: reset the acquisition state machine only
    self.sump.wr( self.sump.cmd_state_reset, 0x00000000 );
    return;
  try:
    trig_type     = self.vars["sump_trigger_type" ];
    trig_field    = int( self.vars["sump_trigger_field" ],16 );
    rle_event_en  = int( self.vars["sump_rle_event_en" ],16 );
    trig_delay    = int( self.vars["sump_trigger_delay" ],16 );
    trig_nth      = int( self.vars["sump_trigger_nth" ],16 );
    data_en       = int( self.vars["sump_data_enable" ],16 );
    user_ctrl     = int( self.vars["sump_user_ctrl" ],16 );
    user_pattern0 = int( self.vars["sump_user_pattern0" ],16 );
    user_pattern1 = int( self.vars["sump_user_pattern1" ],16 );
    wd_time       = int( self.vars["sump_watchdog_time" ],16 );
  except ( KeyError, ValueError ):
    print("ERROR: Unable to convert sump variables to hex");
    return;# Do not touch the hardware with a partial configuration
  # Convert trigger ASCII into integers
  if ( trig_type == "or_rising" ):
    trig_type_int = self.sump.trig_or_ris;
  elif ( trig_type == "or_falling" ):
    trig_type_int = self.sump.trig_or_fal;
  elif ( trig_type == "watchdog" ):
    trig_type_int = self.sump.trig_watchdog;
  elif ( trig_type == "pattern_rising" ):
    trig_type_int = self.sump.trig_pat_ris;
  else:
    trig_type_int = 0;
  # Pack 16bit trig_delay and trig_nth into single dword
  trig_dly_nth = ( trig_delay << 16 ) + ( trig_nth << 0 );
  if ( trig_dly_nth == 0x0 ):
    print("WARNING: trig_nth is ZERO!!");
  print("%08x" % trig_type_int );
  print("%08x" % trig_field );
  print("%08x" % trig_dly_nth );
  print("%08x" % data_en );
  print("%08x" % user_ctrl );
  print("%08x" % user_pattern0 );
  print("%08x" % user_pattern1 );
  # Program the trigger configuration, then reset and arm
  self.sump.wr( self.sump.cmd_wr_trig_type , trig_type_int );
  self.sump.wr( self.sump.cmd_wr_trig_field, trig_field );
  self.sump.wr( self.sump.cmd_wr_trig_dly_nth, trig_dly_nth );
  self.sump.wr( self.sump.cmd_wr_rle_event_en, rle_event_en );
  self.sump.wr( self.sump.cmd_wr_user_data_en, data_en );
  self.sump.wr( self.sump.cmd_wr_user_ctrl , user_ctrl);
  self.sump.wr( self.sump.cmd_wr_watchdog_time, wd_time );
  self.sump.wr( self.sump.cmd_wr_user_pattern0, user_pattern0);
  self.sump.wr( self.sump.cmd_wr_user_pattern1, user_pattern1);
  self.sump.wr( self.sump.cmd_state_reset, 0x00000000 );
  self.sump.wr( self.sump.cmd_state_arm, 0x00000000 );
  return;
# self.trig_and_ris = 0x00;# Bits AND Rising
# self.trig_and_fal = 0x01;# Bits AND Falling
# self.trig_or_ris = 0x02;# Bits OR Rising
# self.trig_or_fal = 0x03;# Bits OR Falling
# self.trig_pat_ris = 0x04;# Pattern Match Rising
# self.trig_pat_fal = 0x05;# Pattern Match Falling
# self.trig_in_ris = 0x06;# External Input Trigger Rising
# self.trig_in_fal = 0x07;# External Input Trigger Falling
# self.cmd_wr_trig_type = 0x04;
# self.cmd_wr_trig_field = 0x05;# Correspond to Event Bits
# self.cmd_wr_trig_dly_nth = 0x06;# Trigger Delay and Nth
# self.cmd_wr_trig_position = 0x07;# Samples post Trigger to Capture
# self.cmd_wr_rle_event_en = 0x08;# Enables events for RLE detection
# self.cmd_wr_ram_ptr = 0x09;# Load specific pointer.
# self.cmd_wr_ram_page = 0x0a;# Load DWORD Page.
# self.cmd_rd_hw_id_rev = 0x0b;
# self.cmd_rd_ram_width_len = 0x0c;
# self.cmd_rd_sample_freq = 0x0d;
# self.cmd_rd_trigger_ptr = 0x0e;
# self.cmd_rd_ram_data = 0x0f;
# self.cmd_wr_user_ctrl = 0x10;
# self.cmd_wr_user_pattern0 = 0x11;# Also Mask for Pattern Matching
# self.cmd_wr_user_pattern1 = 0x12;# Also Pattern for Pattern Matching
# self.cmd_wr_user_data_en = 0x13;# Special Data Enable Capture Mode
########################################################
# Dump acquired data to a file. This is a corner turn op
def sump_save_txt( self, file_name, mode_vcd = False ):
  # Save the acquisition to a flat text file, one row per sample time: a
  # corner-turn from the per-signal value lists to per-sample text lines.
  # When mode_vcd is True the layout is adjusted for later VCD conversion:
  # the header rows lose their leading '#', every event bit gets its own
  # trailing space, and the header row carries the sample period in ps.
  # Hidden signals are excluded from the dump.
  log( self, ["sump_save_txt()"]);
  print("sump_save_txt()");
  ram_dwords = self.sump.cfg_dict['ram_dwords'];
  ram_bytes = self.sump.cfg_dict['ram_event_bytes'];
  ram_len = self.sump.cfg_dict['ram_len'];
  events = ram_bytes * 8;
  # if ( mode_vcd == True ):
  #   file_name = "sump_dump.txt4vcd";
  # else:
  #   file_name = "sump_dump.txt";
  file_out = open( file_name, 'w' );
  # Plain text dumps get '#' comment headers; VCD-bound dumps do not.
  if ( mode_vcd == False ):
    name_str = "#";
    nickname_str = "#";
  else:
    name_str = "";
    nickname_str = "";
  percent = 0;
  percent_total = ((1.0)*self.max_samples );
  print("max_samples = " + str( self.max_samples ) );
  for i in range( 0, self.max_samples, 1):
    # This takes a while, so calculate and print percentage as it goes by
    if ( ((i*1.0) / percent_total) > percent ):
      perc_str = ( str( int(100*percent) ) + "%");
      draw_header( self, "VCD Conversion " + perc_str );
      percent += .01;
    txt_str = "";
    m = 0;
    # Iterate the list searching for all the events in binary order
    # ( MSB event first, hence the reverse range ).
    for j in range( ram_bytes*8, 0, -1):
      for sig_obj in self.signal_list:
        if ( sig_obj.name == "event[%d]" % (j-1) and sig_obj.hidden == False ):
          txt_str += sig_obj.values[i];
          m +=1;
          if ( m == 8 or ( m == 1 and mode_vcd == True ) ):
            txt_str += " ";# Add whitespace between each byte group
            m = 0;
          # First sample pass also builds the header row of signal names.
          if ( i == 0 ):
            name_str += sig_obj.name + " ";
            if ( sig_obj.nickname != "" ):
              nickname_str += sig_obj.nickname + " ";
            else:
              nickname_str += sig_obj.name + " ";
    if ( mode_vcd == False ):
      txt_str += " ";# Add whitespace between events and dwords
    # Iterate the list searching for all the dwords in order
    for j in range( 0, ram_dwords, 1 ):
      for sig_obj in self.signal_list:
        if ( sig_obj.name == "dword[%d]" % j and sig_obj.hidden == False ):
          # DWORD value lists may be shorter than max_samples; pad with Xs.
          if ( i >= len( sig_obj.values )):
            txt_str += "XXXXXXXX";
          else:
            txt_str += sig_obj.values[i];
          txt_str += " ";# Add whitespace between each dword
          if ( i == 0 ):
            name_str += sig_obj.name + " ";
            nickname_str += sig_obj.nickname + " ";
    # print txt_str;# This line is a time sample for all signals
    if ( i == 0 ):
      # Header row: names plus the sample period in picoseconds
      # ( 1e6 / MHz = period in ps ).
      freq_mhz = self.sump.cfg_dict['frequency'];
      freq_ps = 1000000.0 / freq_mhz;
      file_out.write( nickname_str + " " + ("%f" % freq_ps ) + "\n" );
    file_out.write( txt_str + "\n" );
  file_out.close();
  return;
########################################################
# Dump acquired data to a file
def sump_save_vcd( self ):
  # Stub: native VCD export is not implemented. VCD output is produced
  # by first dumping TXT via sump_save_txt( mode_vcd=True ) instead.
  return;
def refresh( self ):
  # Keep the GUI responsive: drain the pygame event queue and repaint so
  # the OS never flags the window as "( Not Responding )". No-op in CLI mode.
  if ( self.mode_cli != False ):
    return;
  import pygame;
  pygame.event.get();# Avoid "( Not Responding )"
  pygame.display.update();
  return;
#########################################################################
# Dump acquired data from SUMP engine and merge with existing signal list
def sump_dump_data( self ):
  # Download a non-RLE acquisition from the SUMP hardware and populate the
  # existing signal_list objects with per-sample values: one 1-bit value
  # list per "event[i]" signal and one 8-nibble hex value list per
  # "dword[i]" signal. Returns the sample index of the trigger, which for
  # non-RLE captures is fixed at the middle of the capture window.
  log( self, ["sump_dump_data()"]);
  ram_dwords = self.sump.cfg_dict['ram_dwords'];
  ram_bytes = self.sump.cfg_dict['ram_event_bytes'];
  ram_rle = self.sump.cfg_dict['ram_rle'];
  # ram_len = self.sump.cfg_dict['ram_len'];
  ( ram_pre, ram_post, ram_len, ram_phys ) = sump_ram_len_calc(self);
  events = ram_bytes * 8;# Example, 32 events total for 4 ram_bytes
  # DWORD data spans the entire capture in non-RLE mode.
  self.dwords_start = 0;
  self.dwords_stop = ram_phys;
  # Event Signals
  rd_page = 0;
  dump_data = sump_dump_var_ram(self,rd_page = rd_page );
  for i in range( 0, events, 1 ):
    txt = ("Event %d of %d" % ( i+1, events ) );
    draw_header( self, "sump_dump_data() " + txt);
    refresh( self );
    # Iterate the list of signals and find one with correct physical name
    my_signal = None;
    for each_signal in self.signal_list:
      if ( each_signal.name == "event[%d]" % i ):
        my_signal = each_signal;
    if ( my_signal != None ):
      my_signal.values = [];
      my_signal.format = "bin";
      my_signal.bits_total = 1;
      my_signal.bit_top = 0;
      my_signal.bit_bot = 0;
      # Extract this event's bit out of each packed 32bit sample word.
      bit_val = (1 << i );
      for j in range( 0, ram_len, 1):
        if ( ( dump_data[j] & bit_val ) != 0x0 ):
          bit = "1";
        else:
          bit = "0";
        my_signal.values.append( bit );
  # DWORD Signals : each DWORD channel lives on its own RAM page 0x10+i.
  for i in range( 0, ram_dwords , 1 ):
    txt = ("DWORD %d" % i );
    txt = ("DWORD %d of %d" % ( i+1, ram_dwords ) );
    draw_header( self, "sump_dump_data() " + txt);
    refresh(self);
    dump_data = sump_dump_var_ram(self, rd_page = ( 0x10 + i ) );
    # Iterate the list of signals and find one with correct physical name
    my_signal = None;
    for each_signal in self.signal_list:
      if ( each_signal.name == "dword[%d]" % i ):
        my_signal = each_signal;
    if ( my_signal != None ):
      my_signal.values = [];
      my_signal.format = "hex";
      my_signal.bits_total = 32;
      my_signal.bit_top = 31;
      my_signal.bit_bot = 0;
      for j in range( 0, ram_len, 1):
        my_signal.values.append( "%08x" % dump_data[j] );
  sump_bundle_data( self );# Derive bundle parent values from children
  recalc_max_samples( self );
  trig_i = (self.max_samples // 2);# Trigger fixed at 50/50 for now
  return trig_i;
#########################################################################
# Search the signal list for any type bundles and calculate their sample
# values based on their children
def sump_bundle_data( self ):
  # Walk signal_list in display order; when a "bundle" parent is seen,
  # collect the value lists of the signals that follow at a deeper
  # hier_level as its children. When the hierarchy pops back out, zip the
  # children sample-wise and pack their bits ( first child = bit 0 ) into
  # one hex value per sample for the parent.
  # NOTE(review): a bundle whose children run to the very end of
  # signal_list is never closed out by this loop — presumably the list
  # always ends with a non-child entry; confirm against callers.
  my_signal = None;
  for each_signal in self.signal_list:
    if ( my_signal != None ):
      if ( each_signal.hier_level > my_level ):
        # Still inside the bundle: accumulate this child's samples.
        rip_list += [ each_signal.values ];
      else:
        # Bundle ended: corner-turn children into per-sample bit tuples.
        value_list = zip( *rip_list );
        my_signal.values = [];
        my_signal.bit_top = len( rip_list )-1;
        my_signal.bit_bot = 0;
        my_signal.bits_total = my_signal.bit_top + 1;
        for each_sample in value_list:
          bit = 0;
          for (i,each_bit) in enumerate ( each_sample ):
            if ( each_bit == "1" ):
              bit += ( 1 << i );
          my_signal.values += [ "%x" % bit ];
        my_signal = None;
    if ( each_signal.type == "bundle" ):
      # Start collecting children for this new bundle parent.
      my_signal = each_signal;
      my_level = my_signal.hier_level;
      rip_list = [];
  return;
#########################################################################
# Dump acquired data from SUMP engine and merge with existing signal list
def sump_dump_rle_data( self ):
  # Download an RLE-compressed acquisition from the SUMP hardware, expand
  # it to uniform time samples, and populate the signal_list value lists.
  # The RLE data/time words live on RAM pages 0x2/0x3; non-RLE DWORD data
  # ( pages 0x10+ ) is padded with "XXXXXXXX" so it lines up against the
  # expanded RLE timeline. Returns the expanded sample index of the trigger.
  print("sump_dump_rle_data()");
  log( self, ["sump_dump_rle_data()"]);
  ram_dwords = self.sump.cfg_dict['ram_dwords'];
  ram_bytes = self.sump.cfg_dict['ram_event_bytes'];
  ram_rle = self.sump.cfg_dict['ram_rle'];
  # NOTE(review): these two are raw hex STRINGS from self.vars; the *=
  # below therefore repeats the string rather than scaling a number. They
  # are not read again before being re-read as ints in the disabled
  # cull section, so this appears harmless — confirm before relying on it.
  rle_pre_trig_len = self.vars["sump_rle_pre_trig_len" ];
  rle_post_trig_len = self.vars["sump_rle_post_trig_len" ];
  trig_delay = int( self.vars["sump_trigger_delay" ],16 );
  # self.undersample_rate = int(self.vars["sump_rle_undersample" ],16);
  # if ( self.acq_state == "acquire_rle_undersampled" ):
  #   self.undersample_data = True;
  # Map the acquisition state onto the undersampling ratio used by HW.
  if ( self.acq_state == "acquire_rle_1x" ):
    self.undersample_data = False;
    self.undersample_rate = 1;
  elif ( self.acq_state == "acquire_rle_4x" ):
    self.undersample_data = True;
    self.undersample_rate = 4;
  elif ( self.acq_state == "acquire_rle_8x" ):
    self.undersample_data = True;
    self.undersample_rate = 8;
  elif ( self.acq_state == "acquire_rle_16x" ):
    self.undersample_data = True;
    self.undersample_rate = 16;
  elif ( self.acq_state == "acquire_rle_64x" ):
    self.undersample_data = True;
    self.undersample_rate = 64;
  rle_pre_trig_len *= self.undersample_rate;
  rle_post_trig_len *= self.undersample_rate;
  # print("##");
  # print( rle_pre_trig_len );
  # print( rle_post_trig_len );
  # ram_len = self.sump.cfg_dict['ram_len'];
  ( ram_pre, ram_post, ram_len, ram_phys ) = sump_ram_len_calc(self);
  events = ram_bytes * 8;# Example, 32 events total for 4 ram_bytes
  # Event Signals : read the raw RLE (time,data) word pairs from HW.
  rd_page = 0;
  print("sump_dump_ram( rle_data )");
  rle_data = sump_dump_ram(self,rd_page = 0x2, rd_ptr = 0x0000 );
  print("sump_dump_ram( rle_time )");
  rle_time = sump_dump_ram(self,rd_page = 0x3, rd_ptr = 0x0000 );
  rle_list = list(zip( rle_time, rle_data ));
  # print("Oy");
  # print( len(rle_time ) );
  # print( len(rle_data ) );
  # print( len(rle_list ) );
  print("process_rle()");
  # Rotate/cull the circular RLE buffer into time-ordered pre+post lists.
  (start_t,stop_t, pre_trig, post_trig ) = process_rle(self,rle_list);
  # print("start_time = %08x" % start_t );
  # print("stop_time  = %08x" % stop_t );
  # if ( ( stop_t - start_t ) > 0x00100000 ):
  # if ( ( stop_t - start_t ) > 0x01000000 ):
  #   print("ERROR: Time span is too large");
  #   shutdown( self );
  # Debug dump of the compressed samples that survived the cull.
  rle_hex_list = [];
  for ( rle_time, rle_data ) in ( pre_trig + post_trig ):
    rle_hex_list += [ ("%08x %08x" % ( rle_time, rle_data ) )];
  list2file( self, "sump2_rle_dump.txt", rle_hex_list );
  print("expand_rle()");
  # Decompress RLE into one sample per time tick and locate the trigger.
  (dump_data,trig_i) = expand_rle( self, start_t,stop_t,pre_trig,post_trig );
  # print( len( dump_data ) );
  print("Generating RLE Event Signal List of values");
  for i in range( 0, events, 1 ):
    txt = ("Event %d of %d" % ( i+1, events ) );
    draw_header( self, "sump_dump_rle_data() " + txt);
    refresh( self );
    # Iterate the list of signals and find one with correct physical name
    my_signal = None;
    for each_signal in self.signal_list:
      if ( each_signal.name == "event[%d]" % i ):
        my_signal = each_signal;
    if ( my_signal != None ):
      my_signal.values = [];
      my_signal.format = "bin";
      my_signal.bits_total = 1;
      my_signal.bit_top = 0;
      my_signal.bit_bot = 0;
      bit_val = (1 << i );
      # Hidden events were masked from RLE detection; skip filling them.
      if ( my_signal.hidden == False ):
        for j in range( 0, len( dump_data ) , 1):
          if ( ( dump_data[j] & bit_val ) != 0x0 ):
            bit = "1";
          else:
            bit = "0";
          my_signal.values.append( bit );
        if ( self.undersample_data == True ):
          rle_undersample_signal( self, self.undersample_rate, my_signal );
  # Align non-RLE dword data with the RLE samples by calculating Null samples
  # before and after trigger event
  # |        T          |  : RLE dump_data
  # | pre_pad | dword_data | post_pad |
  pre_pad = ( trig_delay + 2 + trig_i - ram_phys//2) * \
              [ "XXXXXXXX" ];
  post_pad = ( len( dump_data ) - len( pre_pad ) - ram_phys - trig_delay ) * \
              [ "XXXXXXXX"];
  # Remember where DWORDs are within RLE samples and use to speed up rendering
  # by not bothering with DWORDs if outside of current view.
  self.dwords_start = len(pre_pad);
  self.dwords_stop = self.dwords_start + ram_phys;
  if ( self.undersample_data == False ):
    print("Generating RLE DWORD Signal List of values");
    # DWORD Signals. Just Null out all samples since RLE acquisition
    trig_ptr = self.sump.rd( self.sump.cmd_rd_trigger_ptr )[0];
    ram_ptr = 0xFFFF & (trig_ptr - ram_phys//2 );
    for i in range( 0, ram_dwords , 1 ):
      txt = ("DWORD %d of %d" % ( i+1, ram_dwords ) );
      draw_header( self, "sump_dump_rle_data() " + txt);
      refresh( self );
      dump_data = sump_dump_ram(self,rd_page = (0x10+i), rd_ptr = ram_ptr );
      # Iterate the list of signals and find one with correct physical name
      my_signal = None;
      for each_signal in self.signal_list:
        if ( each_signal.name == "dword[%d]" % i ):
          my_signal = each_signal;
      if ( my_signal != None ):
        # Real DWORD samples sit between the X padding regions.
        my_signal.values = pre_pad[:];
        my_signal.format = "hex";
        my_signal.bits_total = 32;
        my_signal.bit_top = 31;
        my_signal.bit_bot = 0;
        for j in range( 0, ram_phys, 1):
          my_signal.values.append( "%08x" % dump_data[j] );
        my_signal.values += post_pad;
  # Undersampling Events, so just create NULL DWORDs
  else:
    for i in range( 0, ram_dwords , 1 ):
      my_signal = None;
      for each_signal in self.signal_list:
        if ( each_signal.name == "dword[%d]" % i ):
          my_signal = each_signal;
      if ( my_signal != None ):
        my_signal.values = [];
        my_signal.format = "hex";
        my_signal.bits_total = 32;
        my_signal.bit_top = 31;
        my_signal.bit_bot = 0;
  # Note: This doesn't work the best, so disabling
  if ( False ):
    print("Culling excess RLE sample pre and post trigger");
    # Cull samples to max pre and post trig lengths to keep display usable
    rle_pre_trig_len = int( self.vars["sump_rle_pre_trig_len" ],16);
    rle_post_trig_len = int( self.vars["sump_rle_post_trig_len" ],16);
    total_samples = len(pre_pad ) + ram_phys + len( post_pad );
    pre_trig = trig_i;
    post_trig = total_samples - trig_i;
    start_ptr = 0;
    stop_ptr = -1;
    if ( pre_trig > rle_pre_trig_len ):
      start_ptr = trig_i - rle_pre_trig_len;
    if ( post_trig > rle_post_trig_len ):
      stop_ptr = trig_i + rle_post_trig_len;
    for i in range( 0, events, 1 ):
      my_signal = None;
      for each_signal in self.signal_list:
        if ( each_signal.name == "event[%d]" % i ):
          my_signal = each_signal;
      if ( my_signal != None ):
        my_signal.values = my_signal.values[start_ptr:stop_ptr];
    for i in range( 0, ram_dwords , 1 ):
      my_signal = None;
      for each_signal in self.signal_list:
        if ( each_signal.name == "dword[%d]" % i ):
          my_signal = each_signal;
      if ( my_signal != None ):
        my_signal.values = my_signal.values[start_ptr:stop_ptr];
  sump_bundle_data( self );# Derive bundle parent values from children
  recalc_max_samples( self );
  return trig_i;
def rle_undersample_signal( self, undersample_rate, my_signal ):
  # Decimate an undersampled binary RLE signal in place: each group of
  # undersample_rate consecutive samples collapses to a single sample that
  # is "1" if any sample in the group was "1", else "0". A trailing
  # partial group is dropped.
  print("rle_undersample_signal()");
  samples = my_signal.values;
  whole_groups = len( samples ) // undersample_rate;
  decimated = [];
  for g in range( whole_groups ):
    group = samples[ g*undersample_rate : (g+1)*undersample_rate ];
    if ( "1" in group ):
      decimated.append( "1" );
    else:
      decimated.append( "0" );
  my_signal.values = decimated;
  return;
# Given a RLE compressed list, expand to regular time sample list.
# Return the list and the index location of the trigger
def expand_rle( self, start_t,stop_t,pre_trig,post_trig ):
  # Decompress an RLE (time,data) list into one data sample per time tick
  # from start_t to stop_t: the most recent data value is held until the
  # next RLE timestamp is reached. The trigger sample is the last entry of
  # pre_trig; its expanded index is tracked by matching its timestamp.
  # Returns ( sample_list, trigger_index ).
  i = start_t;
  j = 0;
  rle_list = pre_trig + post_trig;
  trigger_index = 0;
  # ( trigger_time, trigger_data ) = pre_trig[-2];
  # print("RLE TRIGGER Compressed-2 %08x " % ( trigger_data ) );
  ( trigger_time, trigger_data ) = pre_trig[-1];
  print("RLE TRIGGER Compressed   %08x " % ( trigger_data ) );
  sample_list = [];
  # Prime the hold register with the very first RLE sample.
  ( rle_time, rle_data ) = rle_list[j];
  hold_data = rle_data;
  sample_list += [ hold_data ];# Add old sample
  old_rle_time = 0;
  j +=1;
  ( rle_time, rle_data ) = rle_list[j];
  # Walk time tick by tick, advancing through rle_list as timestamps hit.
  while ( i <= stop_t and j < (len(rle_list)-1) ):
    if ( i < rle_time ):
      sample_list += [ hold_data ];# Add old sample
    else:
      sample_list += [ rle_data ];# Add the new sample
      hold_data = rle_data;
      j +=1;
      old_rle_time = rle_time;
      ( rle_time, rle_data ) = rle_list[j];
    # if ( rle_time == trigger_time ):
    #   trigger_index = len( sample_list )-1;
    # if ( old_rle_time == trigger_time ):
    #   trigger_index = len( sample_list );
    if ( i == trigger_time ):
      trigger_index = len( sample_list )-1;
    # print("RLE TRIGGER Decompressed %08x " % ( sample_list[trigger_index] ) );
    i+=1;
  print("RLE TRIGGER Decompressed %08x " % ( sample_list[trigger_index] ) );
  return ( sample_list, trigger_index );
# Example RLE List
# 000007ff 0000000d
# 00000800 0000000d 2nd to last of pre-trig
# 1d4c4ad3 0000000b Last item of pre-trig
# 1d4c4ad4 0000000b 1st item of post-trig
def process_rle( self, rle_list ):
  # Post-process the raw RLE capture: the hardware buffer is split into a
  # pre-trigger half ( a circular buffer that must be rotated so its
  # oldest sample comes first ) and a post-trigger half. Samples outside
  # the configured pre/post trigger windows are culled, and the cull
  # decisions are logged to sump2_rle_cull_list.txt for debug.
  # Returns ( start_time, stop_time, pre_list, post_list ) where the
  # trigger sample is the last entry of pre_list.
  ln = len( rle_list ) // 2;# Size of pre and post lists
  pre_list = list(rle_list[0:ln]);
  post_list = list(rle_list[ln:]);
  culls = [];
  # tuplelist2file( self, "rle_prelist1.txt", pre_list );
  # Figure out oldest RLE sample pre-trigger and then rotate list
  start_time = 0xFFFFFFFF;
  i = 0;
  for ( rle_time, rle_data ) in pre_list:
    # print("%08x %08x" % ( rle_time, start_time) );
    if ( rle_time < start_time ):
      start_time = rle_time;
      n = i;# Location of oldest RLE sample found thus far
    i +=1;
  pre_list = rotate_list(self,pre_list, n );
  print("RLE pre_list %08x %08x" % ( pre_list[-1] ) );
  # tuplelist2file( self, "rle_prelist2.txt", pre_list );
  # ini defines hard limits of how many uncompressed samples pre and post trig
  rle_pre_trig_len = int(self.vars["sump_rle_pre_trig_len" ],16);
  rle_post_trig_len = int(self.vars["sump_rle_post_trig_len" ],16);
  # Now scale limits based on the sump_acqusition_len setting 25,50,75,100
  acq_len = int(self.vars["sump_acquisition_len"],16);
  pre_trig = (acq_len & 0xF0)>>4;# Expect 1-4 for 25%-100% of 1st RAM Half
  post_trig = (acq_len & 0x0F)>>0;# Expect 1-4 for 25%-100% of 2nd RAM Half
  rle_pre_trig_len = ( rle_pre_trig_len // 4 ) * pre_trig; # Div-4, Mult 1-4
  rle_post_trig_len = ( rle_post_trig_len // 4 ) * post_trig;# Div-4, Mult 1-4
  # Cull any non-events pre and post trigger. Non-events are when the HW
  # generates a simple as a MSB timer bit has rolled over. This feature
  # prevents hardware from hanging forever if there are no events.
  # NOTE(review): this cull pass is currently disabled.
  if ( False ):
    pre_list_old = pre_list[:];
    (first_time,first_data ) = pre_list[0];
    pre_list = [];
    valid = False;
    prev_time = None;
    for ( rle_time, rle_data ) in list(pre_list_old):
      if ( rle_data != first_data and valid == False ):
        valid = True;
        if ( prev_time != None ):
          # If space between 1st and 2nd RLE samples is large, cull down to 1000
          if ( ( rle_time - prev_time ) < 1000 ):
            pre_list += [ (prev_time,prev_data) ];# Keep sample before 1st delta
          else:
            pre_list += [ ((rle_time-1000),prev_data)];# sample before 1st delta
      if ( valid == True ):
        pre_list += [ (rle_time,rle_data) ];
      else:
        prev_time = rle_time;
        prev_data = rle_data;
    if ( len( pre_list ) == 0 ):
      pre_list = [ pre_list_old[-1] ];
  # Cull any samples outside the sump_rle_pre_trig_len
  (trig_time,trig_data ) = pre_list[-1];
  pre_list_old = pre_list[:];
  pre_list = [];
  for ( rle_time, rle_data ) in list(pre_list_old):
    if ( rle_time > ( trig_time - rle_pre_trig_len ) ):
      pre_list += [ (rle_time,rle_data) ];
      culls+=[("+ %08x %08x %08x %08x" %
              (rle_time,trig_time,rle_pre_trig_len,rle_data))];
    else:
      culls+=[("< %08x %08x %08x %08x" %
              (rle_time,trig_time,rle_pre_trig_len,rle_data))];
  # Never return an empty pre_list; keep at least the trigger sample.
  if ( len( pre_list ) == 0 ):
    pre_list = [ pre_list_old[-1] ];
  stop_time = 0x00000000;
  i = 0;
  for ( rle_time, rle_data ) in post_list:
    if ( rle_time > stop_time ):
      stop_time = rle_time;
      n = i;# Location of newest RLE sample found thus far
    i +=1;
  # Cull any samples outside the sump_rle_post_trig_len
  post_list_old = post_list[:];
  post_list = [];
  for ( rle_time, rle_data ) in list(post_list_old):
    if ( rle_time < ( trig_time + rle_post_trig_len ) ):
      post_list += [ (rle_time,rle_data) ];
      culls+=[("+ %08x %08x %08x %08x" %
              (rle_time,trig_time,rle_post_trig_len,rle_data))];
    else:
      culls+=[("> %08x %08x %08x %08x" %
              (rle_time,trig_time,rle_post_trig_len,rle_data))];
  if ( len( post_list ) == 0 ):
    post_list = [ post_list_old[0] ];
  (start_time,start_data ) = pre_list[0];
  (stop_time,stop_data ) = post_list[-1];
  list2file( self, "sump2_rle_cull_list.txt", culls );
  return ( start_time , stop_time, pre_list, post_list );
def rotate_list( self, my_list, n ):
  # Rotate my_list left by n places so that element n becomes element 0.
  tail = my_list[n:];
  head = my_list[:n];
  return tail + head;
########################################################
# Calculate desired ram length pre,post trig to work with
def sump_ram_len_calc( self ):
  # Derive how many samples to keep before and after the trigger from the
  # sump_acquisition_len variable: its high nibble (1-4) selects 25%-100%
  # of the first RAM half ( pre-trigger ) and its low nibble selects
  # 25%-100% of the second RAM half ( post-trigger ).
  # Returns [ pre_len, post_len, total_len, physical_ram_len ].
  phys_len = self.sump.cfg_dict['ram_len'];# Physical RAM Size, ie 1K
  acq_len  = int( self.vars["sump_acquisition_len"], 16 );
  pre_sel  = ( acq_len >> 4 ) & 0x0F;# 1-4 for 25%-100% of 1st RAM Half
  post_sel = ( acq_len >> 0 ) & 0x0F;# 1-4 for 25%-100% of 2nd RAM Half
  quarter  = ( phys_len // 2 ) // 4;# Example 128 of 1K/2
  pre_len  = quarter * pre_sel;
  post_len = quarter * post_sel;
  return [ pre_len, post_len, ( pre_len + post_len ), phys_len ];
########################################################
# Return a list of acquired SUMP capture data using variable length
def sump_dump_var_ram( self, rd_page = 0 ):
  # Read a variable-length window of capture RAM from the SUMP hardware:
  # the window starts ram_pre samples ( plus one ) before the trigger
  # pointer and spans the pre+post length chosen by sump_ram_len_calc().
  # rd_page selects which RAM page to read ( 0 = events, 0x10+i = DWORDs ).
  # Returns the list of 32bit sample words.
  # HERE12
  ( ram_pre, ram_post, ram_len, ram_phys ) = sump_ram_len_calc(self);
  trig_ptr = self.sump.rd( self.sump.cmd_rd_trigger_ptr )[0];
  # Trigger RAM pointer wraps at 16 bits, hence the 0xFFFF mask.
  ram_ptr = 0xFFFF & (trig_ptr - ram_pre - 1);
  self.sump.wr( self.sump.cmd_wr_ram_page, rd_page );
  self.sump.wr( self.sump.cmd_wr_ram_ptr , ram_ptr );# Load at specfd pre-trig
  data = self.sump.rd( self.sump.cmd_rd_ram_data, num_dwords = ram_len );
  return data;
########################################################
# Return a complete list of acquired SUMP capture data
def sump_dump_ram( self, rd_page = 0, rd_ptr = None ):
  # Read the entire physical capture RAM for one page from the SUMP
  # hardware. When rd_ptr is given, the read starts at that RAM address;
  # otherwise the hardware's current pointer is used as-is.
  # Returns the list of 32bit sample words.
  ram_len = self.sump.cfg_dict['ram_len'];
  self.sump.wr( self.sump.cmd_wr_ram_page, rd_page );
  if ( rd_ptr != None ):
    self.sump.wr( self.sump.cmd_wr_ram_ptr , rd_ptr );#
  data = self.sump.rd( self.sump.cmd_rd_ram_data, num_dwords = ram_len );
  return data;
########################################################
# Use the wave_list to generate a new signal_list
# HERE
#def wave2signal_list( self ):
# ram_len = self.sump.cfg_dict['ram_len'];
# ram_dwords = self.sump.cfg_dict['ram_dwords'];
# ram_bytes = self.sump.cfg_dict['ram_event_bytes'];
# ram_rle = self.sump.cfg_dict['ram_rle'];
#
# events = ram_bytes * 8;
# # Iterate the number of event bits and init with 0s
# for i in range( 0, events , 1):
# sig_name = "event_%d" % i;
# self.signal_list.append( signal(name=sig_name) );
# self.signal_list[i].format = "bin";
# self.signal_list[i].bits_total = 1;
# self.signal_list[i].bit_top = 0;
# self.signal_list[i].bit_bot = 0;
# for j in range( 0, ram_len, 1):
# self.signal_list[i].values.append( "0" );
#
# # Iterate the number of dwords and init with 0x0s
# for i in range( 0, ram_dwords, 1):
# sig_name = "dword_%d" % i;
# self.signal_list.append( signal(name=sig_name) );
# self.signal_list[events+i].format = "hex";
# self.signal_list[events+i].bits_total = 32;
# self.signal_list[events+i].bit_top = 31;
# self.signal_list[events+i].bit_bot = 0;
# for j in range( 0, ram_len, 1):
# self.signal_list[events+i].values.append( "%08x" % 0 );
#
# return;
########################################################
# Read values of sump vars and use to update signal objects trigger attrib
def sump_vars_to_signal_attribs( self ):
  # Push the sump_* variables down onto the event signal objects so the
  # GUI reflects the current hardware configuration:
  #   sump_rle_event_en : bit per event, drives the hidden attribute
  #   sump_data_enable  : bit per event, drives the data_enable attribute
  #   trigger attribute : 0=OFF +1=Rising -1=Falling -2=Watchdog
  #                       2=Pattern bit of 0, 3=Pattern bit of 1
  # Note: If trigger_type is pattern_rising or pattern_falling then
  # user_pattern0 is the mask of what bits to pattern match on and
  # user_pattern1 is the actual pattern bits.
  trig_type     = self.vars["sump_trigger_type"  ];# "or_rising";
  trig_field    = int( self.vars["sump_trigger_field" ],16 );
  trig_delay    = int( self.vars["sump_trigger_delay" ],16 );
  trig_nth      = int( self.vars["sump_trigger_nth"   ],16 );
  rle_event_en  = int( self.vars["sump_rle_event_en"  ],16 );
  data_en       = int( self.vars["sump_data_enable"   ],16 );
  user_ctrl     = int( self.vars["sump_user_ctrl"     ],16 );
  wd_time       = int( self.vars["sump_watchdog_time" ],16 );
  user_pattern0 = int( self.vars["sump_user_pattern0" ],16 );
  user_pattern1 = int( self.vars["sump_user_pattern1" ],16 );
  # rle_event_en controls the Hidden field
  for i in range( 0, 32 , 1):
    sig_obj = get_sig_obj_by_name( self, ("event[%d]" % i ) );
    if ( sig_obj != None ):
      if ( ( rle_event_en & 1<<i ) != 0x0 ):
        sig_obj.hidden = False;
      else:
        sig_obj.hidden = True;
  # Clear everything to start with. Set any data_en bits
  for i in range( 0, 32 , 1):
    sig_obj = get_sig_obj_by_name( self, ("event[%d]" % i ) );
    if ( sig_obj != None ):
      sig_obj.trigger = 0; # OFF
      if ( ( data_en & 1<<i ) != 0x0 ):
        sig_obj.data_enable = True;
      else:
        sig_obj.data_enable = False;
  # Set any Rising or Falling edge trigger selection ( 1 only )
  if ( trig_type == "or_rising" or trig_type == "or_falling" or
       trig_type == "watchdog" ):
    if ( trig_type == "or_rising" ):
      trig = +1;
    if ( trig_type == "or_falling" ):
      trig = -1;
    if ( trig_type == "watchdog" ):
      trig = -2;
    for i in range( 0, 32 , 1):
      sig_obj = get_sig_obj_by_name( self, ("event[%d]" % i ) );
      # Bugfix: guard against a missing event signal object, like the
      # loops above, so a None lookup can't raise AttributeError here.
      if ( sig_obj != None and ( 1<<i & trig_field ) != 0x0 ):
        sig_obj.trigger = trig;
  if ( trig_type == "pattern_rising" or trig_type == "pattern_falling" ):
    for i in range( 0, 32 , 1):
      sig_obj = get_sig_obj_by_name( self, ("event[%d]" % i ) );
      # Bugfix: same None guard as above for the pattern-match case.
      if ( sig_obj != None and ( 1<<i & user_pattern0 ) != 0x0 ):
        if ( ( 1<<i & user_pattern1 ) != 0x0 ):
          sig_obj.trigger = 3;# Pattern of 1 for this bit
        else:
          sig_obj.trigger = 2;# Pattern of 0 for this bit
  return;
########################################################
# Read signal attributes and convert to sump variables
def sump_signals_to_vars( self ):
  # Fold the per-signal hidden/visible attributes back into the
  # sump_rle_event_en variable: bit i is set when event[i] is both
  # visible and not hidden, and cleared when it is hidden or invisible.
  enable_mask = int( self.vars["sump_rle_event_en"], 16 );
  for sig_obj in self.signal_list:
    for bit in range( 32 ):
      if ( sig_obj.name != ( "event[%d]" % bit ) ):
        continue;
      if ( sig_obj.hidden == False and sig_obj.visible == True ):
        enable_mask = enable_mask |  ( 1 << bit );# Set bit
      if ( sig_obj.hidden == True or sig_obj.visible == False ):
        enable_mask = enable_mask & ~( 1 << bit );# Clear bit
  self.vars["sump_rle_event_en" ] = ( "%08x" % enable_mask );
  return;
########################################################
# Use sump2 hardware info to generate a signal_list
def sump2signal_list( self ):
  # Build the initial signal_list from the SUMP2 hardware configuration:
  # one 1-bit "event[i]" signal per event input and one 32-bit "dword[i]"
  # signal per DWORD channel, each initialized with ram_len zero samples.
  ram_len = self.sump.cfg_dict['ram_len'];
  ram_dwords = self.sump.cfg_dict['ram_dwords'];
  ram_bytes = self.sump.cfg_dict['ram_event_bytes'];
  ram_rle = self.sump.cfg_dict['ram_rle'];
  events = ram_bytes * 8;
  # Iterate the number of event bits and init with 0s
  for i in range( 0, events , 1):
    sig_name = "event[%d]" % i;
    sig_obj = signal(name=sig_name);
    sig_obj.format = "bin";
    sig_obj.bits_total = 1;
    sig_obj.bit_top = 0;
    sig_obj.bit_bot = 0;
    for j in range( 0, ram_len, 1):
      sig_obj.values.append( "0" );
    self.signal_list.append( sig_obj );
  # Iterate the number of dwords and init with 0x0s
  for i in range( 0, ram_dwords, 1):
    # Bugfix: was "dword_%d", but sump_dump_data(), sump_dump_rle_data()
    # and sump_save_txt() all match on "dword[%d]" ( like "event[%d]" ),
    # so the underscore form left DWORD signals permanently empty.
    sig_name = "dword[%d]" % i;
    sig_obj = signal(name=sig_name);
    sig_obj.format = "hex";
    sig_obj.bits_total = 32;
    sig_obj.bit_top = 31;
    sig_obj.bit_bot = 0;
    for j in range( 0, ram_len, 1):
      sig_obj.values.append( "%08x" % 0 );
    self.signal_list.append( sig_obj );
  return;
########################################################
# Given a TXT file, make signal_list from it
# Format is:
# # foo bar addr
# 0 1 2
# 1 0 a
def txtfile2signal_list( self, file_name ):
  # Build signal_list from a flat text dump: the first line is a
  # '#'-prefixed header of net names, every following line is one time
  # sample with whitespace-separated values. Single 0/1 values mark a
  # signal as binary; anything else promotes it to hex with 4 bits per
  # character of the value.
  # Read in the flat text VCD translation and make lists of net names
  file_in = open ( file_name , 'r' );
  file_list = file_in.readlines();
  file_in.close();
  net_names = file_list[0];
  sig_values = file_list[1:];
  # Collapse runs of whitespace so split() yields clean name tokens.
  self.sig_name_list = " ".join(net_names.split()).split(' ');
  self.sig_name_list = self.sig_name_list[1:]; # Remove Leading #
  self.sig_value_list = file_list[1:];
  for each in self.sig_name_list[:]:
    self.signal_list.append( signal(name=each) );
  # Rip thru the value list ( of all sigs ) and extract one signal at a time
  # 0 000000000 1 000000000 0 000000000 0 000000000 000000000 000000000
  for ( i , sig_obj ) in enumerate( self.signal_list ):
    self.signal_list[i].format = "bin"; # Assume Binary by default
    self.signal_list[i].bits_total = 1;
    for each in self.sig_value_list:
      # Pad with Nones so a short line can't raise IndexError.
      words = " ".join(each.split()).split(' ') + [None] * 20;
      sig_obj.values.append( words[i] );
      # If value other than 0 or 1 is found, declare this as hex
      if ( words[i] != "0" and words[i] != "1" and words[i] != None ):
        self.signal_list[i].format = "hex";
        self.signal_list[i].bits_total = len( words[i] ) * 4;
        self.signal_list[i].bit_top = self.signal_list[i].bits_total-1;
        self.signal_list[i].bit_bot = 0;
  return;
########################################################
# Given a VCD file, make signal_list from it
def vcdfile2signal_list( self, file_name ):
  # Import a VCD ( Value Change Dump ) file into signal_list.
  # Pass 1 parses the $var/$scope header into signal objects ( bus bit
  # rips like foo[3] are routed to rip_list and folded into a single
  # parent signal ). Pass 2 scans the value-change section to find the
  # smallest time delta, which is taken as the sample period. Pass 3
  # replays every value change, appending one value per sample period to
  # each signal's value list.
  try: # Read the Input File and Separate the Header from Data
    file_in = open( file_name , "r" );
    file_lines = file_in.readlines();
    file_in.close();
  except:
    print( "ERROR Input File: "+file_name );
    print( "Possibly a Python MemoryError due to large file size");
  self.signal_list = [];
  self.rip_list = [];
  self.rip_symbs = [];
  self.top_module = "";
  hier_list = [];
  hier_name = "";
  hier_level = 0;# +1 on 1st will be 0;
  print( "vcdfile2signal_list() : Parsing VCD Symbol Definitions");
  start_time = self.pygame.time.get_ticks();
  for ( i , each ) in enumerate ( file_lines ):
    words = each.strip().split() + [None] * 4; # Avoid IndexError
    if ( words[0] == "$enddefinitions" ):
      dump_vars_index = i; # Remember location to start Value Change Parsing
      break; # Save time and don't process entire file
    #####################################
    # Check for Signal Symbol Definitions
    # $var wire 1 * tx_data [15] $end
    #   0    1  2 3    4      5    6
    if ( words[0] == "$var" ):
      # NOTE(review): "type" shadows the Python builtin here; it is
      # write-only in this scope so the shadowing is harmless.
      type = words[1]; # ie "wire"
      bits = int( words[2] ); # ie 32
      symb = words[3]; # ie ","
      name = words[4]; # ie "lb_addr"
      rip = words[5]; # ie "[31:0]" or "$end" if single bit
      sig_obj = signal( name=name, vcd_symbol=symb );
      sig_obj.hier_name = hier_name;
      sig_obj.hier_level = hier_level;
      sig_obj.bits_total = bits; # ie 32
      sig_obj.bit_top = bits-1; # ie 31
      sig_obj.bit_bot = 0; # ie 0
      if ( rip != "$end" ):
        sig_obj.rip = rip;# [15:0] or [1] or ""
      if ( bits > 1 or sig_obj.rip != "" ):
        sig_obj.format = "hex";
      else:
        sig_obj.format = "bin";
      # If a portion of a ripped bus and not [0], add to special rip_list
      # otherwise, add to the regular signal_list
      if (
           ( sig_obj.rip != "" ) and \
           ( ":" not in sig_obj.rip ) and \
           ( sig_obj.rip != "[0]" ) \
         ):
        self.rip_list.append( sig_obj );
        self.rip_symbs.append( symb );
      else:
        self.signal_list.append( sig_obj );
        # Now also add "[0]" to rip_list ( It will appear in BOTH lists )
        if ( sig_obj.rip == "[0]" ):
          self.rip_list.append( sig_obj );
          self.rip_symbs.append( symb );
    #####################################
    # Check for new hierarchy declaration
    if ( words[0] == "$scope" and \
         ( words[1] == "module" or \
           words[1] == "begin" ) \
       ):
      if ( self.top_module == "" ):
        self.top_module = words[2]; # ie "tb_dut"
        print( "top_module is ", self.top_module);
      # Each scope also becomes a collapsible placeholder signal.
      name = words[2]; # ie "u_dut"
      sig_obj = signal( name=name );
      sig_obj.hier_name = hier_name;
      sig_obj.hier_level = hier_level;
      sig_obj.bits_total = 0;
      sig_obj.bit_top = 0;
      sig_obj.bit_bot = 0;
      sig_obj.format = "";
      self.signal_list.append( sig_obj );
      sig_obj.collapsable = True;
      sig_obj.expandable = False;
      hier_list.append( words[2] );
      # Rebuild the "/a/b/c" hierarchical path for children that follow.
      rts = ""
      for each in hier_list:
        rts = rts + "/" + each;
      hier_name = rts;
    #####################################
    # Adjust hier level on $scope or $upscope
    if ( words[0] == "$scope" ):
      hier_level += 1;
    if ( words[0] == "$upscope" ):
      hier_level -= 1;
    if ( words[0] == "$scope" and words[1] == "begin" ):
      hier_list.append( "process" );
    if ( words[0] == "$upscope" ):
      hier_list.pop(); # Remove last item from list
  # Create a hash lookup of symbol to object index and bits to speed things up
  hash_dict_index = {};
  hash_dict_bits = {};
  for ( i, sig_obj ) in enumerate( self.signal_list ):
    # Need to make a list for symb lookup as clocks can reuse same symbol
    if ( hash_dict_index.get( sig_obj.vcd_symbol, None ) == None ):
      hash_dict_index[ sig_obj.vcd_symbol ] = [i];
    else:
      hash_dict_index[ sig_obj.vcd_symbol ].append( i );
    hash_dict_bits[ sig_obj.vcd_symbol ] = sig_obj.bits_total;
  # Go thru the rip_list and determine the number of bits for the busses
  # This finds the parent in self.signal_list that matches the current
  # rip from self.rip_list and adjusts the parents bits_total and bit_top
  # if the rip's exceed the parent's old value. The parent will start with
  # (1,1) since it is based on rip [0]
  # Also create a hash to lookup the parent index for each rip symbol
  hash_rip_list = {};
  hash_rip_parent = {};
  for (j,my_each) in enumerate( self.rip_list ):
    name = my_each.name;
    hier_name = my_each.hier_name;
    rip = my_each.rip;
    hash_rip_list[ my_each.vcd_symbol ] = j; # For Fast Lookup later
    # Calculate the weight of each bit, ie [7] is 128
    for foo_each in [ "[", "]" ]:
      rip = rip.replace( foo_each , " "+foo_each+" " );
    words = rip.strip().split() + [None] * 10; # Avoid IndexError
    rip = int( words[1], 10 );
    my_each.bit_weight = 2**rip;# Conv 7->128
    if ( name != None and hier_name != None ):
      for ( i, my2_each ) in enumerate( self.signal_list ):
        if ( name == my2_each.name and \
             hier_name == my2_each.hier_name ):
          hash_rip_parent[ my_each.vcd_symbol ] = i; # For Fast Lookup later
          if ( rip > my2_each.bit_top ):
            my2_each.bits_total = rip+1;
            my2_each.bit_top = rip;
  symb_parse_list = ["!","#","$","&","'","K" ];
  # Now Parse actual VCD section and try and figure out sample clock period
  # by finding the smallest time delta across the entire VCD file
  sample_period = 99999999;
  prev_time = 0;
  for ( i , each ) in enumerate ( file_lines[dump_vars_index:] ):
    words = each.strip().split() + [None] * 4; # Avoid IndexError
    if ( words[0][0:1] == "#" ):
      now_time = int( words[0][1:],10 );
      delta_time = now_time - prev_time;
      if ( delta_time < sample_period and delta_time != 0):
        sample_period = delta_time;
        print( sample_period );
      prev_time = now_time;
  # Now Parse the actual VCD section and calculate current values for each
  # signal at every time stamp section.
  print( "vcdfile2signal_list() : Parsing VCD Value Change Dumps");
  start_time = self.pygame.time.get_ticks();
  percent = 0;
  percent_total = ((1.0)*len( file_lines[dump_vars_index:] ) );
  sample_cnt = 0;
  for ( i , each ) in enumerate ( file_lines[dump_vars_index:] ):
    # This takes a while, so calculate and print percentage as it goes by
    if ( ((i*1.0) / percent_total) > percent ):
      perc_str = ( str( int(100*percent) ) + "%");
      draw_header( self, perc_str );
      print( perc_str );
      percent += .05;
    # Handle binary cases for "1>" and convert to "1 >"
    # If the 1st char is 0 or 1 insert a space to make look like vector
    if ( each[0:1] == "0" or
         each[0:1] == "1" or
         each[0:1] == "x" or
         each[0:1] == "z" ):
      each = each[0:1] + " " + each[1:];
    words = each.strip().split() + [None] * 4; # Avoid IndexError
    symb = words[1];
    # Skip the initial dumpvars section as nothing to dump yet
    if ( words[0] == "#0" ):
      None;
      time_stamp = 0;
      time_now = 0;
    # When we reach a timestamp, append all last_value to values list
    elif ( words[0][0:1] == "#" ):
      time_stamp = int( words[0][1:], 10 );
      # Emit one sample per sample_period up to ( and including ) this
      # timestamp, holding each signal's last seen value.
      while ( time_now <= time_stamp ):
        for sig_obj in self.signal_list:
          sig_obj.values.append( sig_obj.last_value );
        sample_cnt += 1;# Count Total Samples for final report at end
        time_now += sample_period;
    # Read the symbols new value and assign to last_value
    else:
      if ( words[0][0:1]=="0" or
           words[0][0:1]=="1" or
           words[0][0:1]=="x" or
           words[0][0:1]=="z" ):
        value = words[0];
      elif ( words[0][0:1] == "b" ):
        try:
          value = int( words[0][1:],2 );# Convert Binary String to Integer
          if ( symb != None ):
            num_bits = hash_dict_bits[ symb ];
            num_nibs = int(num_bits/4.00 + 0.75 );# ie 29 bits gets 8 nibbles
          else:
            num_nibs = 1;
        except:
          value = 0;
          num_nibs = 1;
        value = "%08x" % value;# Now Convert Integer to Hex
        # Trim to num_nibs least-significant nibbles via double reverse.
        value = value[::-1]; # Reverse
        value = value[0:num_nibs]; # Keep desired number of LSBs
        value = value[::-1]; # Reverse Back
      elif ( words[0][0:1] == "$" ):
        value = None;
      else:
        line_num = i + dump_vars_index + 1;
        print( "ERROR line " + str(line_num) + " : " + words[0]);
        value = None;
      # Is symb in rip_list? If not, do normal processing
      if ( symb not in self.rip_symbs ):
        if ( value != None and symb != None ):
          # Note: a symb might be used multiple times for clock ports, etc.
          try:
            for i in hash_dict_index[ symb ]:
              self.signal_list[i].last_value = value;
          except:
            # print "VCD Symbol Error " + symb;
            None;
      # Oh SNAP - This is in the rip_list. Find obj for [0] ( Parent )
      # and if 0, AND out bit_weight, if 1 OR in bit_weight.
      # This op takes time since values are stored in ASCII, must convert to
      # int, perform the bit operation and then convert back to ASCII.
      else:
        my_each = self.rip_list[ hash_rip_list[ symb ] ];
        my2_each = self.signal_list[ hash_rip_parent[ symb ] ];
        try:
          last_value = int( my2_each.last_value, 16 );
        except:
          last_value = 0;
        if ( value == "0" ):
          last_value = last_value & ~ my_each.bit_weight;
        elif ( value == "1" ):
          last_value = last_value | my_each.bit_weight;
        nibs = my2_each.bits_total//4;# ie 32 = 8, num nibs to display
        new_value = "%016x" % last_value;# 16 Nibbles, remove leading next
        my2_each.last_value = new_value[16-nibs:];# Remove leading 0s
  stop_time = self.pygame.time.get_ticks();
  tt = str( (stop_time - start_time) / 1000 ) + "s";
  rate = str( sample_cnt / ((stop_time - start_time) * 1000 )) + " MSPS";
  print( "vcdfile2signal_list() : Complete : Time " + tt +" : Rate " + rate);
  draw_header( self, "" );
  return;
def shutdown( self ):
log( self, ["shutdown()"]);
var_dump( self, "sump2.ini" ); # Dump all variable to INI file
proc_cmd( self, "save_format", [""] ); # Autosave the last format
if ( self.mode_cli == False ):
self.pygame.quit();# Be IDLE friendly
print("");
print("Thank you for using SUMP2 " + self.vers + " by BlackMesaLabs");
print("Please encourage the development and use of open-source software");
sys.exit();
return;
#def init_vars( self ):
# self.var_hash= {};
# self.var_hash["bd_connection" ] = "tcp";
# self.var_hash["bd_protocol" ] = "poke";
# self.var_hash["tcp_port" ] = "21567";
# self.var_hash["tcp_ip_addr" ] = "127.0.0.1";# No Place Like Home
# self.var_hash["sump_addr" ] = "00000000" ;# Addr of sump2_ctrl_reg
# self.var_hash["sump_trigger_type" ] = "or_rising";
# self.var_hash["sump_trigger_field" ] = "00000000";
# self.var_hash["sump_trigger_delay" ] = "0000";
# self.var_hash["sump_trigger_nth" ] = "0000";
# self.var_hash["sump_user_ctrl" ] = "00000000";
# self.var_hash["sump_user_pattern0" ] = "00000000";
# self.var_hash["sump_user_pattern1" ] = "00000000";
# self.var_hash["sump_data_enable" ] = "00000000";
# return;
  def init_globals( self ):
    """Initialize all per-session GUI/application state to defaults.

    Sets colors, text metrics, acquisition state, sample-window bookkeeping,
    mouse/popup tracking, the two measurement cursors, the right-click menu
    trees and the command-alias table.  Called once at startup (and the
    commented-out lines record state that moved elsewhere or was retired).
    """
    # Define the colors we will use in RGB format
    import platform,os;
    self.os_sys = platform.system(); # Windows vs Linux
    self.fatal_msg = None;
    self.undersample_data = False;
    self.undersample_rate = 1;
    self.gui_active = False;
    self.color_bg = (0,0,0);
    self.color_fg = (0,0,0);
    self.prompt = "bd>";
    self.done = False; # This breaks the application loop when true
#   self.clock = self.pygame.time.Clock();
#   self.lcd = self.pygame.display.Info(); # Dimensions of physical LCD screen
    self.txt_height = 0;
    self.txt_width = 0;
    self.spin_char = "";
    self.debug = False;
    self.last_filesave = None;# Name of last file saved, used for Save_Rename
    self.vcd_import = False;
    self.acq_state = "acquire_stop";
    self.acq_mode = "nonrle";
#   if ( self.mode_cli == False ):
#     self.font = get_font( self,self.vars["font_name"],self.vars["font_size"]);
    # Sample-window bookkeeping: which slice of capture RAM is displayed.
    self.sample_start = 0;
    self.sample_stop = 0;
    self.sample_room = 0;
    self.prev_sample_start = None;
    self.prev_sample_stop = None;
    self.max_samples = 0;
    self.zoom_x = self.txt_width; # Default zoom ratio is 1 text char width
    self.stop_zoom = False;
    self.sig_obj_sel = None;
    self.key_buffer = "";
    self.last_search_value = "";
    self.vertical_scrolled_offscreen = False;
    self.last_cmd = "";
#   self.cmd_history = ["","",""];
    self.skipped_refresh_cnt = 0;
    self.old_list = [];
    self.slider_width = 0;
    self.cmd_history = [];
    self.dwords_start = 0;
    self.dwords_stop = 0;
    # Screen-region rectangles (pixel coords) for names, values and cursors.
    self.sig_name_start_x = 0;
    self.sig_name_start_y = 0;
    self.sig_name_stop_x = 0;
    self.sig_name_stop_y = 0;
    self.sig_value_start_x = 0;
    self.sig_value_start_y = 0;
    self.sig_value_stop_x = 0;
    self.sig_value_stop_y = 0;
    self.cursor_start_y = 0;
    self.cursor_stop_y = 0;
    self.top_module = "";# ie "tb_foo"
    self.sig_top = 0;
    self.sig_bot = 0;
#   self.scroll_togl = 1;# +1=Pan, -1=Zoom
    self.surface_start = -1;
    self.surface_stop = -1;
    self.name_surface_valid = False;
    self.curval_surface_valid = False;
    # Two measurement cursors, pre-placed at samples 10 and 15.
    self.cursor_list = [];
    self.cursor_list.append( cursor(name="Cursor1"));
    self.cursor_list.append( cursor(name="Cursor2"));
    self.cursor_list[0].y = 0;
    self.cursor_list[1].y = 0;
    self.cursor_list[0].sample = 10;
    self.cursor_list[1].sample = 15;
    # Mouse tracking state: positions, button-down/up coords and timestamps.
    self.mouse_x = 0;
    self.mouse_y = 0;
    self.mouse_button = 0;
    self.mouse_region = "";
    self.mouse_name_sel_y = -1;
    self.scroll_num_samples = 1;
    self.mouse_btn1dn_x = -1;
    self.mouse_btn1dn_y = -1;
    self.mouse_btn1up_x = -1;
    self.mouse_btn1up_y = -1;
    self.mouse_btn3dn_x = -1;
    self.mouse_btn3dn_y = -1;
    self.mouse_btn3up_x = -1;
    self.mouse_btn3up_y = -1;
    self.mouse_btn1up_time_last = 0;
    self.mouse_btn1up_time = 0;
    self.mouse_btn1dn_time = 0;
    self.resize_on_mouse_motion = False;
    self.max_w = 0;
    self.max_w_chars = 0;
#   self.subpop = False;
    # Right-click popup menu geometry and selection state.
    self.popup_x = None;
    self.popup_y = -1;
    self.popup_w = 0;
    self.popup_y2 = -1;
    self.popup_sel = "";
    self.popup_sample = 0;
    self.popup_parent_x = None;
    self.popup_parent_y = None;
    self.popup_parent_list = None;
    self.txt_entry = False;
    self.txt_entry_caption = "Rename_Signal";
    # Create a list of files to source in menu given include and exclude filters
    file_inc_filter = self.vars["sump_script_inc_filter"];
    file_exc_filter = self.vars["sump_script_exc_filter"];
    file_load_list = ["File_Load"];
    import glob;
    glob_list = set(glob.glob(file_inc_filter))-set(glob.glob(file_exc_filter));
    for each in glob_list:
      file_load_list += ["source "+each ];
    # Right-Click menu over signal names.  Nested lists become submenus;
    # "--------" entries draw separators.
    self.popup_list_names = [
#     "--------","Group","Group+","Expand","Collapse","Insert_Divider",
#     "--------","Delete","Make_Invisible","Make_All_Visible",
#     "--------","Delete","Rename","Restore_All",
#     ["Clipboard","Cut","Paste","Delete","Rename"],
      "--------", "Rename",
      "Insert_Divider",
      ["Clipboard","Cut","Paste"],
      ["Visibility","Delete","Hide","Hide_All","Show","Show_All"],
#     ["Grouping","Group_with_Divider","Group_with_Parent", \
#      "UnGroup","Insert_Divider"],
      [ "Radix", "Hex","Signed","Unsigned" ],
#     [ "Waveform_Format", "Edit_Format","Save_Format","Load_Format",\
#       "Delete_Format", "Save_Selected" ], \
#     "--------",[ "Font_Size", "Font_Larger","Font_Smaller"],\
      "--------","Trigger_Rising","Trigger_Falling","Trigger_Watchdog",\
      "--------","Set_Pattern_0","Set_Pattern_1","Clear_Pattern_Match",\
      "--------","Set_Data_Enable","Clear_Data_Enable",\
      "--------",["SUMP_Configuration","sump_trigger_delay",\
                  "sump_trigger_nth",\
                  "sump_user_ctrl",\
                  "sump_user_pattern0",\
                  "sump_user_pattern1",\
                  "sump_watchdog_time"],\
      "--------",["Acquisition_Length",
                  "[----T----]",
                  " [--T--] ",
                  " [-T-] ",
                  "[----T-] ",
                  " [-T----]",
                 ],
    ];
#     "--------",["File_Load","File1","File2"],
#     "--------",file_load_list,
#     "BD_SHELL","Manual","Quit"];
    # Right-Click menu over waveform area
    self.popup_list_values = [
#     "--------","Debug_Vars",
#     "--------","Reload",
#     "Scroll_Toggle",
      "--------","Zoom_In", "Zoom_Out", "Zoom_Full","Zoom_Previous",
      "Zoom_to_Cursors",
      "--------",["Cursors",
      "Cursors_to_View","Cursor1_to_Here","Cursor2_to_Here",
      "Crop_to_Cursors"],\
      ["Acquire",
       "Acquire_Normal","Acquire_RLE","Acquire_Stop",],
#      "Acquire_Single","Acquire_Continuous",
#      "Acquire_RLE_1x","Acquire_RLE_8x","Acquire_RLE_64x",
#      "Acquire_Stop",],
#     "--------","Crop_to_Cursors",
#     "--------","Cursors_to_View","Cursor1_to_Here","Cursor2_to_Here",\
#     "--------","Acquire_Single","Acquire_Continuous","Acquire_Stop",
#     "--------","Acquire_RLE_1x","Acquire_RLE_8x","Acquire_RLE_64x",
#     "--------","Acquire_Single","Acquire_Continuous","Acquire_Stop",
#     "--------","Acquire_RLE_1x","Acquire_RLE_8x","Acquire_RLE_64x",
#     "--------","Acquire_RLE_1x","Acquire_RLE_4x","Acquire_RLE_16x",
#                "Acquire_RLE_64x",
#     "--------",
#     ["File_Load","File1","File2"],
      file_load_list,
      ["File_Save","Save_PNG","Save_JPG","Save_BMP",
#      "Save_TXT","Save_VCD","Save_RLE_VCD","Save_Rename"],
       "Save_TXT","Save_VCD","Save_Rename"],
#     ["Fonts","Font_Larger","Font_Smaller"],
      ["Misc","Font_Larger","Font_Smaller",
      "BD_SHELL","Manual"],"Quit"];
    self.popup_list = self.popup_list_values;
    # Short command aliases accepted at the bd> prompt.
    self.cmd_alias_hash_dict = {};
    self.cmd_alias_hash_dict["zi"] = "zoom_in";
    self.cmd_alias_hash_dict["zo"] = "zoom_out";
    self.cmd_alias_hash_dict["zt"] = "zoom_to";
    self.cmd_alias_hash_dict["q" ] = "quit";
    self.cmd_alias_hash_dict["find"] = "search";
    self.cmd_alias_hash_dict["/"] = "search";
    self.cmd_alias_hash_dict["?"] = "backsearch";
    return;
###############################################################################
class cursor(object):
  """A vertical measurement cursor on the waveform display.

  Only *name* and *visible* are meaningful; the remaining constructor
  arguments mirror the signal() signature and are accepted but unused.
  """
  def __init__( self, name="Cursor1", visible=True, \
                bits_per_line=32, bits_total=32,format="hex"):
    self.name     = name;     # Display label, e.g. "Cursor1"
    self.visible  = visible;  # Drawn on screen when True
    self.selected = False;    # True while the user is dragging it
    self.x        = 0;        # Screen x coordinate in pixels
    self.y        = 0;        # Screen y coordinate in pixels
    self.sample   = 0;        # Sample index the cursor is locked to
  def __del__(self):
    return;
  def __str__(self):
    return "name = " + self.name;
###############################################################################
# A signal contains time samples and various display attributes.
class signal(object):
  """One displayed trace: a list of time samples plus display attributes.

  *type* is one of "signal","divider","group","endgroup".

  Bug fix: the constructor previously accepted bits_per_line, bits_total
  and format but silently ignored them, always hard-coding 32/32/"hex".
  They are now honored; the defaults are unchanged, so existing callers
  relying on the defaults see identical behavior.
  """
  def __init__( self, name="cnt_a", type="signal",vcd_symbol="",visible=True, \
                bits_per_line=32, bits_total=32,format="hex"):
    self.name         = name;
    self.type         = type;# "signal","divider","group","endgroup"
    self.nickname     = "";
    self.hier_name    = "";
    self.hier_level   = 0;
    self.vcd_symbol   = vcd_symbol;
    self.values       = [];       # Time samples, hex strings
    self.trigger      = 0;# 0=OFF +1=Rising,-1=Falling,2=Pattern0,3=Pattern1
    self.data_enable  = False;
    self.selected     = False;
    self.last_value   = "";
    self.visible      = visible;
    self.hidden       = False;
    self.deleted      = False;
    self.expandable   = False;
    self.collapsable  = False;
    self.is_expansion = False;
    self.grouped      = False;
    self.x = 0;
    self.y = 0;
    self.h = 0; # Height
    self.w = 0; # Width
    self.bits_per_line = bits_per_line;
    self.bits_total    = bits_total;
    self.bit_top       = bits_total - 1;# MSB index, ie 31 for a 32 bit bus
    self.bit_bot       = 0;
    self.bit_weight    = 0; # Only used by rip_list, ie [7]->128
    self.rip           = ""; # [15:0], [1], ""
    self.format        = format;# "hex","signed","unsigned"
  def __del__(self):
    return;
  def __str__(self):
    return "name = " + self.name;
##############################################################################
class Sump2:
  """Register-level driver for the SUMP2 logic-analyzer hardware engine.

  Talks to the hardware through a Backdoor-style bus object using two
  registers: a control register at *addr* and a data register at addr+0x4.
  A command opcode is written to control, then operands/results move
  through the data register.
  """
  def __init__ ( self, backdoor, addr ):
    self.bd = backdoor;
    self.addr_ctrl = addr;
    self.addr_data = addr + 0x4;
    # Command opcodes written to the control register.
    self.cmd_state_idle = 0x00;
    self.cmd_state_arm = 0x01;
    self.cmd_state_reset = 0x02;# Always Reset before Arm.
    self.cmd_wr_trig_type = 0x04;
    self.cmd_wr_trig_field = 0x05;# Correspond to Event Bits
    self.cmd_wr_trig_dly_nth = 0x06;# Trigger Delay and Nth
    self.cmd_wr_trig_position = 0x07;# Samples post Trigger to Capture
    self.cmd_wr_rle_event_en = 0x08;# Enables events for RLE detection
    self.cmd_wr_ram_ptr = 0x09;# Load specific pointer.
    self.cmd_wr_ram_page = 0x0a;# Load DWORD Page.
    self.cmd_rd_hw_id_rev = 0x0b;
    self.cmd_rd_ram_width_len = 0x0c;
    self.cmd_rd_sample_freq = 0x0d;
    self.cmd_rd_trigger_ptr = 0x0e;
    self.cmd_rd_ram_data = 0x0f;
    self.cmd_wr_user_ctrl = 0x10;
    self.cmd_wr_user_pattern0 = 0x11;# Also Mask for Pattern Matching
    self.cmd_wr_user_pattern1 = 0x12;# Also Pattern for Pattern Matching
    self.cmd_wr_user_data_en = 0x13;# Special Data Enable Capture Mode
    self.cmd_wr_watchdog_time = 0x14;# Watchdog Timeout
    # Trigger-type encodings for cmd_wr_trig_type.
    self.trig_and_ris = 0x00;# Bits AND Rising
    self.trig_and_fal = 0x01;# Bits AND Falling
    self.trig_or_ris = 0x02;# Bits OR Rising
    self.trig_or_fal = 0x03;# Bits OR Falling
    self.trig_pat_ris = 0x04;# Pattern Match Rising
    self.trig_pat_fal = 0x05;# Pattern Match Falling
    self.trig_in_ris = 0x06;# External Input Trigger Rising
    self.trig_in_fal = 0x07;# External Input Trigger Falling
    self.trig_watchdog = 0x08;# Watchdog trigger
    # Filled in by rd_cfg() with decoded hardware capabilities.
    self.cfg_dict = {};
    # Status bitmask values reported by the engine.
    self.status_armed = 0x01;# Engine is Armed, ready for trigger
    self.status_triggered = 0x02;# Engine has been triggered
    self.status_ram_post = 0x04;# Engine has filled post-trig RAM
    self.status_ram_pre = 0x08;# Engine has filled pre-trigger RAM
    self.status_rle_pre = 0x10;# RLE Engine has filled pre-trig RAM
    self.status_rle_post = 0x20;# RLE Engine has filled post-trig RAM
    self.status_rle_en = 0x80;# RLE Engine is present
  def wr ( self, cmd, data ):
    """Write command opcode *cmd* to control, then one DWORD *data* to data."""
    self.bd.wr( self.addr_ctrl, [ cmd ] );
    self.bd.wr( self.addr_data, [ data ] );
  def rd( self, addr, num_dwords = 1):
    """Select read address *addr* (unless None) and read DWORDs from data reg."""
    # Note: addr of None means use existing ctrl address and just read data
    if ( addr != None ):
      self.bd.wr( self.addr_ctrl, [ addr ] );
    return self.bd.rd( self.addr_data, num_dwords, repeat = True);
  def rd_cfg( self ):
    """Read the HW ID, RAM-geometry and sample-frequency registers and
    decode their bit fields into self.cfg_dict."""
    hwid_data = self.rd( self.cmd_rd_hw_id_rev )[0];
    ram_data = self.rd( self.cmd_rd_ram_width_len )[0];
    freq_data = self.rd( self.cmd_rd_sample_freq )[0];
    print("%08x" % hwid_data );
    print("%08x" % freq_data );
    print("%08x" % ram_data );
    self.cfg_dict['hw_id'] = ( hwid_data & 0xFFFF0000 ) >> 16;
    self.cfg_dict['hw_rev'] = ( hwid_data & 0x0000FF00 ) >> 8;
    self.cfg_dict['data_en'] = ( hwid_data & 0x00000040 ) >> 6;
    self.cfg_dict['trig_wd_en'] = ( hwid_data & 0x00000020 ) >> 5;
#   self.cfg_dict['data_en'] = 1;# This bit doesn't exist yet in HW
#   self.cfg_dict['trig_wd_en'] = 1;# This bit doesn't exist yet in HW
    self.cfg_dict['nonrle_dis'] = ( hwid_data & 0x00000010 ) >> 4;
    self.cfg_dict['rle_en'] = ( hwid_data & 0x00000008 ) >> 3;
    self.cfg_dict['pattern_en'] = ( hwid_data & 0x00000004 ) >> 2;
    self.cfg_dict['trig_nth_en'] = ( hwid_data & 0x00000002 ) >> 1;
    self.cfg_dict['trig_dly_en'] = ( hwid_data & 0x00000001 ) >> 0;
    # Frequency register is fixed-point MHz with 16 fractional bits.
    self.cfg_dict['frequency'] = float(freq_data) / 65536.0;
    self.cfg_dict['ram_len'] = ( ram_data & 0x0000FFFF ) >> 0;
    self.cfg_dict['ram_dwords'] = ( ram_data & 0x00FF0000 ) >> 14;# >>16,<<2
    self.cfg_dict['ram_event_bytes'] = ( ram_data & 0x0F000000 ) >> 24;
    self.cfg_dict['ram_rle'] = ( ram_data & 0xF0000000 ) >> 28;
  def close ( self ):
    return;
##############################################################################
# functions to convert text time samples into a VCD file. See cpy_txt2vcd.py
class TXT2VCD:
  """Convert text time samples into VCD file contents. See cpy_txt2vcd.py.

  Input format: first line is "sig_a sig_b ... <timescale_ps>", remaining
  lines are whitespace-separated hex sample values, one column per signal.

  Bug fixes relative to the original:
  * get_bus_widths(): "bus_width[i] == 32" was a no-op comparison, so the
    "XXXXXXXX" (unknown-value) column was never widened to 32 bits; it is
    now an assignment.
  * conv_txt2vcd(): "rts = self.header" aliased the header list, so the
    in-place "rts +=" grew self.header on every call; a copy is taken now.
  """
  def __init__ ( self ):
    self.char_code = self.build_char_code(); # ['AA','BA',etc]
    self.header = self.build_header();
    self.footer = self.build_footer();
    return;
  def close ( self ):
    return;
  def conv_txt2vcd ( self, main_self, txt_list ):
    """
    Take in a txt list and spit out a vcd
    """
    header_line = txt_list[0]; # 1st line "#foo bar 10000"
    data_lines = txt_list[1:]; # Data lines "1 1a"
    bus_widths = self.get_bus_widths( data_lines[:] ); # How many bits in each
    rts = self.header[:];# Copy, so "rts +=" below can't mutate self.header
    rts += self.build_name_map( header_line,bus_widths[:],self.char_code[:] );
    rts += self.footer;
    timescale = float( header_line.split()[-1] ); time = 0;
    next_perc = 0;# Display an update every 5%
    total_count = len( data_lines );
    prev_data_line = None;
    for ( i, data_line ) in enumerate( data_lines ):
      # Only emit a timestamped delta when the sample changed (VCD style).
      if ( data_line != prev_data_line ):
        rts += [ "#" + str(time) ];
        bit_list = self.get_bit_value( data_line, header_line, bus_widths[:] );
        rts += self.dump_bit_value( bit_list, self.char_code[:] );
        prev_data_line = data_line;
      time += int( timescale );
      # TODO: Would be nice to have this call draw_header() instead.
      perc = ( 100 * i ) // total_count;
      if ( perc >= next_perc ):
        draw_header( main_self,"conv_txt2vcd() "+str( perc )+"%" );
        print( "conv_txt2vcd() "+str( perc )+"%" );
        next_perc += 5;# Next 5%, this counts 0,5,10,...95
    return rts;
  def get_bit_value( self,data_line,header_line,bus_widths_list_cp ):
    """
    Figure out each bit value (0,1) for the provided line. Return a list of 0,1s
    ("x" for values that fail to parse as hex).
    """
    rts = [];
    data_list = data_line.split();
    for bus_name in header_line.split()[0:-1]:# Remove the timescale at very end
      bus_width = bus_widths_list_cp.pop(0); # 1 or 16, etc
      data = data_list.pop(0); # "1" or "10ab", etc
      bit_val = 2**(bus_width-1); # 8->128, 4->8, 1->1
      for i in range( bus_width ): # Counts 0..7 for 8bit bus
        try:
          if ( ( int(data,16) & bit_val ) == 0 ): rts += ["0"];
          else: rts += ["1"];
        except:
          rts += ["x"];# Non-hex (e.g. "X") sample: emit VCD unknown
        bit_val //= 2; # Counts 128,64,..2,1 for 8bit bus
    return rts;
  def dump_bit_value( self, bit_list, char_code_list_cp ):
    """
    Convert [0,1,etc] to [0AA,1BA,etc]
    """
    rts = [];
    for bit in bit_list:
      rts += [ bit +char_code_list_cp.pop(0) ];
    return rts;
  def build_name_map( self,header_line,bus_widths_list_cp,char_code_list_cp ):
    """
    Emit one "$var wire 1 AA foo [7] $end" declaration per bit of each bus.
    """
    rts = [];
    for bus_name in header_line.split()[0:-1]:# This removes timescale at end
      bus_width = bus_widths_list_cp.pop(0);
      if ( bus_width == 1 ):
        rts += [ "$var wire 1 " + char_code_list_cp.pop(0) + " " + \
                 bus_name + " $end" ];
      else:
        for i in range( bus_width ): # Counts 0..7 for 8bit bus
          rts += [ "$var wire 1 " + char_code_list_cp.pop(0) + " " + \
                   bus_name + " [" + str(bus_width-1-i)+"] $end" ];
    return rts;
  def get_bus_widths( self, data_list_cp ):
    """
    Rip the vectors, if any vector never exceeds 1 then its a wire. Tag it
    otherwise, bus width is number of nibbles x4
    """
    bus_width = [None]*100;
    for data_line in data_list_cp:
      data_words = data_line.split(); i = 0;
      for data_word in data_words:
        bit_width = 4 * len( data_word ); # How many bits 4,8,12,etc
        if ( bus_width[i] == None ): bus_width[i] = 1;# Default to single wire
        if ( data_word == "XXXXXXXX" ):
          # Unknown-value marker: force full 32 bit width.
          # (Original used "==", a no-op comparison, so the width stayed 1.)
          bus_width[i] = 32;
        else:
          try:
            if ( int( data_word, 16) > 1 ): bus_width[i] = bit_width;
          except:
            print("ERROR: Invalid non Hexadecimal Data " + str(data_word));
        i+=1;
    return bus_width;
  def build_char_code( self ):
    """
    VCDs map wires to alphabet names such as AA,BA. Build a 676 (26x26) list
    """
    char_code = []; # This will be ['AA','BA',..,'ZZ']
    for ch1 in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
      for ch2 in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
        char_code += [ ch2+ch1 ];
    return char_code;
  def build_header( self ):
    """Fixed VCD preamble lines ($date/$version/$timescale/$scope)."""
    rts = [];
    rts += [ "$date Wed May 4 10:12:46 2005 $end" ];
    rts += [ "$version ModelSim Version 6.0c $end" ];
    rts += [ "$timescale 1ps $end" ];
    rts += [ "$scope module module_name $end" ];
    return rts;
  def build_footer( self ):
    """Fixed VCD lines that close the declarations and start the dump."""
    rts = [];
    rts += [ "$upscope $end"];
    rts += [ "$enddefinitions $end"];
    rts += [ "#0" ];
    rts += [ "$dumpvars"];
    rts += [ "$end"];
    return rts;
##############################################################################
# functions to send Backdoor commands to BD_SERVER.PY over TCP Sockets
class Backdoor:
  """TCP client for sending Backdoor bus commands to BD_SERVER.PY.

  Wire protocol: each packet is an 8-hex-char length header followed by an
  ASCII payload; commands are single letters ("w"/"W" write, "r"/"k" read)
  with hex-encoded address and data words.
  """
  def __init__ ( self, ip, port ):
    try:
      import socket;
    except:
      raise RuntimeError("ERROR: socket is required");
    try:
      self.sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM);
      self.sock.connect( ( ip, port ) );# "localhost", 21567
#     self.sock.settimeout(1); # Dont wait forever
      self.sock.settimeout(5); # Dont wait forever
    except:
#     raise RuntimeError("ERROR: Unable to open Socket!! ")
      # NOTE(review): connection failure is signaled by sock=None; callers
      # must check it, and close() will raise AttributeError in that case.
      self.sock = None;
    return;
  def close ( self ):
    """Close the TCP connection."""
    self.sock.close();
  def bs(self, addr, bitfield ):
    """Bit-set: read-modify-write that ORs bitfield[0] into the DWORD at addr."""
    rts = self.rd( addr, 1 );
    data_new = rts[0] | bitfield[0]; # OR in some bits
    self.wr( addr, [data_new] );
  def bc(self, addr, bitfield ):
    """Bit-clear: read-modify-write that clears bitfield[0] bits at addr."""
    rts = self.rd( addr, 1 );
    data_new = rts[0] & ~ bitfield[0];# CLR some bits
    self.wr( addr, [data_new] );
  def wr(self, addr, data, repeat = False ):
    """Write a list of DWORDs starting at addr (repeat=True: same address)."""
#   print("HERE");
#   print("%08x" % addr );
#   print( data );
    if ( repeat == False ):
      cmd = "w";# Normal Write : Single or Burst with incrementing address
    else:
      cmd = "W";# Write Multiple DWORDs to same address
    payload = "".join( [cmd + " %x" % addr] +
                       [" %x" % int(d) for d in data] +
                       ["\n"] );
    self.tx_tcp_packet( payload );
    self.rx_tcp_packet();# Server acks every write; consume the reply
  def rd( self, addr, num_dwords=1, repeat = False ):
    """Read num_dwords DWORDs from addr; returns a list of ints
    (repeat=True: read the same address repeatedly)."""
    if ( repeat == False ):
      cmd = "r";# Normal Read : Single or Burst with incrementing address
    else:
      cmd = "k";# Read Multiple DWORDs from single address
    payload = cmd + " %x %x\n" % (addr, (num_dwords-1)); # 0=1DWORD,1=2DWORDs
    self.tx_tcp_packet( payload );
    payload = self.rx_tcp_packet().rstrip();
    dwords = payload.split(' ');
    rts = [];
#   print( dwords );
    for dword in dwords:
      rts += [int( dword, 16 )];
    return rts;
  def tx_tcp_packet( self, payload ):
    # A Packet is a 8char hexadecimal header followed by the payload.
    # The header is the number of bytes in the payload.
    header = "%08x" % len(payload);
    bin_data = (header+payload).encode("utf-8");# String to ByteArray
    self.sock.send( bin_data );
  def rx_tcp_packet( self ):
    # Receive 1+ Packets of response. 1st Packet will start with header that
    # indicates how big the entire Backdoor payload is. Sit in a loop
    # receiving 1+ TCP packets until the entire payload is received.
    bin_data = self.sock.recv(1024);
    rts = bin_data.decode("utf-8");# ByteArray to String
    header = rts[0:8]; # Remove the header, Example "00000004"
    payload_len = int(header,16);# The Payload Length in Bytes, Example 0x4
    payload = rts[8:]; # Start of Payload is everything after header
    # 1st packet may not be entire payload so loop until we have it all
    while ( len(payload) < payload_len ):
      bin_data = self.sock.recv(1024);
      payload += bin_data.decode("utf-8");# ByteArray to String
    return payload;
###############################################################################
main = main();
| [
"adrien.descamps@gmail.com"
] | adrien.descamps@gmail.com |
8f1edffb45c18305e72ee6fa487d900f2792d2a0 | 850493f3c5c9bf3805a04547e5fe6131f9b2f906 | /teashop_server/app_models/user_type_model.py | f92d2c67a6fafe6f95723a87d5641e3f9d5ced12 | [] | no_license | blazej700/ibd | 558f6952d41966fe0e40d74356bcc9f2483add2c | 9e7a5d84f05b3be0a0cbdc247867e179962db02a | refs/heads/main | 2023-02-01T01:19:10.364462 | 2020-12-12T09:46:04 | 2020-12-12T09:46:04 | 311,062,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | from app import db
from flask_restful_swagger_3 import Schema
class UserTypeModel(db.Model):
    """SQLAlchemy model for the user_type lookup table."""
    __tablename__ = 'user_type'
    # Surrogate integer primary key.
    id = db.Column(db.Integer, primary_key=True)
    # User-type name, up to 20 characters.
    user_type_name = db.Column(db.String(20))
    def serialize(self):
        """Return a JSON-serializable dict representation of this row."""
        return {
            'id': self.id,
            'user_type_name' : self.user_type_name,
        }
| [
"blazej700@gmail.com"
] | blazej700@gmail.com |
a02ca8156ec3b6dbe8f89c8ae875458caa693365 | 4683d3bfe1b6ba70a566249149f55671c7bb3826 | /game/3mouse/mousedir.py | aae7cdaa7e430a6e6d2c090a1e43cc79772a6f0d | [] | no_license | crazyj7/python | 01105fe5b8ec23b53164a3f7f8a12690abc0bf6a | 2d0a55c8371aa138bcebb1f65b53109599d39009 | refs/heads/master | 2020-05-02T18:36:00.992398 | 2020-02-07T13:07:13 | 2020-02-07T13:07:13 | 178,133,678 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,427 | py | '''
mouse position angle. move
'''
import pygame
from pygame import Surface
import os, sys
import math
# Global setup: window, colors and the rotatable "user" sprite.
bExit = False
RED=(255,0,0)
GREEN=(0,255,0)
BLUE=(0,0,255)
BLACK=(0,0,0)
pygame.init()
pad = pygame.display.set_mode( (640,480))
pygame.display.set_caption('test')
user = Surface((100,100))
# Starting sprite image: an arrow pointing along the x axis (0 degrees).
pygame.draw.polygon(user, RED, [(0,0), (100,50), (0,100)], 3)
pygame.draw.line(user, GREEN, (100,50), (0, 50), 2)
pygame.draw.rect(user, BLUE, pygame.Rect(0, 0, 100, 100), 2)
user.set_colorkey(BLACK)  # Black pixels become transparent when blitted
pos = user.get_rect()
# Starting position.
pos.centerx = 100
pos.centery = 100
# print('pos (rect)= ', pos, ' current angle=0')
def rot_center2(image, angle):
    """Rotate *image* about its center while keeping the original rect size.

    The rotated surface is cropped back to the source dimensions, so any
    corners that swing outside the original rectangle are clipped; the
    center stays fixed.
    """
    rotated = pygame.transform.rotate(image, angle)
    # Center a crop window of the original size inside the rotated surface.
    crop = image.get_rect().copy()
    crop.center = rotated.get_rect().center
    # Cut the window out; .copy() detaches it from the parent surface.
    return rotated.subsurface(crop).copy()
def rot_center(image, rect, angle):
    """Rotate *image* by *angle* degrees around the center of *rect*.

    Returns (rotated_surface, new_rect).  The new rect may be larger than
    the original so nothing is clipped; its center matches rect.center.
    """
    spun = pygame.transform.rotate(image, angle)
    return spun, spun.get_rect(center=rect.center)
# Main loop: WASD moves the sprite, and it always rotates to face the mouse.
clock = pygame.time.Clock()
speed = 10
while not bExit:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            bExit=True
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                bExit=True
        elif event.type == pygame.MOUSEBUTTONDOWN:
            pass
        elif event.type == pygame.MOUSEBUTTONUP:
            pass
    # WASD keys pan the sprite's rect.
    key = pygame.key.get_pressed()
    if key[pygame.K_a]:
        pos.centerx -= speed
    if key[pygame.K_d]:
        pos.centerx += speed
    if key[pygame.K_w]:
        pos.centery -= speed
    if key[pygame.K_s]:
        pos.centery += speed
    pad.fill(BLACK)
    mousepos = pygame.mouse.get_pos()
    # print('mousepos=', mousepos)
    # Angle from sprite center to mouse; y is negated because screen y grows down.
    angle = math.atan2(pos.centery - mousepos[1], mousepos[0] - pos.centerx)
    print('angle=', angle)
    # angle is in radians: 0..pi upper half, -pi..0 lower half.
    # The sprite points along the x axis at 0 degrees; rotate it CCW by angle.
    # pygame.transform.rotate wants degrees, hence the radians conversion.
    # img = pygame.transform.rotate(user, angle*180/math.pi)
    img, rect = rot_center(user, user.get_rect(), angle*180/math.pi)
    # img = rot_center2(user, angle*180/math.pi)
    # pad.blit(img, (pos.x, pos.y) )
    rect.centerx += pos.x
    rect.centery += pos.y
    pad.blit(img, (rect.x, rect.y))
    mousedown = pygame.mouse.get_pressed()
    # While the left mouse button is held, draw a line from mouse to sprite.
    if mousedown[0]:
        pygame.draw.line(pad, BLUE, mousepos, rect.center)
    # pad.blit(user, (pos.x, pos.y) )
    pygame.display.flip()
    # pygame.display.upate()
    clock.tick(60)
pygame.quit()
| [
"psychic@secuve.com"
] | psychic@secuve.com |
5605956e0c0bed78aa6a229a16b89113b010781d | 6a5c92bf039d768ab2e455e4d0652c2bd847a5ca | /backend/backend/settings.py | 53e3b1db47ed90d2cc34db359f977b74796f5081 | [] | no_license | sahil143/feedback-form | a2bf39162f165b7ca7b11f0a793a3dd21f8c5a98 | 933ab90fee267b0ca88cee9d3363529e5e67992a | refs/heads/master | 2023-05-07T16:48:15.743272 | 2021-05-31T06:56:57 | 2021-05-31T06:56:57 | 372,308,186 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,455 | py | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'django-insecure-3n4io1wg%c@6qpda_z*gfzxbc=_w)92h$zj5t(nq4_r@!)d*hn'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',
    'rest_framework',
    'feedback_form'
]

# Fix: django-cors-headers requires CorsMiddleware to be placed as high as
# possible, and in particular before any middleware that can generate
# responses (such as CommonMiddleware).  The original list had it at the
# bottom and listed CommonMiddleware twice; both issues are corrected here.
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'backend.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'backend.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

# Origins allowed to make cross-site requests (the frontend dev server).
CORS_ORIGIN_WHITELIST = [
    'http://localhost:3000'
]
| [
"sahilbudhwar143@gmail.com"
] | sahilbudhwar143@gmail.com |
5f8bdbabaf7e01920dfee62d7b029a3ca4a594e5 | 87376c79491df2ff693cd6046689251e409d6052 | /cwProject/dog_account/migrations/0001_initial.py | 9f2981d9293cac972cfe539a50e3189bdc7e15e5 | [
"Apache-2.0"
] | permissive | cs-fullstack-2019-spring/django-models-cw-gkg901 | 1874b63ad859a56cc1363856696a136d47f34df2 | 32b8b7135223077c75d6bcd151652cd41d7e0397 | refs/heads/master | 2020-04-24T04:41:50.535817 | 2019-02-22T02:58:33 | 2019-02-22T02:58:33 | 171,713,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,173 | py | # Generated by Django 2.0.6 on 2019-02-20 17:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration for the dog_account app.

    NOTE(review): applied migrations must not be edited in place; the
    issues flagged below should be addressed in a follow-up migration.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=30)),
                ('realName', models.CharField(max_length=50)),
                # NOTE(review): max_length has no effect on IntegerField;
                # presumably a 16-digit account number was intended — a
                # CharField(max_length=16) would actually enforce that.
                ('accountNumber', models.IntegerField(max_length=16)),
                # NOTE(review): max_digits=99999999999 looks like a typo;
                # a small sane digit count (e.g. 12) was probably meant.
                ('balance', models.DecimalField(decimal_places=2, max_digits=99999999999)),
            ],
        ),
        migrations.CreateModel(
            name='Dog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('breed', models.CharField(max_length=50)),
                ('color', models.CharField(max_length=50)),
                ('gender', models.CharField(max_length=6)),
            ],
        ),
    ]
| [
"gerren.gregory@gmail.com"
] | gerren.gregory@gmail.com |
c6216e017e386c6fcba6a03eb401c29dae4b42b7 | abfa70e1da5b4ba8e465cdc046fa36e81386744a | /base_ml/10.1.Iris_DecisionTree.py | 68bd1cb46b1c29c5cf1e31ca7b17b59b9c34a20c | [] | no_license | superman666ai/crazy_project | f850819ff2287e345b67500111733bafa5629d1f | 99dcba0fe246ecaf3f556f747d44731a04231921 | refs/heads/master | 2020-05-15T09:32:56.523875 | 2019-05-16T00:57:23 | 2019-05-16T00:57:23 | 182,179,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,473 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
def iris_type(s):
    """Map an iris species label string to its integer class (0, 1 or 2).

    Used as a numpy.loadtxt converter for the species column; raises
    KeyError for an unknown label.
    """
    class_by_label = {
        'Iris-setosa': 0,
        'Iris-versicolor': 1,
        'Iris-virginica': 2,
    }
    return class_by_label[s]
# Feature display labels (Chinese): sepal length, sepal width, petal length,
# petal width.  They are runtime strings used for plot axis titles, so the
# Chinese text itself must stay unchanged.
# iris_feature = 'sepal length', 'sepal width', 'petal length', 'petal width'
iris_feature = u'花萼长度', u'花萼宽度', u'花瓣长度', u'花瓣宽度'
if __name__ == "__main__":
    # Use the SimHei font so the Chinese labels/titles render, and keep the
    # minus sign displayable when a non-ASCII font is active.
    mpl.rcParams['font.sans-serif'] = [u'SimHei']
    mpl.rcParams['axes.unicode_minus'] = False
    path = '../data/8.iris.data'  # path to the iris data file
    # Column 4 holds the species name; iris_type converts it to an int label.
    data = np.loadtxt(path, dtype=float, delimiter=',', converters={4: iris_type},encoding="utf-8")
    # Split columns: the first 4 are features (x), the last is the class label (y).
    x, y = np.split(data, (4,), axis=1)
    # Keep only the first two feature columns so decision regions can be drawn in 2-D.
    x = x[:, :2]
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1)
    #ss = StandardScaler()
    #ss = ss.fit(x_train)
    # Decision-tree hyper-parameter notes:
    # min_samples_split = 10: a node may (potentially) be split only if it holds more than 10 samples
    # min_samples_leaf = 10: a split is kept only if every child node ends up with more than 10 samples
    model = Pipeline([
        ('ss', StandardScaler()),
        ('DTC', DecisionTreeClassifier(criterion='entropy', max_depth=3))])
    # clf = DecisionTreeClassifier(criterion='entropy', max_depth=3)
    model = model.fit(x_train, y_train)
    y_test_hat = model.predict(x_test)      # predictions on the held-out test data
    # NOTE(review): this prints the bound method object, not an accuracy value;
    # presumably model.score(x_test, y_test) was intended — confirm.
    print(model.score)
    # Export the fitted tree in Graphviz format; render with:
    # dot -Tpng -o 1.png 1.dot
    # NOTE(review): the file handle is never closed; a with-block would be safer.
    f = open('.\\iris_tree.dot', 'w')
    tree.export_graphviz(model.get_params('DTC')['DTC'], out_file=f)
    # Plot the decision regions.
    N, M = 100, 100  # number of sampling points along each axis
    x1_min, x1_max = x[:, 0].min(), x[:, 0].max()  # range of column 0
    x2_min, x2_max = x[:, 1].min(), x[:, 1].max()  # range of column 1
    t1 = np.linspace(x1_min, x1_max, N)
    t2 = np.linspace(x2_min, x2_max, M)
    x1, x2 = np.meshgrid(t1, t2)  # grid of sampling points
    x_show = np.stack((x1.flat, x2.flat), axis=1)  # grid points to classify
    # # Filler for the other two dimensions (no real meaning);
    # # before enabling this, be sure to comment out x = x[:, :2] above.
    # x3 = np.ones(x1.size) * np.average(x[:, 2])
    # x4 = np.ones(x1.size) * np.average(x[:, 3])
    # x_test = np.stack((x1.flat, x2.flat, x3, x4), axis=1)  # grid points to classify
    cm_light = mpl.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
    cm_dark = mpl.colors.ListedColormap(['g', 'r', 'b'])
    y_show_hat = model.predict(x_show)  # predicted class for every grid point
    y_show_hat = y_show_hat.reshape(x1.shape)  # reshape back to the grid's shape
    plt.figure(facecolor='w')
    plt.pcolormesh(x1, x2, y_show_hat, cmap=cm_light)  # shaded decision regions
    plt.scatter(x_test[:, 0], x_test[:, 1], c=y_test.ravel(), edgecolors='k', s=100, cmap=cm_dark, marker='o')  # test samples
    plt.scatter(x[:, 0], x[:, 1], c=y.ravel(), edgecolors='k', s=40, cmap=cm_dark)  # all samples
    plt.xlabel(iris_feature[0], fontsize=15)
    plt.ylabel(iris_feature[1], fontsize=15)
    plt.xlim(x1_min, x1_max)
    plt.ylim(x2_min, x2_max)
    plt.grid(True)
    plt.title(u'鸢尾花数据的决策树分类', fontsize=17)
    plt.show()
    # Accuracy on the held-out test set.
    y_test = y_test.reshape(-1)
    # print y_test_hat
    # print y_test
    result = (y_test_hat == y_test)  # True where the prediction is correct
    acc = np.mean(result)
    # print '准确度: %.2f%%' % (100 * acc)
    # Overfitting study: test error rate as a function of tree depth.
    depth = np.arange(1, 15)
    err_list = []
    for d in depth:
        clf = DecisionTreeClassifier(criterion='entropy', max_depth=d)
        clf = clf.fit(x_train, y_train)
        y_test_hat = clf.predict(x_test)  # predictions on the test data
        result = (y_test_hat == y_test)  # True where the prediction is correct
        err = 1 - np.mean(result)
        err_list.append(err)
        # print d, ' 准确度: %.2f%%' % (100 * err)
    plt.figure(facecolor='w')
    plt.plot(depth, err_list, 'ro-', lw=2)
    plt.xlabel(u'决策树深度', fontsize=15)
    plt.ylabel(u'错误率', fontsize=15)
    plt.title(u'决策树深度与过拟合', fontsize=17)
    plt.grid(True)
    plt.show()
| [
"keepingoner@163.com"
] | keepingoner@163.com |
30dd3f1c4df8cb5dbb131dfd4d1780d86003bd26 | c1d4f80fbf94fc1cb075d04284cc726f354cc586 | /diffOfSquares.py | de5cfad77b93706c58f2823a072b50e608d6464d | [
"MIT"
] | permissive | azizamukhamedova/Python-Thunder | 2af6ec8d9be07d03d51a53430449c41ec6f21a0d | 820b943c4884dad4a247c7480b86c057a1508509 | refs/heads/master | 2022-12-24T10:05:06.879056 | 2020-10-01T10:04:30 | 2020-10-01T10:04:30 | 300,230,819 | 2 | 0 | MIT | 2020-10-01T09:59:00 | 2020-10-01T09:58:59 | null | UTF-8 | Python | false | false | 204 | py | '''
Problem Task : This program returns the difference in areas of two squares.
Problem Link: https://edabit.com/challenge/NNhkGocuPMcryW7GP
'''
def square_areas_difference(r):
    """Return the area of a square with side 2*r minus the combined
    area of two squares with side r (i.e. (2r)^2 - 2*r^2)."""
    big_square = (2 * r) ** 2
    two_small_squares = 2 * (r ** 2)
    return big_square - two_small_squares
| [
"noreply@github.com"
] | azizamukhamedova.noreply@github.com |
921140b83f3882c30e59c2c40f58e83ac495e3d1 | 6d8817b7a81c1f65c10ada235edde0f2f37f2f01 | /test/123.py | f08af05041c3702e27f66562116cd8474a131365 | [] | no_license | angel681166/LawTech | 63ca49aa90a53ee3b70bcf3e4ae761dd53e8d19b | 539ef05ed6a32f3c2b551301b51608ec8b340fc3 | refs/heads/main | 2023-01-08T08:38:58.220828 | 2020-11-08T17:24:49 | 2020-11-08T17:24:49 | 304,250,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15 | py | hello law tech
| [
"noreply@github.com"
] | angel681166.noreply@github.com |
b75e556ea1b40b23295bd4f418cd7787509b3aab | a1af08e61db95281579497b2a3f05535c60b0c84 | /Algorithms and data stractures in python/lesson2/les_2_task_4.py | 64d41bb9219825990ead01dc96a40b2c5a4aa986 | [] | no_license | kargamant/education | 4c6d4bd419094eb175a73bb3888b8638b7d42af4 | 21e346a3eedf342efaae3636f24385b97713c06d | refs/heads/master | 2020-07-03T10:50:32.152899 | 2020-06-19T10:33:36 | 2020-06-19T10:33:36 | 201,883,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | #4. Найти сумму n элементов следующего ряда чисел: 1, -0.5, 0.25, -0.125,…
#Количество элементов (n) вводится с клавиатуры.
n = int(input('Введите количество элементов>>>'))
b1 = 1
q = -0.5
s = 0
for i in range(n):
s += b1 * pow(q, i)
print(f'Сумма {n} элементов: {s}') | [
"noreply@github.com"
] | kargamant.noreply@github.com |
d3980370454d25fd98274030292d5c8ed674a8f7 | 4116790ee11de30eade92cabd5cddcb0978eb2c9 | /employeerest/company/company/views.py | bce69d005ffe90441d1cc9375a9ca66db31e094a | [] | no_license | Joel-hanson/djangomytutorial | 4e8aadbccea831bb8f7e4cf0de3d35e4bfeaadc0 | 93d2925ae1a8d5f5dcec03e0c85b3ff0e492d125 | refs/heads/master | 2021-08-30T10:48:42.207229 | 2017-12-17T14:43:34 | 2017-12-17T14:43:34 | 108,539,027 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | from django.views.generic import TemplateView
class TestPage(TemplateView):
    """Static page rendered from firstapp/test.html."""
    template_name = 'firstapp/test.html'
class ThanksPage(TemplateView):
    """Static page rendered from firstapp/thanks.html."""
    template_name = 'firstapp/thanks.html'
class HomePage(TemplateView):
    """Site landing page rendered from firstapp/index.html."""
    template_name = 'firstapp/index.html'
| [
"joelhanson025@gmail.com"
] | joelhanson025@gmail.com |
966693712e3410280164b684654420510e60bfac | 07cc188b2e10f204cd0191aa3c28ca058b863973 | /film_crawler/film_crawler/film_crawler/graph_constructor.py | 0be10c6e74fbbf8cce417100cccac2e37d73d3fa | [] | no_license | WriteAfterReed/web_scraper_wiki_film_proj | b80bfc18a14832c2bf01100e9eee56375c9f6ac6 | 1a55e570b54700ef55bb6d73cf76d47456952f3e | refs/heads/master | 2022-12-28T17:21:46.354362 | 2020-10-06T03:18:06 | 2020-10-06T03:18:06 | 301,601,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,098 | py | import json
class Actor:
    """Graph node for an actor: name, birth year, filmography, and earnings."""

    def __init__(self, name="", year=1900, films=None, income=0):
        """Create an actor.

        name   -- actor's name.
        year   -- birth year.
        films  -- list of film titles; a fresh list is created when omitted.
        income -- accumulated earnings.
        """
        self.status = "actor"
        self.name = name
        self.year = year
        # BUG FIX: the old default ``films=[]`` was a single shared list, so
        # every Actor created without an explicit list shared one filmography.
        self.films = [] if films is None else films
        self.income = income

    def __str__(self):
        return self.name

    def get_name(self):
        return self.name

    def set_name(self, name):
        self.name = name

    def get_year(self):
        """Return the birth year."""
        return self.year

    def set_year(self, year):
        self.year = year

    def get_age(self):
        # NOTE: the reference year 2019 is hard-coded (kept for backward
        # compatibility with existing query output).
        return 2019 - self.year

    def get_films(self):
        return self.films

    def add_film(self, film):
        """Append a film title to this actor's filmography."""
        self.films.append(film)

    def get_income(self):
        return self.income

    def add_income(self, income):
        """Add ``income`` to the actor's running earnings total."""
        self.income += income

    def get_status(self):
        return self.status
class Film:
    """Graph node for a film: title, release year, cast list, and gross income."""

    def __init__(self, name="", year=1900, cast=None, income=0):
        """Create a film.

        name   -- film title.
        year   -- release year.
        cast   -- list of actor names; a fresh list is created when omitted.
        income -- gross income.
        """
        self.status = "film"
        self.name = name
        self.year = year
        # BUG FIX: the old default ``cast=[]`` was a single shared list, so
        # every Film created without an explicit list shared one cast.
        self.cast = [] if cast is None else cast
        self.income = income

    def __str__(self):
        return self.name

    def get_status(self):
        return self.status

    def get_name(self):
        return self.name

    def set_name(self, name):
        self.name = name

    def get_year(self):
        """Return the release year."""
        return self.year

    def set_year(self, year):
        self.year = year

    def get_cast(self):
        return self.cast

    def add_cast(self, actor):
        """Append an actor name to this film's cast."""
        self.cast.append(actor)

    def get_income(self):
        return self.income

    def set_income(self, income):
        self.income = income
class Graph:
    """Adjacency-list graph keyed by node name (actor name or film title).

    ``datum`` maps each vertex name to a list of neighbouring vertex names.
    """

    def __init__(self):
        self.datum = {}

    def new_vert(self, node, entry=None):
        """Register ``node`` with edge list ``entry`` (a fresh empty list by default).

        BUG FIX: the old default ``entry=[]`` was a single shared list, so every
        vertex created without an explicit edge list shared the same edges.
        """
        self.datum[node] = [] if entry is None else entry

    def update_vert(self, node, entry):
        """Add edge ``entry`` to ``node`` if not already present."""
        obj = self.datum[node]
        if entry not in obj:
            obj.append(entry)
        self.datum[node] = obj

    def get_verts(self):
        """Return a view of all vertex names."""
        return self.datum.keys()

    def get_edges(self):
        """Return a view of all edge lists."""
        return self.datum.values()

    def write_to_file(self):
        """Dump the adjacency map to result.json in the working directory."""
        with open('result.json', 'w') as fp:
            json.dump(self.datum, fp)

    def read_from_json(self, path):
        """Load the adjacency map from the first element of the JSON list at ``path``.

        Also prints up to ~100 vertex names for debugging (behavior preserved).
        """
        with open(path, 'r') as json_file:
            temp = json.load(json_file)
            self.datum = temp[0]
        count = 0
        print(type(self.datum))
        print(self.datum.keys())
        for each in self.datum:
            if count > 100:
                break
            print("")
            print(each)
            count += 1
def query_one(target, mapper):
    """Print the gross income recorded for the film ``target`` in ``mapper``."""
    gross = mapper[target].get_income()
    print("Exec query 1...")
    print(f"For film: {target} the gross was: {gross}")
    print("Fin query 1 \n")
def query_two(target, mapper):
    """Print the films the actor ``target`` in ``mapper`` has worked on."""
    filmography = mapper[target].get_films()
    print("Exec query 2...")
    print(f"For Actor: {target} they have worked on: {filmography}")
    print("Fin query 2 \n")
def query_three(target, mapper):
    """Print the cast list of the film ``target`` in ``mapper``."""
    cast_list = mapper[target].get_cast()
    print("Exec query 3...")
    print(f"For film: {target} the cast was: {cast_list}")
    print("Fin query 3 \n")
def query_four(actor_map):
    """Print the (up to) five highest-grossing actors in ``actor_map``.

    BUG FIX: the old loop indexed range(0, 5) unconditionally and raised
    IndexError whenever the map held fewer than five actors; slicing handles
    any size.
    """
    payload = [(name, obj.get_income()) for name, obj in actor_map.items()]
    # Sort ascending then reverse (kept from the original so tie ordering
    # among equal incomes is unchanged).
    sorted_by_second = sorted(payload, key=lambda tup: tup[1])
    sorted_by_second.reverse()
    print("Exec query 4...")
    print("The top grossing actors are: ")
    for name, worth in sorted_by_second[:5]:
        print(str(name) + " is worth " + str(worth))
    print("Fin query 4 \n")
def query_five(actor_map):
    """Print the (up to) five oldest actors in ``actor_map``.

    BUG FIX: the old loop indexed range(0, 5) unconditionally and raised
    IndexError whenever the map held fewer than five actors; slicing handles
    any size.
    """
    payload = [(name, obj.get_age()) for name, obj in actor_map.items()]
    # Sort ascending then reverse (kept from the original so tie ordering
    # among equal ages is unchanged).
    sorted_by_second = sorted(payload, key=lambda tup: tup[1])
    sorted_by_second.reverse()
    print("Exec query 5...")
    print("The top oldest actors are: ")
    for name, age in sorted_by_second[:5]:
        print(str(name) + " is age " + str(age))
    print("Fin query 5 \n")
def query_six(film_map, target_year):
    """Print every film in ``film_map`` released in ``target_year``.

    Cleanup: removed the unused ``payload`` local; fixed the "Flim:" typo
    in the output line.
    """
    print("Exec query 6...")
    print("For the year " + str(target_year) + " films are...")
    for movie, obj in film_map.items():
        if obj.get_year() == target_year:
            print("Film: " + movie)
    print("Fin query 6 \n")
def query_seven(actor_map, target_year):
    """Print every actor in ``actor_map`` born in ``target_year``.

    Cleanup: removed the unused ``payload`` local.
    """
    print("Exec query 7...")
    print("For the year " + str(target_year) + " actors born are...")
    for person, obj in actor_map.items():
        if obj.get_year() == target_year:
            print("Actor: " + person)
    print("Fin query 7 \n")
# Module-level registries used by the (mostly commented-out) build code in
# test_first_week below.
actor_list = []   # actor names seen so far
actor_dict = {}   # actor name -> Actor instance
film_list = []    # film title -> Film instance is kept in film_dict; this holds titles
film_dict = {}    # film title -> Film instance
graph = Graph()
# NOTE(review): this runs at import time and opens "data.json" from the
# current working directory — importing the module fails if the file is absent.
graph.read_from_json("data.json")
def test_first_week():
    """Reload the module-level graph from data.json.

    NOTE(review): the bulk of this function is commented-out legacy code that
    built the actor/film graph from ../out.json and ran queries 1-7; only the
    read_from_json call below is live.
    """
    # dataset = None
    # with open('../out.json') as json_file:
    #     dataset = json.load(json_file)
    graph.read_from_json("data.json")
    #
    # for each in dataset:
    #     # This parses current Json for Actors
    #     if each['page_type'] == 'actor':
    #         year = each['actor_year']
    #         name = each['name']
    #         films = []
    #         income = 0
    #         if (2019 - year) > 100:
    #             continue
    #         if name not in actor_list:
    #             actor_list.append(name)
    #             new_actor = Actor(name, year, films, income)
    #             actor_dict[name] = new_actor
    #
    # for each in dataset:
    #
    #     # This parses current Json for films
    #     if each['page_type'] == "film":
    #         year = each['film_year']
    #         film_name = each['name']
    #         cast = each['film_cast']
    #         income = each['film_value']
    #         if film_name not in film_list:
    #             film_list.append(film_name)
    #             new_film = Film(film_name, year, cast, income)
    #             for person in cast:
    #                 if person in actor_dict.keys():
    #                     income = income // 2
    #                     actor_obj = actor_dict[person]
    #                     actor_obj.add_income(income)
    #                     actor_obj.add_film(film_name)
    #
    #             film_dict[film_name] = new_film
    #
    # for each in actor_list:
    #     entry = actor_dict[each]
    #     film_edges = entry.get_films()
    #     graph.new_vert(each, film_edges)
    #
    # for each in film_list:
    #     entry = film_dict[each]
    #     actor_edges = entry.get_cast()
    #     graph.new_vert(each, actor_edges)
    #
    # query_one("Drive (2011 film)", film_dict)
    # query_two("Michael Caine", actor_dict)
    # query_three("Drive (2011 film)", film_dict)
    # query_four(actor_dict)
    # query_five(actor_dict)
    # query_six(film_dict, 2012)
    # query_seven(actor_dict, 1964)
    #
    # graph.write_to_file()
| [
"mloviska15@gmail.com"
] | mloviska15@gmail.com |
8d3399769dfddb9fe82a9f192ca45d86625e5e59 | d5688ec8a696b7d8bb34ef5e0a7876532619fce8 | /spreadsheetupload/urls.py | 3145c62652bdfbd3a855d1e75b5cf1101a6fdefb | [] | no_license | varunsarvesh/spreadsheet | dffdea68bf449d232fcb0e33382e58fe8540471e | e81112193efe0d638881f1a8b7b5138d7af433b3 | refs/heads/master | 2020-03-18T19:51:02.133600 | 2018-05-29T08:51:06 | 2018-05-29T08:51:06 | 135,181,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | """spreadsheetupload URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('XLApp/', include('upload.urls')),
]
| [
"varun@cyces.co"
] | varun@cyces.co |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.